2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <mono/utils/mono-compiler.h>
29 #ifdef HAVE_SYS_TIME_H
37 #include <mono/utils/memcheck.h>
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/mono-debug.h>
54 #include <mono/metadata/mono-debug-debugger.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/metadata/debug-mono-symfile.h>
63 #include <mono/utils/mono-compiler.h>
64 #include <mono/utils/mono-memory-model.h>
65 #include <mono/utils/mono-error-internals.h>
66 #include <mono/metadata/mono-basic-block.h>
67 #include <mono/metadata/reflection-internals.h>
68 #include <mono/utils/mono-threads-coop.h>
74 #include "jit-icalls.h"
76 #include "debugger-agent.h"
77 #include "seq-points.h"
78 #include "aot-compiler.h"
79 #include "mini-llvm.h"
/* Inliner heuristics: estimated cost of a branch and the max IL size (in
 * instructions) of an inlining candidate. */
81 #define BRANCH_COST 10
82 #define INLINE_LENGTH_LIMIT 20
84 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current callee: record the reason and bail out of
 * the IR-generation loop via the method-local 'exception_exit' label. */
85 #define INLINE_FAILURE(msg) do { \
86 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
87 inline_failure (cfg, msg); \
88 goto exception_exit; \
/* Bail out if an exception has already been recorded on the compile. */
91 #define CHECK_CFG_EXCEPTION do {\
92 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
93 goto exception_exit; \
/* Record a FieldAccessException for (method, field) and abort compilation. */
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
/* Abort generic sharing for OPCODE when compiling a gshared method. */
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
/* Same as above, but only for gsharedvt (valuetype sharing) compiles. */
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
/* Flag the compile as failed with an OutOfMemory MonoError and abort. */
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
/* Mark the method as not AOT-compilable, logging the call site when verbose. */
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
/* Generic metadata-load failure: break into the debugger if requested,
 * record a TypeLoadException and abort. */
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
/* Like LOAD_ERROR but remembers which class failed to load. */
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
/* Propagate a pending MonoError on the compile through 'mono_error_exit'. */
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whether 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in the file. */
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/* Core inliner entry point; defined later in this file. */
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
154 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 /* helper methods signatures */
/* Lazily created icall signatures, shared by all compiles; see
 * mono_create_helper_signatures () below. */
156 static MonoMethodSignature *helper_sig_domain_get;
157 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
158 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
160 /* type loading helpers */
/* Expand to cached class-lookup accessor functions (mono_class_get_*). */
161 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
162 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
165 * Instruction metadata
/* The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * mini-ops.h so the same opcode list expands to different per-opcode data
 * (first to dest/src register kinds, then to source-register counts). */
173 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
174 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
180 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
185 /* keep in sync with the enum in mini.h */
188 #include "mini-ops.h"
/* Second expansion: map each opcode to its number of source registers. */
193 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
194 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
196 * This should contain the index of the last sreg + 1. This is not the same
197 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
199 const gint8 ins_sreg_counts[] = {
200 #include "mini-ops.h"
/* Initialize a MonoMethodVar; 0xffff marks "no first use recorded yet". */
205 #define MONO_INIT_VARINFO(vi,id) do { \
206 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the file-local virtual-register allocators so other
 * JIT files can allocate int/long/float/pointer/destination vregs.
 * (NOTE(review): return-type lines and braces are not visible in this
 * excerpt; each wrapper simply forwards to the corresponding alloc_* ().) */
212 mono_alloc_ireg (MonoCompile *cfg)
214 return alloc_ireg (cfg);
218 mono_alloc_lreg (MonoCompile *cfg)
220 return alloc_lreg (cfg);
224 mono_alloc_freg (MonoCompile *cfg)
226 return alloc_freg (cfg);
230 mono_alloc_preg (MonoCompile *cfg)
232 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
236 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
238 return alloc_dreg (cfg, stack_type);
242 * mono_alloc_ireg_ref:
244 * Allocate an IREG, and mark it as holding a GC ref.
247 mono_alloc_ireg_ref (MonoCompile *cfg)
249 return alloc_ireg_ref (cfg);
253 * mono_alloc_ireg_mp:
255 * Allocate an IREG, and mark it as holding a managed pointer.
258 mono_alloc_ireg_mp (MonoCompile *cfg)
260 return alloc_ireg_mp (cfg);
264 * mono_alloc_ireg_copy:
266 * Allocate an IREG with the same GC type as VREG.
269 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
271 if (vreg_is_ref (cfg, vreg))
272 return alloc_ireg_ref (cfg);
273 else if (vreg_is_mp (cfg, vreg))
274 return alloc_ireg_mp (cfg);
276 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) used to
 * copy a value of that type between vregs. Enums recurse on their base type,
 * generic instances on the generic container's open type.
 * (NOTE(review): several case labels and return statements are missing from
 * this excerpt; the visible lines are kept unmodified.) */
280 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
285 type = mini_get_underlying_type (type);
287 switch (type->type) {
300 case MONO_TYPE_FNPTR:
302 case MONO_TYPE_CLASS:
303 case MONO_TYPE_STRING:
304 case MONO_TYPE_OBJECT:
305 case MONO_TYPE_SZARRAY:
306 case MONO_TYPE_ARRAY:
310 #if SIZEOF_REGISTER == 8
/* R4 values live in float regs (OP_RMOVE) only when r4fp is enabled. */
316 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
319 case MONO_TYPE_VALUETYPE:
320 if (type->data.klass->enumtype) {
321 type = mono_class_enum_basetype (type->data.klass);
324 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 case MONO_TYPE_TYPEDBYREF:
329 case MONO_TYPE_GENERICINST:
330 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
334 g_assert (cfg->gshared);
335 if (mini_type_var_is_vt (type))
338 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
340 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges (with depth-first numbers)
 * and all of its instructions to stdout, prefixed by MSG. */
346 mono_print_bb (MonoBasicBlock *bb, const char *msg)
351 printf ("\n%s %d: [IN: ", msg, bb->block_num);
352 for (i = 0; i < bb->in_count; ++i)
353 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
355 for (i = 0; i < bb->out_count; ++i)
356 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/* One-time creation of the shared icall signatures declared above; the
 * string arguments encode return and parameter types ("ptr ptr" etc.). */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
/* Debugging hook: trap when unverifiable IL is hit, if the
 * 'break_on_unverified' debug option is set. Kept out-of-line
 * (MONO_NEVER_INLINE) so the cold failure paths stay small. */
370 static MONO_NEVER_INLINE void
371 break_on_unverified (void)
373 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException on the compile, naming the field and the
 * offending method; frees the temporary name strings. */
377 static MONO_NEVER_INLINE void
378 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *field_fname = mono_field_full_name (field);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
383 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
384 g_free (method_fname);
385 g_free (field_fname);
/* Mark the compile as a failed inline attempt, optionally logging MSG. */
388 static MONO_NEVER_INLINE void
389 inline_failure (MonoCompile *cfg, const char *msg)
391 if (cfg->verbose_level >= 2)
392 printf ("inline failed: %s\n", msg);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
396 static MONO_NEVER_INLINE void
397 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
399 if (cfg->verbose_level > 2) \
400 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt compilation failure: build a descriptive message
 * (stored on the compile; freed with it), optionally log it, and flag the
 * compile so an unshared instantiation is compiled instead. */
404 static MONO_NEVER_INLINE void
405 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
407 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
408 if (cfg->verbose_level >= 2)
409 printf ("%s\n", cfg->exception_message);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
415 * foo<T> (int i) { ldarg.0; box T; }
/* On unverifiable IL: under gsharedvt, fall back to compiling the concrete
 * instantiation; otherwise trap (if requested) and fail verification. */
417 #define UNVERIFIED do { \
418 if (cfg->gsharedvt) { \
419 if (cfg->verbose_level > 2) \
420 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
422 goto exception_exit; \
424 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * address IP, using the cil-offset -> bblock map; out-of-range IPs are
 * treated as unverifiable IL. */
428 #define GET_BBLOCK(cfg,tblock,ip) do { \
429 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
431 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
432 NEW_BBLOCK (cfg, (tblock)); \
433 (tblock)->cil_code = (ip); \
434 ADD_BBLOCK (cfg, (tblock)); \
438 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit a single LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh managed-pointer vreg. */
439 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
440 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
441 (dest)->dreg = alloc_ireg_mp ((cfg)); \
442 (dest)->sreg1 = (sr1); \
443 (dest)->sreg2 = (sr2); \
444 (dest)->inst_imm = (imm); \
445 (dest)->backend.shift_amount = (shift); \
446 MONO_ADD_INS ((cfg)->cbb, (dest)); \
450 /* Emit conversions so both operands of a binary opcode are of the same type */
/* Rewrites *arg1_ref/*arg2_ref in place: R4 operands mixed with R8 are
 * converted up to R8; on 64-bit, an I4 operand mixed with a native int is
 * sign-extended and re-wired as the instruction's sreg2. */
452 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
454 MonoInst *arg1 = *arg1_ref;
455 MonoInst *arg2 = *arg2_ref;
458 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
459 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
462 /* Mixing r4/r8 is allowed by the spec */
463 if (arg1->type == STACK_R4) {
464 int dreg = alloc_freg (cfg);
466 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
467 conv->type = STACK_R8;
471 if (arg2->type == STACK_R4) {
472 int dreg = alloc_freg (cfg);
474 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
475 conv->type = STACK_R8;
481 #if SIZEOF_REGISTER == 8
482 /* FIXME: Need to add many more cases */
483 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
486 int dr = alloc_preg (cfg);
487 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
488 (ins)->sreg2 = widen->dreg;
/* Pop two eval-stack entries (sp[0], sp[1]), emit the type-specialized
 * binary op, and push the (possibly decomposed) result. Relies on the
 * IR-loop locals 'cfg', 'ins' and 'sp' being in scope at the use site. */
493 #define ADD_BINOP(op) do { \
494 MONO_INST_NEW (cfg, ins, (op)); \
496 ins->sreg1 = sp [0]->dreg; \
497 ins->sreg2 = sp [1]->dreg; \
498 type_from_op (cfg, ins, sp [0], sp [1]); \
500 /* Have to insert a widening op */ \
501 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
502 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
503 MONO_ADD_INS ((cfg)->cbb, (ins)); \
504 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Same shape as ADD_BINOP for a unary opcode: one operand, one result. */
507 #define ADD_UNOP(op) do { \
508 MONO_INST_NEW (cfg, ins, (op)); \
510 ins->sreg1 = sp [0]->dreg; \
511 type_from_op (cfg, ins, sp [0], NULL); \
513 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair for a two-way IL branch:
 * resolves/creates the true and false target blocks, links the CFG edges,
 * spills the eval stack if it is non-empty at the branch, and signals the
 * main loop to start a new bblock (1 = fallthrough known, 2 = from ip). */
518 #define ADD_BINCOND(next_block) do { \
521 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
522 cmp->sreg1 = sp [0]->dreg; \
523 cmp->sreg2 = sp [1]->dreg; \
524 type_from_op (cfg, cmp, sp [0], sp [1]); \
526 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
527 type_from_op (cfg, ins, sp [0], sp [1]); \
528 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
529 GET_BBLOCK (cfg, tblock, target); \
530 link_bblock (cfg, cfg->cbb, tblock); \
531 ins->inst_true_bb = tblock; \
532 if ((next_block)) { \
533 link_bblock (cfg, cfg->cbb, (next_block)); \
534 ins->inst_false_bb = (next_block); \
535 start_new_bblock = 1; \
537 GET_BBLOCK (cfg, tblock, ip); \
538 link_bblock (cfg, cfg->cbb, tblock); \
539 ins->inst_false_bb = tblock; \
540 start_new_bblock = 2; \
542 if (sp != stack_start) { \
543 handle_stack_args (cfg, stack_start, sp - stack_start); \
544 CHECK_UNVERIFIABLE (cfg); \
546 MONO_ADD_INS (cfg->cbb, cmp); \
547 MONO_ADD_INS (cfg->cbb, ins); \
551 * link_bblock: Links two basic blocks
553 * links two basic blocks in the control flow graph, the 'from'
554 * argument is the starting block and the 'to' argument is the block
555 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping edges that
 * already exist; edge arrays are grown by reallocating from the compile
 * mempool (old arrays are abandoned — mempool memory is never freed
 * individually). NOTE(review): the two debug printfs say "IL%04x" where the
 * others say "IL_%04x" — looks like a typo in the debug output; confirm
 * before changing the string. */
558 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
560 MonoBasicBlock **newa;
564 if (from->cil_code) {
566 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
568 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
571 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
573 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
578 for (i = 0; i < from->out_count; ++i) {
579 if (to == from->out_bb [i]) {
585 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
586 for (i = 0; i < from->out_count; ++i) {
587 newa [i] = from->out_bb [i];
/* Mirror the same logic for the in-edge on 'to'. */
595 for (i = 0; i < to->in_count; ++i) {
596 if (from == to->in_bb [i]) {
602 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
603 for (i = 0; i < to->in_count; ++i) {
604 newa [i] = to->in_bb [i];
/* Public wrapper over the static link_bblock (). */
613 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
615 link_bblock (cfg, from, to);
619 * mono_find_block_region:
621 * We mark each basic block with a region ID. We use that to avoid BB
622 * optimizations when blocks are in different regions.
625 * A region token that encodes where this region is, and information
626 * about the clause owner for this block.
628 * The region encodes the try/catch/filter clause that owns this block
629 * as well as the type. -1 is a special value that represents a block
630 * that is in none of try/catch/filter.
/* Token layout: (clause index + 1) << 8, OR'd with a MONO_REGION_* kind and
 * the clause flags. Handler regions are checked before try regions so the
 * innermost classification wins. */
633 mono_find_block_region (MonoCompile *cfg, int offset)
635 MonoMethodHeader *header = cfg->header;
636 MonoExceptionClause *clause;
/* First pass: is the offset inside a filter or handler body? */
639 for (i = 0; i < header->num_clauses; ++i) {
640 clause = &header->clauses [i];
641 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
642 (offset < (clause->handler_offset)))
643 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
645 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
646 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
647 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
648 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
649 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
651 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: otherwise, is it inside a protected (try) range? */
654 for (i = 0; i < header->num_clauses; ++i) {
655 clause = &header->clauses [i];
657 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
658 return ((i + 1) << 8) | clause->flags;
/* Returns whether IL offset OFFSET lies inside the handler body of a
 * finally or fault clause of the current method. */
665 ip_in_finally_clause (MonoCompile *cfg, int offset)
667 MonoMethodHeader *header = cfg->header;
668 MonoExceptionClause *clause;
671 for (i = 0; i < header->num_clauses; ++i) {
672 clause = &header->clauses [i];
/* Only finally/fault handlers are of interest here. */
673 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
676 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE whose protected range contains IP but
 * not TARGET, i.e. the handlers a branch from IP to TARGET would leave
 * (used to run finally blocks on leave). Result is a GList of clauses. */
683 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
685 MonoMethodHeader *header = cfg->header;
686 MonoExceptionClause *clause;
690 for (i = 0; i < header->num_clauses; ++i) {
691 clause = &header->clauses [i];
692 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
693 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
694 if (clause->flags == type)
695 res = g_list_append (res, clause);
/* Get or create the stack-pointer spill variable for an EH region, cached
 * per-region in cfg->spvars. Marked volatile so the register allocator
 * leaves it on the stack (it must survive exceptional control flow). */
702 mono_create_spvar_for_region (MonoCompile *cfg, int region)
706 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
710 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
711 /* prevent it from being register allocated */
712 var->flags |= MONO_INST_VOLATILE;
714 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler
 * starting at OFFSET. */
718 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
720 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Like the spvar helper above, but for the per-handler exception object
 * (an object-typed local, cached in cfg->exvars, kept off registers). */
724 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
728 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
732 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
733 /* prevent it from being register allocated */
734 var->flags |= MONO_INST_VOLATILE;
736 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
742 * Returns the type used in the eval stack when @type is loaded.
743 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from TYPE. Byrefs become
 * STACK_MP; enums recurse on their base type; generic instances recurse on
 * the open container type; shared type variables become either STACK_VTYPE
 * (gsharedvt) or their constrained underlying type. */
746 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
750 type = mini_get_underlying_type (type);
751 inst->klass = klass = mono_class_from_mono_type (type);
753 inst->type = STACK_MP;
758 switch (type->type) {
760 inst->type = STACK_INV;
768 inst->type = STACK_I4;
773 case MONO_TYPE_FNPTR:
774 inst->type = STACK_PTR;
776 case MONO_TYPE_CLASS:
777 case MONO_TYPE_STRING:
778 case MONO_TYPE_OBJECT:
779 case MONO_TYPE_SZARRAY:
780 case MONO_TYPE_ARRAY:
781 inst->type = STACK_OBJ;
785 inst->type = STACK_I8;
/* R4 may be STACK_R4 or STACK_R8 depending on the backend's r4 handling. */
788 inst->type = cfg->r4_stack_type;
791 inst->type = STACK_R8;
793 case MONO_TYPE_VALUETYPE:
794 if (type->data.klass->enumtype) {
795 type = mono_class_enum_basetype (type->data.klass);
799 inst->type = STACK_VTYPE;
802 case MONO_TYPE_TYPEDBYREF:
803 inst->klass = mono_defaults.typed_reference_class;
804 inst->type = STACK_VTYPE;
806 case MONO_TYPE_GENERICINST:
807 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables require generic sharing to be active. */
811 g_assert (cfg->gshared);
812 if (mini_is_gsharedvt_type (type)) {
813 g_assert (cfg->gsharedvt);
814 inst->type = STACK_VTYPE;
816 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
820 g_error ("unknown type 0x%02x in eval stack type", type->type);
825 * The following tables are used to quickly validate the IL code in type_from_op ().
/* All 2-D tables below are indexed [type of operand 1][type of operand 2]
 * using the STACK_* enum (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4 — see
 * the row comments on bin_comp_table). STACK_INV entries mark operand
 * combinations that are invalid IL. */
/* Result type of arithmetic binops (add/sub/mul/...). */
828 bin_num_table [STACK_MAX] [STACK_MAX] = {
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
834 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of neg/not per operand type (presumably neg_table — the
 * declaration line is not visible in this excerpt). */
842 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
845 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
847 bin_int_table [STACK_MAX] [STACK_MAX] = {
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
850 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
851 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero = allowed (values > 1 encode
 * special/partially-verifiable combinations used by type_from_op). */
859 bin_comp_table [STACK_MAX] [STACK_MAX] = {
860 /* Inv i L p F & O vt r4 */
862 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
863 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
864 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
865 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
866 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
867 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
868 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
869 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
872 /* reduce the size of this table */
/* Result type of shl/shr/shr.un: shift amount may be I4 or native int. */
874 shift_table [STACK_MAX] [STACK_MAX] = {
875 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
877 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
878 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
886 * Tables to map from the non-specific opcode to the matching
887 * type-specific opcode.
/* Each entry is the delta added to the generic CIL opcode to obtain the
 * type-specific IR opcode, indexed by the (already computed) STACK_* type. */
889 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
891 binops_op_map [STACK_MAX] = {
892 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
895 /* handles from CEE_NEG to CEE_CONV_U8 */
897 unops_op_map [STACK_MAX] = {
898 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
901 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
903 ovfops_op_map [STACK_MAX] = {
904 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
907 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
909 ovf2ops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
913 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
915 ovf3ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
919 /* handles from CEE_BEQ to CEE_BLT_UN */
921 beqops_op_map [STACK_MAX] = {
922 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
925 /* handles from CEE_CEQ to CEE_CLT_UN */
927 ceqops_op_map [STACK_MAX] = {
928 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
932 * Sets ins->type (the type on the eval stack) according to the
933 * type of the opcode and the arguments to it.
934 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
936 * FIXME: this function sets ins->type unconditionally in some cases, but
937 * it should set it to invalid for some types (a conv.x on an object)
/* Also rewrites ins->opcode from the generic CIL opcode to the
 * type-specialized IR opcode using the *_op_map delta tables above.
 * (NOTE(review): many case labels are missing from this excerpt; the
 * visible code is kept unmodified.) */
940 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
942 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, then specialize. */
949 /* FIXME: check unverifiable args for STACK_MP */
950 ins->type = bin_num_table [src1->type] [src2->type];
951 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
958 ins->type = bin_int_table [src1->type] [src2->type];
959 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
964 ins->type = shift_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
/* Compare: choose the L/R4/R8/I variant from the first operand's width. */
970 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
971 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
972 ins->opcode = OP_LCOMPARE;
973 else if (src1->type == STACK_R4)
974 ins->opcode = OP_RCOMPARE;
975 else if (src1->type == STACK_R8)
976 ins->opcode = OP_FCOMPARE;
978 ins->opcode = OP_ICOMPARE;
980 case OP_ICOMPARE_IMM:
/* Immediate compare: both operands have src1's type, hence the
 * [src1->type][src1->type] lookup. */
981 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
982 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
983 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
995 ins->opcode += beqops_op_map [src1->type];
998 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
999 ins->opcode += ceqops_op_map [src1->type];
/* Ordered comparisons: only table value 1 means fully verifiable. */
1005 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1006 ins->opcode += ceqops_op_map [src1->type];
/* neg/not. */
1010 ins->type = neg_table [src1->type];
1011 ins->opcode += unops_op_map [ins->type];
1014 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1015 ins->type = src1->type;
1017 ins->type = STACK_INV;
1018 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to i1/u1/i2/u2/i4/u4 produce an I4. */
1024 ins->type = STACK_I4;
1025 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
1028 ins->type = STACK_R8;
1029 switch (src1->type) {
1032 ins->opcode = OP_ICONV_TO_R_UN;
1035 ins->opcode = OP_LCONV_TO_R_UN;
1039 case CEE_CONV_OVF_I1:
1040 case CEE_CONV_OVF_U1:
1041 case CEE_CONV_OVF_I2:
1042 case CEE_CONV_OVF_U2:
1043 case CEE_CONV_OVF_I4:
1044 case CEE_CONV_OVF_U4:
1045 ins->type = STACK_I4;
1046 ins->opcode += ovf3ops_op_map [src1->type];
1048 case CEE_CONV_OVF_I_UN:
1049 case CEE_CONV_OVF_U_UN:
1050 ins->type = STACK_PTR;
1051 ins->opcode += ovf2ops_op_map [src1->type];
1053 case CEE_CONV_OVF_I1_UN:
1054 case CEE_CONV_OVF_I2_UN:
1055 case CEE_CONV_OVF_I4_UN:
1056 case CEE_CONV_OVF_U1_UN:
1057 case CEE_CONV_OVF_U2_UN:
1058 case CEE_CONV_OVF_U4_UN:
1059 ins->type = STACK_I4;
1060 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source width. */
1063 ins->type = STACK_PTR;
1064 switch (src1->type) {
1066 ins->opcode = OP_ICONV_TO_U;
1070 #if SIZEOF_VOID_P == 8
1071 ins->opcode = OP_LCONV_TO_U;
1073 ins->opcode = OP_MOVE;
1077 ins->opcode = OP_LCONV_TO_U;
1080 ins->opcode = OP_FCONV_TO_U;
/* conv.i8/u8. */
1086 ins->type = STACK_I8;
1087 ins->opcode += unops_op_map [src1->type];
1089 case CEE_CONV_OVF_I8:
1090 case CEE_CONV_OVF_U8:
1091 ins->type = STACK_I8;
1092 ins->opcode += ovf3ops_op_map [src1->type];
1094 case CEE_CONV_OVF_U8_UN:
1095 case CEE_CONV_OVF_I8_UN:
1096 ins->type = STACK_I8;
1097 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8. */
1100 ins->type = cfg->r4_stack_type;
1101 ins->opcode += unops_op_map [src1->type];
1104 ins->type = STACK_R8;
1105 ins->opcode += unops_op_map [src1->type];
1108 ins->type = STACK_R8;
1112 ins->type = STACK_I4;
1113 ins->opcode += ovfops_op_map [src1->type];
1116 case CEE_CONV_OVF_I:
1117 case CEE_CONV_OVF_U:
1118 ins->type = STACK_PTR;
1119 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic; floats cannot overflow-check -> invalid. */
1122 case CEE_ADD_OVF_UN:
1124 case CEE_MUL_OVF_UN:
1126 case CEE_SUB_OVF_UN:
1127 ins->type = bin_num_table [src1->type] [src2->type];
1128 ins->opcode += ovfops_op_map [src1->type];
1129 if (ins->type == STACK_R8)
1130 ins->type = STACK_INV;
/* Loads: stack type is determined by the load width. */
1132 case OP_LOAD_MEMBASE:
1133 ins->type = STACK_PTR;
1135 case OP_LOADI1_MEMBASE:
1136 case OP_LOADU1_MEMBASE:
1137 case OP_LOADI2_MEMBASE:
1138 case OP_LOADU2_MEMBASE:
1139 case OP_LOADI4_MEMBASE:
1140 case OP_LOADU4_MEMBASE:
1141 ins->type = STACK_PTR;
1143 case OP_LOADI8_MEMBASE:
1144 ins->type = STACK_I8;
1146 case OP_LOADR4_MEMBASE:
1147 ins->type = cfg->r4_stack_type;
1149 case OP_LOADR8_MEMBASE:
1150 ins->type = STACK_R8;
1153 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object as a conservative klass. */
1157 if (ins->type == STACK_MP)
1158 ins->klass = mono_defaults.object_class;
/* Stack types produced per load kind (presumably the ldind_type table — the
 * declaration line is not visible in this excerpt). */
1163 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1169 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the stack types in ARGS are compatible with SIG's 'this'
 * and parameter types (byref-ness, reference kinds, float widths).
 * (NOTE(review): return statements are missing from this excerpt; visible
 * code is kept unmodified.) */
1174 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1179 switch (args->type) {
1189 for (i = 0; i < sig->param_count; ++i) {
1190 switch (args [i].type) {
1194 if (!sig->params [i]->byref)
1198 if (sig->params [i]->byref)
1200 switch (sig->params [i]->type) {
1201 case MONO_TYPE_CLASS:
1202 case MONO_TYPE_STRING:
1203 case MONO_TYPE_OBJECT:
1204 case MONO_TYPE_SZARRAY:
1205 case MONO_TYPE_ARRAY:
1212 if (sig->params [i]->byref)
1214 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1223 /*if (!param_table [args [i].type] [sig->params [i]->type])
1231 * When we need a pointer to the current domain many times in a method, we
1232 * call mono_domain_get() once and we store the result in a local variable.
1233 * This function returns the variable that represents the MonoDomain*.
/* Lazily created, cached on the compile; typed as native int. */
1235 inline static MonoInst *
1236 mono_get_domainvar (MonoCompile *cfg)
1238 if (!cfg->domainvar)
1239 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1240 return cfg->domainvar;
1244 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create/return the GOT-address variable; only meaningful for AOT
 * compiles on backends that need an explicit GOT register. */
1248 mono_get_got_var (MonoCompile *cfg)
1250 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1252 if (!cfg->got_var) {
1253 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1255 return cfg->got_var;
/* Lazily create/return the rgctx (runtime generic context) variable; only
 * valid for shared-generic compiles. Volatile => stack allocated. */
1259 mono_get_vtable_var (MonoCompile *cfg)
1261 g_assert (cfg->gshared);
1263 if (!cfg->rgctx_var) {
1264 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1265 /* force the var to be stack allocated */
1266 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1269 return cfg->rgctx_var;
/*
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType.
 * Aborts via g_error () for stack types with no MonoType mapping.
 */
1273 type_from_stack_type (MonoInst *ins) {
1274 	switch (ins->type) {
1275 	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1276 	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1277 	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1278 	case STACK_R4: return &mono_defaults.single_class->byval_arg;
1279 	case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* NOTE(review): the case label for the branch below is elided here; it returns
 * the managed-pointer form (this_arg) of ins->klass — presumably STACK_MP. */
1281 		return &ins->klass->this_arg;
1282 	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1283 	case STACK_VTYPE: return &ins->klass->byval_arg;
1285 		g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its evaluation-stack type (STACK_*), after stripping
 * enum/underlying-type indirection. Inverse of type_from_stack_type ().
 */
1290 static G_GNUC_UNUSED int
1291 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1293 	t = mono_type_get_underlying_type (t);
1305 	case MONO_TYPE_FNPTR:
1307 	case MONO_TYPE_CLASS:
1308 	case MONO_TYPE_STRING:
1309 	case MONO_TYPE_OBJECT:
1310 	case MONO_TYPE_SZARRAY:
1311 	case MONO_TYPE_ARRAY:
	/* r4 handling depends on the backend: cfg->r4_stack_type is STACK_R4 or STACK_R8 */
1317 		return cfg->r4_stack_type;
1320 	case MONO_TYPE_VALUETYPE:
1321 	case MONO_TYPE_TYPEDBYREF:
1323 	case MONO_TYPE_GENERICINST:
1324 		if (mono_type_generic_inst_is_valuetype (t))
1330 		g_assert_not_reached ();
/*
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the element
 * type it accesses (most case labels are elided in this view).
 */
1337 array_access_to_klass (int opcode)
1341 		return mono_defaults.byte_class;
1343 		return mono_defaults.uint16_class;
1346 		return mono_defaults.int_class;
1349 		return mono_defaults.sbyte_class;
1352 		return mono_defaults.int16_class;
1355 		return mono_defaults.int32_class;
1357 		return mono_defaults.uint32_class;
1360 		return mono_defaults.int64_class;
1363 		return mono_defaults.single_class;
1366 		return mono_defaults.double_class;
1367 	case CEE_LDELEM_REF:
1368 	case CEE_STELEM_REF:
1369 		return mono_defaults.object_class;
1371 		g_assert_not_reached ();
1377  * We try to share variables when possible
/*
 * Return a local variable for stack slot 'slot' holding a value of ins's
 * stack type. Variables are cached per (slot, stack type) in cfg->intvars
 * so the same temp is reused across basic-block boundaries.
 */
1380 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1385 	/* inlining can result in deeper stacks: don't cache beyond the declared max_stack */
1386 	if (slot >= cfg->header->max_stack)
1387 		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1389 	pos = ins->type - 1 + slot * STACK_MAX;
1391 	switch (ins->type) {
1398 		if ((vnum = cfg->intvars [pos]))
1399 			return cfg->varinfo [vnum];
1400 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1401 		cfg->intvars [pos] = res->inst_c0;
	/* non-cacheable stack types fall through to a fresh variable */
1404 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * Record an image+token pair under 'key' so the AOT compiler can later map
 * a runtime structure back to the metadata token that produced it.
 */
1410 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1413 	 * Don't use this if a generic_context is set, since that means AOT can't
1414 	 * look up the method using just the image+token.
1415 	 * table == 0 means this is a reference made from a wrapper.
1417 	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1418 		MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1419 		jump_info_token->image = image;
1420 		jump_info_token->token = token;
1421 		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1426  * This function is called to handle items that are left on the evaluation stack
1427  * at basic block boundaries. What happens is that we save the values to local variables
1428  * and we reload them later when first entering the target basic block (with the
1429  * handle_loaded_temps () function).
1430  * A single joint point will use the same variables (stored in the array bb->out_stack or
1431  * bb->in_stack, if the basic block is before or after the joint point).
1433  * This function needs to be called _before_ emitting the last instruction of
1434  * the bb (i.e. before emitting a branch).
1435  * If the stack merge fails at a join point, cfg->unverifiable is set.
1438 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1441 	MonoBasicBlock *bb = cfg->cbb;
1442 	MonoBasicBlock *outb;
1443 	MonoInst *inst, **locals;
1448 	if (cfg->verbose_level > 3)
1449 		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	/* First exit from this bblock: decide which variables carry the stack values. */
1450 	if (!bb->out_scount) {
1451 		bb->out_scount = count;
1452 		//printf ("bblock %d has out:", bb->block_num);
	/* Prefer reusing an out-bblock's already-assigned in_stack. */
1454 		for (i = 0; i < bb->out_count; ++i) {
1455 			outb = bb->out_bb [i];
1456 			/* exception handlers are linked, but they should not be considered for stack args */
1457 			if (outb->flags & BB_EXCEPTION_HANDLER)
1459 			//printf (" %d", outb->block_num);
1460 			if (outb->in_stack) {
1462 				bb->out_stack = outb->in_stack;
1468 			bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1469 			for (i = 0; i < count; ++i) {
1471 				 * try to reuse temps already allocated for this purpose, if they occupy the same
1472 				 * stack slot and if they are of the same type.
1473 				 * This won't cause conflicts since if 'local' is used to
1474 				 * store one of the values in the in_stack of a bblock, then
1475 				 * the same variable will be used for the same outgoing stack
1477 				 * This doesn't work when inlining methods, since the bblocks
1478 				 * in the inlined methods do not inherit their in_stack from
1479 				 * the bblock they are inlined to. See bug #58863 for an
1482 				if (cfg->inlined_method)
1483 					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1485 					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
	/* Propagate our out_stack as the in_stack of all successor bblocks. */
1490 	for (i = 0; i < bb->out_count; ++i) {
1491 		outb = bb->out_bb [i];
1492 		/* exception handlers are linked, but they should not be considered for stack args */
1493 		if (outb->flags & BB_EXCEPTION_HANDLER)
1495 		if (outb->in_scount) {
1496 			if (outb->in_scount != bb->out_scount) {
	/* stack depth mismatch at a join point: the IL is unverifiable */
1497 				cfg->unverifiable = TRUE;
1500 			continue; /* check they are the same locals */
1502 		outb->in_scount = count;
1503 		outb->in_stack = bb->out_stack;
1506 	locals = bb->out_stack;
	/* Spill the current stack values into the shared temps. */
1508 	for (i = 0; i < count; ++i) {
1509 		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1510 		inst->cil_code = sp [i]->cil_code;
1511 		sp [i] = locals [i];
1512 		if (cfg->verbose_level > 3)
1513 			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1517 	 * It is possible that the out bblocks already have in_stack assigned, and
1518 	 * the in_stacks differ. In this case, we will store to all the different
1525 	/* Find a bblock which has a different in_stack */
1527 	while (bindex < bb->out_count) {
1528 		outb = bb->out_bb [bindex];
1529 		/* exception handlers are linked, but they should not be considered for stack args */
1530 		if (outb->flags & BB_EXCEPTION_HANDLER) {
1534 		if (outb->in_stack != locals) {
1535 			for (i = 0; i < count; ++i) {
1536 				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1537 				inst->cil_code = sp [i]->cil_code;
1538 				sp [i] = locals [i];
1539 				if (cfg->verbose_level > 3)
1540 					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1542 			locals = outb->in_stack;
/*
 * Emit an instruction loading a runtime constant described by (patch_type, data).
 * Under AOT this becomes a patchable AOTCONST; otherwise the patch is resolved
 * immediately and emitted as a plain pointer constant.
 */
1552 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1556 	if (cfg->compile_aot) {
1557 		EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1563 		ji.type = patch_type;
1564 		ji.data.target = data;
	/* resolve the patch now; failure is fatal (mono_error_assert_ok) */
1565 		target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1566 		mono_error_assert_ok (&error);
1568 		EMIT_NEW_PCONST (cfg, ins, target);
/*
 * Emit IR that tests bit klass->interface_id in the interface bitmap found at
 * [base_reg + offset], leaving a nonzero value in intf_bit_reg iff the bit is
 * set (i.e. the interface is implemented).
 */
1574 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1576 	int ibitmap_reg = alloc_preg (cfg);
1577 #ifdef COMPRESSED_INTERFACE_BITMAP
	/* compressed bitmaps can't be indexed directly: call the icall helper */
1579 	MonoInst *res, *ins;
1580 	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1581 	MONO_ADD_INS (cfg->cbb, ins);
1583 	args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1584 	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1585 	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1587 	int ibitmap_byte_reg = alloc_preg (cfg);
1589 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1591 	if (cfg->compile_aot) {
	/* IID is not known at compile time under AOT: compute byte index (iid >> 3)
	 * and bit mask (1 << (iid & 7)) in registers. */
1592 		int iid_reg = alloc_preg (cfg);
1593 		int shifted_iid_reg = alloc_preg (cfg);
1594 		int ibitmap_byte_address_reg = alloc_preg (cfg);
1595 		int masked_iid_reg = alloc_preg (cfg);
1596 		int iid_one_bit_reg = alloc_preg (cfg);
1597 		int iid_bit_reg = alloc_preg (cfg);
1598 		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1599 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1600 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1601 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1602 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1603 		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1604 		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1605 		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	/* JIT case: iid is a compile-time constant, so index and mask are immediates */
1607 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1608 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1614  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1615  * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoClass. */
1618 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1620 	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1624  * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1625  * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap embedded in MonoVTable. */
1628 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1630 	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1634  * Emit code which checks whether the interface id of @klass is smaller than
1635  * the value given by max_iid_reg.
/*
 * If it is not, either branch to false_target (when given) or throw an
 * InvalidCastException.
 */
1638 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1639 						 MonoBasicBlock *false_target)
1641 	if (cfg->compile_aot) {
	/* iid is unknown at AOT compile time: load it through a patch */
1642 		int iid_reg = alloc_preg (cfg);
1643 		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1644 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1647 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1649 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1651 		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1654 /* Same as above, but obtains max_iid from a vtable */
1656 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1657 						 MonoBasicBlock *false_target)
1659 	int max_iid_reg = alloc_preg (cfg);
	/* load MonoVTable.max_interface_id and delegate the comparison */
1661 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU4_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1662 	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1665 /* Same as above, but obtains max_iid from a klass */
1667 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1668 						 MonoBasicBlock *false_target)
1670 	int max_iid_reg = alloc_preg (cfg);
	/* load MonoClass.max_interface_id and delegate the comparison */
1672 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU4_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1673 	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an "isinst"-style subclass test: walk the supertypes table of the class
 * in klass_reg and compare the entry at klass's depth against klass (given as a
 * runtime MonoInst, an AOT class constant, or an immediate). Branches to
 * true_target on match; the depth pre-check can branch to false_target.
 */
1677 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1679 	int idepth_reg = alloc_preg (cfg);
1680 	int stypes_reg = alloc_preg (cfg);
1681 	int stype = alloc_preg (cfg);
1683 	mono_class_setup_supertypes (klass);
	/* only classes deeper than the default supertable need an explicit idepth check */
1685 	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1686 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1687 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1688 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	/* load supertypes [klass->idepth - 1] for comparison */
1690 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1691 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1693 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1694 	} else if (cfg->compile_aot) {
1695 		int const_reg = alloc_preg (cfg);
1696 		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1697 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1699 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1701 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test where klass is known at compile time (no MonoInst). */
1705 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1707 	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emit an interface cast check against a vtable: verify the interface id is in
 * range, then test the vtable's interface bitmap. Branches to true_target when
 * given, otherwise throws InvalidCastException on failure.
 */
1711 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1713 	int intf_reg = alloc_preg (cfg);
1715 	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1716 	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1717 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1719 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1721 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1725  * Variant of the above that takes a register to the class, not the vtable.
1728 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1730 	int intf_bit_reg = alloc_preg (cfg);
1732 	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1733 	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1734 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	/* bit set -> interface implemented; otherwise branch/throw */
1736 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1738 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact-class equality check: compare klass_reg against klass (either a
 * runtime MonoInst or a compile-time constant) and throw InvalidCastException
 * on mismatch.
 */
1742 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1745 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1747 		MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1748 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1750 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check against a compile-time constant klass. */
1754 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1756 	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compare the class in klass_reg against klass and branch to 'target' using
 * 'branch_op' (e.g. OP_PBEQ/OP_PBNE_UN) instead of throwing.
 */
1760 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1762 	if (cfg->compile_aot) {
	/* class pointer unknown under AOT: materialize it via a class constant patch */
1763 		int const_reg = alloc_preg (cfg);
1764 		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1765 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1767 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1769 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* forward declaration: mini_emit_castclass_inst below recurses through this for element types */
1773 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check for the class in klass_reg (with the object, if any,
 * in obj_reg). Array classes check rank and recursively check the element
 * (cast_class) type; non-array classes walk the supertypes table. Throws
 * InvalidCastException on failure.
 */
1776 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1779 		int rank_reg = alloc_preg (cfg);
1780 		int eclass_reg = alloc_preg (cfg);
1782 		g_assert (!klass_inst);
	/* array path: rank must match exactly */
1783 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1784 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1785 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1786 		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1787 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
	/* special-case element types whose cast rules involve enums */
1788 		if (klass->cast_class == mono_defaults.object_class) {
1789 			int parent_reg = alloc_preg (cfg);
1790 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1791 			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1792 			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1793 		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
1794 			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1795 			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1796 		} else if (klass->cast_class == mono_defaults.enum_class) {
1797 			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1798 		} else if (mono_class_is_interface (klass->cast_class)) {
1799 			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1801 			// Pass -1 as obj_reg to skip the check below for arrays of arrays
1802 			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1805 		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1806 			/* Check that the object is a vector too (bounds == NULL for szarrays) */
1807 			int bounds_reg = alloc_preg (cfg);
1808 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1809 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1810 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	/* non-array path: supertypes-table walk, mirroring mini_emit_isninst_cast_inst */
1813 		int idepth_reg = alloc_preg (cfg);
1814 		int stypes_reg = alloc_preg (cfg);
1815 		int stype = alloc_preg (cfg);
1817 		mono_class_setup_supertypes (klass);
1819 		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1820 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1821 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1822 			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1824 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1825 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1826 		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check where klass is known at compile time. */
1831 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1833 	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit inline IR that fills 'size' bytes at [destreg + offset] with 'val'
 * (currently only val == 0 is supported). Uses a single aligned store when
 * possible, otherwise a sequence of progressively smaller stores.
 */
1837 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1841 	g_assert (val == 0);
	/* fast path: the whole region fits in one aligned register-sized store */
1846 	if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1849 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1852 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1855 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1857 #if SIZEOF_REGISTER == 8
1859 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1865 	val_reg = alloc_preg (cfg);
1867 	if (SIZEOF_REGISTER == 8)
1868 		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1870 		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
	/* unaligned region: fall back to byte-at-a-time stores */
1873 		/* This could be optimized further if necessary */
1875 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1882 	if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1884 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1889 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
	/* remaining tail: 4-, 2-, then 1-byte stores */
1896 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1901 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1906 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit inline IR that copies 'size' bytes from [srcreg + soffset] to
 * [destreg + doffset], using the widest load/store pairs the alignment
 * permits. Regions are assumed non-overlapping.
 */
1913 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1920 	/*FIXME arbitrary hack to avoid unbound code expansion.*/
1921 	g_assert (size < 10000);
	/* unaligned region: copy byte-at-a-time */
1924 		/* This could be optimized further if necessary */
1926 			cur_reg = alloc_preg (cfg);
1927 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1928 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
	/* 8-byte chunks where the backend allows unaligned access */
1935 	if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1937 			cur_reg = alloc_preg (cfg);
1938 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1939 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
	/* remaining tail: 4-, 2-, then 1-byte copies */
1947 		cur_reg = alloc_preg (cfg);
1948 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1949 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1955 		cur_reg = alloc_preg (cfg);
1956 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1957 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1963 		cur_reg = alloc_preg (cfg);
1964 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1965 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Emit IR that stores sreg1 into the TLS slot identified by tls_key.
 * AOT uses OP_TLS_SET_REG with a patched offset constant; the JIT resolves
 * the offset immediately with mini_get_tls_offset ().
 */
1973 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1977 	if (cfg->compile_aot) {
1978 		EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1979 		MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1981 		ins->sreg2 = c->dreg;
1982 		MONO_ADD_INS (cfg->cbb, ins);
1984 		MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1986 		ins->inst_offset = mini_get_tls_offset (tls_key);
1987 		MONO_ADD_INS (cfg->cbb, ins);
1994  * Emit IR to push the current LMF onto the LMF stack.
1997 emit_push_lmf (MonoCompile *cfg)
2000 	 * Emit IR to push the LMF:
2001 	 * lmf_addr = <lmf_addr from tls>
2002 	 * lmf->lmf_addr = lmf_addr
2003 	 * lmf->prev_lmf = *lmf_addr
2006 	int lmf_reg, prev_lmf_reg;
2007 	MonoInst *ins, *lmf_ins;
	/* Fast path: the LMF itself lives in TLS, so link and store it directly. */
2012 	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2013 		/* Load current lmf */
2014 		lmf_ins = mono_get_lmf_intrinsic (cfg);
2016 		MONO_ADD_INS (cfg->cbb, lmf_ins);
2017 		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2018 		lmf_reg = ins->dreg;
2019 		/* Save previous_lmf */
2020 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2022 		emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2025 		 * Store lmf_addr in a variable, so it can be allocated to a global register.
2027 		if (!cfg->lmf_addr_var)
2028 			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* Compute lmf_addr: via the jit_tls intrinsic, the lmf_addr intrinsic,
	 * an inlined pthread_getspecific, or the mono_get_lmf_addr icall. */
2031 		ins = mono_get_jit_tls_intrinsic (cfg);
2033 			int jit_tls_dreg = ins->dreg;
2035 			MONO_ADD_INS (cfg->cbb, ins);
2036 			lmf_reg = alloc_preg (cfg);
2037 			EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2039 			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2042 		lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2044 			MONO_ADD_INS (cfg->cbb, lmf_ins);
2047 			MonoInst *args [16], *jit_tls_ins, *ins;
2049 			/* Inline mono_get_lmf_addr () */
2050 			/* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2052 			/* Load mono_jit_tls_id */
2053 			if (cfg->compile_aot)
2054 				EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2056 				EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2057 			/* call pthread_getspecific () */
2058 			jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2059 			/* lmf_addr = &jit_tls->lmf */
2060 			EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2063 			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2067 		lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2069 		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2070 		lmf_reg = ins->dreg;
2072 		prev_lmf_reg = alloc_preg (cfg);
2073 		/* Save previous_lmf */
2074 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2075 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
	/* Set new LMF: *lmf_addr = lmf */
2077 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2084  * Emit IR to pop the current LMF from the LMF stack.
2087 emit_pop_lmf (MonoCompile *cfg)
2089 	int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2095 	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2096 	lmf_reg = ins->dreg;
	/* Fast path: LMF lives in TLS — restore previous_lmf straight into the TLS slot. */
2098 	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2099 		/* Load previous_lmf */
2100 		prev_lmf_reg = alloc_preg (cfg);
2101 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2103 		emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2106 		 * Emit IR to pop the LMF:
2107 		 * *(lmf->lmf_addr) = lmf->prev_lmf
2109 		/* This could be called before emit_push_lmf (), so create lmf_addr_var on demand */
2110 		if (!cfg->lmf_addr_var)
2111 			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2112 		lmf_addr_reg = cfg->lmf_addr_var->dreg;
2114 		prev_lmf_reg = alloc_preg (cfg);
2115 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2116 		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * Emit a call to the profiler enter/leave hook 'func' with the current method
 * as its argument, when enter/leave profiling is enabled.
 */
2121 emit_instrumentation_call (MonoCompile *cfg, void *func)
2123 	MonoInst *iargs [1];
2126 	 * Avoid instrumenting inlined methods since it can
2127 	 * distort profiling results.
2129 	if (cfg->method != cfg->current_method)
2132 	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2133 		EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2134 		mono_emit_jit_icall (cfg, func, iargs);
/*
 * Select the call IR opcode for a call returning 'type': the *CALL family is
 * chosen by return kind (void/int/long/float/vtype), then specialized to
 * _REG for calli and _MEMBASE for virtual calls.
 */
2139 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2142 	type = mini_get_underlying_type (type);
2143 	switch (type->type) {
2144 	case MONO_TYPE_VOID:
2145 		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2152 		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2156 	case MONO_TYPE_FNPTR:
2157 		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2158 	case MONO_TYPE_CLASS:
2159 	case MONO_TYPE_STRING:
2160 	case MONO_TYPE_OBJECT:
2161 	case MONO_TYPE_SZARRAY:
2162 	case MONO_TYPE_ARRAY:
2163 		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2166 		return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2169 		return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2171 		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2174 	case MONO_TYPE_VALUETYPE:
	/* enums retry with their underlying integral type */
2175 		if (type->data.klass->enumtype) {
2176 			type = mono_class_enum_basetype (type->data.klass);
2179 			return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2180 	case MONO_TYPE_TYPEDBYREF:
2181 		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2182 	case MONO_TYPE_GENERICINST:
	/* generic instances retry with the open container class's type */
2183 		type = &type->data.generic_class->container_class->byval_arg;
2186 	case MONO_TYPE_MVAR:
2188 		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2190 		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2195 //XXX this ignores if t is byref
2196 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
2199  * target_type_is_incompatible:
2200  * @cfg: MonoCompile context
2202  * Check that the item @arg on the evaluation stack can be stored
2203  * in the target type (can be a local, or field, etc).
2204  * The cfg arg can be used to check if we need verification or just
2207  * Returns: non-0 value if arg can't be stored on a target.
2210 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2212 	MonoType *simple_type;
2215 	if (target->byref) {
2216 		/* FIXME: check that the pointed to types match */
2217 		if (arg->type == STACK_MP) {
2218 			/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2219 			MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2220 			MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2222 			/* if the target is native int& or same type */
2223 			if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2226 			/* Both are primitive type byrefs and the source points to a larger type that the destination */
2227 			if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2228 				mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2232 		if (arg->type == STACK_PTR)
	/* non-byref target: compare the lowered type against the stack type */
2237 	simple_type = mini_get_underlying_type (target);
2238 	switch (simple_type->type) {
2239 	case MONO_TYPE_VOID:
2247 		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2251 		/* STACK_MP is needed when setting pinned locals */
2252 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2257 	case MONO_TYPE_FNPTR:
2259 		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2260 		 * in native int. (#688008).
2262 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2265 	case MONO_TYPE_CLASS:
2266 	case MONO_TYPE_STRING:
2267 	case MONO_TYPE_OBJECT:
2268 	case MONO_TYPE_SZARRAY:
2269 	case MONO_TYPE_ARRAY:
2270 		if (arg->type != STACK_OBJ)
2272 		/* FIXME: check type compatibility */
2276 		if (arg->type != STACK_I8)
2280 		if (arg->type != cfg->r4_stack_type)
2284 		if (arg->type != STACK_R8)
2287 	case MONO_TYPE_VALUETYPE:
2288 		if (arg->type != STACK_VTYPE)
2290 		klass = mono_class_from_mono_type (simple_type);
2291 		if (klass != arg->klass)
2294 	case MONO_TYPE_TYPEDBYREF:
2295 		if (arg->type != STACK_VTYPE)
2297 		klass = mono_class_from_mono_type (simple_type);
2298 		if (klass != arg->klass)
2301 	case MONO_TYPE_GENERICINST:
2302 		if (mono_type_generic_inst_is_valuetype (simple_type)) {
2303 			MonoClass *target_class;
2304 			if (arg->type != STACK_VTYPE)
2306 			klass = mono_class_from_mono_type (simple_type);
2307 			target_class = mono_class_from_mono_type (target);
2308 			/* The second cases is needed when doing partial sharing */
2309 			if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2313 			if (arg->type != STACK_OBJ)
2315 			/* FIXME: check type compatibility */
2319 	case MONO_TYPE_MVAR:
	/* type variables only reach here under generic sharing */
2320 		g_assert (cfg->gshared);
2321 		if (mini_type_var_is_vt (simple_type)) {
2322 			if (arg->type != STACK_VTYPE)
2325 			if (arg->type != STACK_OBJ)
2330 		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2336  * Prepare arguments for passing to a function call.
2337  * Return a non-zero value if the arguments can't be passed to the given
2339  * The type checks are not yet complete and some conversions may need
2340  * casts on 32 or 64 bit architectures.
2342  * FIXME: implement this using target_type_is_incompatible ()
2345 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2347 	MonoType *simple_type;
	/* implicit 'this' must be a reference, managed pointer or native pointer */
2351 		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2355 	for (i = 0; i < sig->param_count; ++i) {
2356 		if (sig->params [i]->byref) {
2357 			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2361 		simple_type = mini_get_underlying_type (sig->params [i]);
2363 		switch (simple_type->type) {
2364 		case MONO_TYPE_VOID:
2373 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2379 		case MONO_TYPE_FNPTR:
2380 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2383 		case MONO_TYPE_CLASS:
2384 		case MONO_TYPE_STRING:
2385 		case MONO_TYPE_OBJECT:
2386 		case MONO_TYPE_SZARRAY:
2387 		case MONO_TYPE_ARRAY:
2388 			if (args [i]->type != STACK_OBJ)
2393 			if (args [i]->type != STACK_I8)
2397 			if (args [i]->type != cfg->r4_stack_type)
2401 			if (args [i]->type != STACK_R8)
2404 		case MONO_TYPE_VALUETYPE:
	/* enums retry with their underlying integral type */
2405 			if (simple_type->data.klass->enumtype) {
2406 				simple_type = mono_class_enum_basetype (simple_type->data.klass);
2409 			if (args [i]->type != STACK_VTYPE)
2412 		case MONO_TYPE_TYPEDBYREF:
2413 			if (args [i]->type != STACK_VTYPE)
2416 		case MONO_TYPE_GENERICINST:
	/* generic instances retry with the open container class's type */
2417 			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2420 		case MONO_TYPE_MVAR:
2422 			if (args [i]->type != STACK_VTYPE)
2426 			g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALL_MEMBASE (virtual call) opcode to its direct-call counterpart. */
2434 callvirt_to_call (int opcode)
2437 	case OP_CALL_MEMBASE:
2439 	case OP_VOIDCALL_MEMBASE:
2441 	case OP_FCALL_MEMBASE:
2443 	case OP_RCALL_MEMBASE:
2445 	case OP_VCALL_MEMBASE:
2447 	case OP_LCALL_MEMBASE:
2450 		g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Map a *CALL_MEMBASE opcode to the corresponding indirect
 * call-through-register (*CALL_REG) opcode. Asserts on unknown opcodes.
 */
2457 callvirt_to_call_reg (int opcode)
2460 case OP_CALL_MEMBASE:
2462 case OP_VOIDCALL_MEMBASE:
2463 return OP_VOIDCALL_REG;
2464 case OP_FCALL_MEMBASE:
2465 return OP_FCALL_REG;
2466 case OP_RCALL_MEMBASE:
2467 return OP_RCALL_REG;
2468 case OP_VCALL_MEMBASE:
2469 return OP_VCALL_REG;
2470 case OP_LCALL_MEMBASE:
2471 return OP_LCALL_REG;
2473 g_assert_not_reached ();
2479 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Arrange for the IMT argument (either IMT_ARG's value or a runtime
 * constant for METHOD — per the comment above, exactly one must be set)
 * to reach the callee: through call->imt_arg_reg under LLVM, otherwise
 * via MONO_ARCH_IMT_REG.
 */
2481 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2485 if (COMPILE_LLVM (cfg)) {
/* presumably inside 'if (imt_arg)' — copy the dynamic IMT value */
2487 method_reg = alloc_preg (cfg);
2488 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* else: materialize METHOD as a patchable runtime constant */
2490 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2491 method_reg = ins->dreg;
2495 call->imt_arg_reg = method_reg;
2497 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same value selection, then pin it to the IMT register */
2502 method_reg = alloc_preg (cfg);
2503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2505 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2506 method_reg = ins->dreg;
2509 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2512 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its target; the mempool owns
 * the memory (no explicit free). NOTE(review): the assignments of ip/type
 * are not visible in this extraction.
 */
2513 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2515 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2519 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * Thin cfg-aware wrapper around mono_class_check_context_used ();
 * presumably short-circuits when cfg is not gshared — TODO confirm
 * (the guard line is missing from this extraction).
 */
2525 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2528 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * Thin cfg-aware wrapper around mono_method_check_context_used ();
 * presumably guarded the same way as mini_class_check_context_used —
 * TODO confirm (guard line missing from this extraction).
 */
2534 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2537 return mono_method_check_context_used (method);
2543 * check_method_sharing:
2545 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * check_method_sharing:
 * Decide whether a call to CMETHOD needs the vtable and/or an mrgctx
 * passed (see header comment above). Results are written through the
 * optional out parameters.
 */
2548 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2550 gboolean pass_vtable = FALSE;
2551 gboolean pass_mrgctx = FALSE;
/* vtable candidate: static or valuetype methods on generic classes */
2553 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2554 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2555 gboolean sharable = FALSE;
2557 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
/* (see multi-line rationale comment in the original, lines 2561-2566) */
2561 * Pass vtable iff target method might
2562 * be shared, which means that sharing
2563 * is enabled for its class and its
2564 * context is sharable (and it's not a
/* vtable only when there is no per-method generic context */
2567 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx candidate: methods with their own method_inst context */
2571 if (mini_method_get_context (cmethod) &&
2572 mini_method_get_context (cmethod)->method_inst) {
2573 g_assert (!pass_vtable);
2575 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
/* gsharedvt signatures also force the mrgctx — TODO confirm, body not visible */
2578 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2583 if (out_pass_vtable)
2584 *out_pass_vtable = pass_vtable;
2585 if (out_pass_mrgctx)
2586 *out_pass_mrgctx = pass_mrgctx;
2589 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS: picks the call opcode (OP_TAILCALL
 * for tail calls), wires up vtype return handling, performs the
 * soft-float r8->r4 argument fixup, and lets the backend (or LLVM)
 * lower the outgoing arguments. Does NOT add the instruction to a bblock.
 */
2590 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2591 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2595 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail-call path: emit leave instrumentation before replacing the frame */
2603 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2605 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2607 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2610 call->signature = sig;
2611 call->rgctx_reg = rgctx;
2612 sig_ret = mini_get_underlying_type (sig->ret);
2614 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* first vtype-return path: reuse cfg->vret_addr — condition guarding it
 * is not fully visible in this extraction */
2617 if (mini_type_is_vtype (sig_ret)) {
2618 call->vret_var = cfg->vret_addr;
2619 //g_assert_not_reached ();
/* second vtype-return path: allocate a temp and reference it via
 * OP_OUTARG_VTRETADDR (rationale in the comment below) */
2621 } else if (mini_type_is_vtype (sig_ret)) {
2622 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2625 temp->backend.is_pinvoke = sig->pinvoke;
2628 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2629 * address of return value to increase optimization opportunities.
2630 * Before vtype decomposition, the dreg of the call ins itself represents the
2631 * fact the call modifies the return value. After decomposition, the call will
2632 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2633 * will be transformed into an LDADDR.
2635 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2636 loada->dreg = alloc_preg (cfg);
2637 loada->inst_p0 = temp;
2638 /* We reference the call too since call->dreg could change during optimization */
2639 loada->inst_p1 = call;
2640 MONO_ADD_INS (cfg->cbb, loada);
2642 call->inst.dreg = temp->dreg;
2644 call->vret_var = loada;
2645 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2646 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2648 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2649 if (COMPILE_SOFT_FLOAT (cfg)) {
2651 * If the call has a float argument, we would need to do an r8->r4 conversion using
2652 * an icall, but that cannot be done during the call sequence since it would clobber
2653 * the call registers + the stack. So we do it before emitting the call.
2655 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2657 MonoInst *in = call->args [i];
/* skip the implicit 'this' slot when indexing sig->params */
2659 if (i >= sig->hasthis)
2660 t = sig->params [i - sig->hasthis];
2662 t = &mono_defaults.int_class->byval_arg;
2663 t = mono_type_get_underlying_type (t);
2665 if (!t->byref && t->type == MONO_TYPE_R4) {
2666 MonoInst *iargs [1];
2670 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2672 /* The result will be in an int vreg */
2673 call->args [i] = conv;
2679 call->need_unbox_trampoline = unbox_trampoline;
2682 if (COMPILE_LLVM (cfg))
2683 mono_llvm_emit_call (cfg, call);
2685 mono_arch_emit_call (cfg, call);
2687 mono_arch_emit_call (cfg, call);
/* track the maximum outgoing-argument area and mark that this method calls */
2690 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2691 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach RGCTX_ARG (already copied into RGCTX_REG by the caller) to CALL:
 * pins it to MONO_ARCH_RGCTX_REG and records that the method uses the
 * rgctx register. call->rgctx_arg_reg is presumably the LLVM-side record
 * of the same value — TODO confirm (guard lines not visible).
 */
2697 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2699 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2700 cfg->uses_rgctx_reg = TRUE;
2701 call->rgctx_reg = TRUE;
2703 call->rgctx_arg_reg = rgctx_reg;
2707 inline static MonoInst*
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with SIG/ARGS, optionally passing an
 * IMT argument and an rgctx argument. For pinvoke wrappers with calling-
 * convention checking enabled, saves the SP before the call and verifies
 * it afterwards, throwing ExecutionEngineException on mismatch.
 */
2708 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2713 gboolean check_sp = FALSE;
2715 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2716 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2718 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value now; it is pinned to the rgctx register below */
2723 rgctx_reg = mono_alloc_preg (cfg);
2724 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* lazily create the variable holding the pre-call SP */
2728 if (!cfg->stack_inbalance_var)
2729 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2731 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2732 ins->dreg = cfg->stack_inbalance_var->dreg;
2733 MONO_ADD_INS (cfg->cbb, ins);
2736 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2738 call->inst.sreg1 = addr->dreg;
2741 emit_imt_argument (cfg, call, NULL, imt_arg);
2743 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* post-call SP check (only when check_sp): compare against the saved SP */
2748 sp_reg = mono_alloc_preg (cfg);
2750 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2752 MONO_ADD_INS (cfg->cbb, ins);
2754 /* Restore the stack so we don't crash when throwing the exception */
2755 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2756 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2757 MONO_ADD_INS (cfg->cbb, ins);
2759 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2760 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2764 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2766 return (MonoInst*)call;
2770 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2773 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2775 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Central helper for emitting a managed method call. Handles: remoting
 * wrappers (non-DISABLE_REMOTING builds), string ctor signature rewriting,
 * llvm-only virtual dispatch, delegate Invoke fast path, devirtualization
 * of final/non-virtual methods, and IMT- or vtable-slot-based virtual
 * dispatch. Returns the call instruction.
 * NOTE(review): many interleaved lines are missing from this extraction;
 * comments below only describe what is visible.
 */
2778 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2779 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2781 #ifndef DISABLE_REMOTING
2782 gboolean might_be_remote = FALSE;
/* a non-NULL 'this' marks the call as (potentially) virtual */
2784 gboolean virtual_ = this_ins != NULL;
2785 gboolean enable_for_aot = TRUE;
2788 MonoInst *call_target = NULL;
2790 gboolean need_unbox_trampoline;
2793 sig = mono_method_signature (method);
/* interface calls must not reach this path under llvm-only */
2795 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2796 g_assert_not_reached ();
2799 rgctx_reg = mono_alloc_preg (cfg);
2800 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2803 if (method->string_ctor) {
2804 /* Create the real signature */
2805 /* FIXME: Cache these */
2806 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2807 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2812 context_used = mini_method_check_context_used (cfg, method);
2814 #ifndef DISABLE_REMOTING
/* remoting: transparent-proxy calls need the invoke-with-check wrapper */
2815 might_be_remote = this_ins && sig->hasthis &&
2816 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2817 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2819 if (might_be_remote && context_used) {
2822 g_assert (cfg->gshared);
/* under gsharing, fetch the wrapper address through the rgctx */
2824 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2826 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2830 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2831 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* unbox trampolines are needed for methods which could end up on a vtype */
2833 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2835 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2837 #ifndef DISABLE_REMOTING
2838 if (might_be_remote)
2839 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2842 call->method = method;
2843 call->inst.flags |= MONO_INST_HAS_METHOD;
2844 call->inst.inst_left = this_ins;
2845 call->tail_call = tail;
2848 int vtable_reg, slot_reg, this_reg;
2851 this_reg = this_ins->dreg;
/* delegate Invoke fast path: call through delegate->invoke_impl */
2853 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2854 MonoInst *dummy_use;
2856 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2858 /* Make a call to delegate->invoke_impl */
2859 call->inst.inst_basereg = this_reg;
2860 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2861 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2863 /* We must emit a dummy use here because the delegate trampoline will
2864 replace the 'this' argument with the delegate target making this activation
2865 no longer a root for the delegate.
2866 This is an issue for delegates that target collectible code such as dynamic
2867 methods of GC'able assemblies.
2869 For a test case look into #667921.
2871 FIXME: a dummy use is not the best way to do it as the local register allocator
2872 will put it on a caller save register and spil it around the call.
2873 Ideally, we would either put it on a callee save register or only do the store part.
2875 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2877 return (MonoInst*)call;
/* devirtualize: non-virtual, or final and not the remoting wrapper */
2880 if ((!cfg->compile_aot || enable_for_aot) &&
2881 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2882 (MONO_METHOD_IS_FINAL (method) &&
2883 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2884 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2886 * the method is not virtual, we just need to ensure this is not null
2887 * and then we can call the method directly.
2889 #ifndef DISABLE_REMOTING
2890 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2892 * The check above ensures method is not gshared, this is needed since
2893 * gshared methods can't have wrappers.
2895 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* string ctors take no 'this', so skip the null check for them */
2899 if (!method->string_ctor)
2900 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2902 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2903 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2905 * the method is virtual, but we can statically dispatch since either
2906 * it's class or the method itself are sealed.
2907 * But first we need to ensure it's not a null reference.
2909 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2911 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* explicit call target: indirect call through call_target->dreg */
2912 } else if (call_target) {
2913 vtable_reg = alloc_preg (cfg);
2914 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2916 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2917 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!' here looks like it should be '~' (bitwise clear of
 * MONO_INST_HAS_METHOD); '!' sets flags to 0/1 — verify upstream */
2918 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* generic virtual/interface dispatch through the vtable */
2920 vtable_reg = alloc_preg (cfg);
2921 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2922 if (mono_class_is_interface (method->klass)) {
/* interface: index into the IMT table below the vtable */
2923 guint32 imt_slot = mono_method_get_imt_slot (method);
2924 emit_imt_argument (cfg, call, call->method, imt_arg);
2925 slot_reg = vtable_reg;
2926 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* class virtual: index into the vtable proper */
2928 slot_reg = vtable_reg;
2929 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2930 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2932 g_assert (mono_method_signature (method)->generic_param_count);
2933 emit_imt_argument (cfg, call, call->method, imt_arg);
2937 call->inst.sreg1 = slot_reg;
2938 call->inst.inst_offset = offset;
2939 call->is_virtual = TRUE;
2943 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2946 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2948 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: non-tail, non-virtual-arg call to METHOD using its
 * own signature; delegates to mono_emit_method_call_full ().
 */
2952 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2954 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the line storing FUNC into the call (presumably
 * call->fptr) is missing from this extraction.
 */
2958 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2965 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2968 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2970 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Look up the registered JIT icall for FUNC and emit a native call to its
 * wrapper. Presumably asserts that the icall is registered — the check
 * between lookup and use is not visible in this extraction.
 */
2974 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2976 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2980 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2984 * mono_emit_abs_call:
2986 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2988 inline static MonoInst*
/*
 * mono_emit_abs_call:
 * Emit a call whose target is a patch-info entry (see header comment
 * above): the MonoJumpInfo pointer itself is passed as the "address" and
 * registered in cfg->abs_patches so the ABS patch resolver can find it.
 */
2989 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2990 MonoMethodSignature *sig, MonoInst **args)
2992 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2996 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* lazily create the identity map used by the patch resolver */
2999 if (cfg->abs_patches == NULL)
3000 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3001 g_hash_table_insert (cfg->abs_patches, ji, ji);
3002 ins = mono_emit_native_call (cfg, ji, sig, args);
3003 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
3007 static MonoMethodSignature*
/*
 * sig_to_rgctx_sig:
 * Return a copy of SIG extended with one trailing native-int parameter
 * (used as the rgctx/extra argument slot). The copy is g_malloc'ed and
 * never freed here — see the FIXME below; ownership is the caller's
 * problem. Shallow-copies the params array (MonoType pointers shared).
 */
3008 sig_to_rgctx_sig (MonoMethodSignature *sig)
3010 // FIXME: memory allocation
3011 MonoMethodSignature *res;
3014 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
3015 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
3016 res->param_count = sig->param_count + 1;
3017 for (i = 0; i < sig->param_count; ++i)
3018 res->params [i] = sig->params [i];
/* the extra slot is typed as IntPtr (this_arg of the int class) */
3019 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3023 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 * Make an indirect call to FSIG through CALL_TARGET, appending the value
 * in ARG_REG as one extra trailing argument (see comment above). Uses a
 * stack buffer for up to 16 argument slots, otherwise a mempool array.
 */
3025 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
3027 MonoMethodSignature *csig;
3028 MonoInst *args_buf [16];
3030 int i, pindex, tmp_reg;
3032 /* Make a call with an rgctx/extra arg */
3033 if (fsig->param_count + 2 < 16)
3036 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* copy 'this' (when present) followed by the declared parameters */
3039 args [pindex ++] = orig_args [0];
3040 for (i = 0; i < fsig->param_count; ++i)
3041 args [pindex ++] = orig_args [fsig->hasthis + i];
/* append the extra argument as the last slot */
3042 tmp_reg = alloc_preg (cfg);
3043 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3044 csig = sig_to_rgctx_sig (fsig);
3045 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3048 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 * llvm-only indirect call through a function descriptor: ADDR points to
 * an <addr, arg> pair; load both and call addr with arg appended as the
 * extra argument (see the inline comment below).
 */
3050 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3052 int addr_reg, arg_reg;
3053 MonoInst *call_target;
3055 g_assert (cfg->llvm_only);
3058 * addr points to a <addr, arg> pair, load both of them, and
3059 * make a call to addr, passing arg as an extra arg.
3061 addr_reg = alloc_preg (cfg);
3062 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3063 arg_reg = alloc_preg (cfg);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3066 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 * Whether icalls may be called directly (without a wrapper): disabled for
 * LLVM non-llvm-only builds (address-width limitation, see comment) and
 * when SDB sequence points or the explicit disable flag are set.
 * NOTE(review): the return statements are not visible in this extraction.
 */
3070 direct_icalls_enabled (MonoCompile *cfg)
3072 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3074 if (cfg->compile_llvm && !cfg->llvm_only)
3077 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, inline its (lazily created, cached)
 * wrapper method instead of calling through the wrapper — see the
 * rationale comment below. Otherwise fall back to a native call.
 */
3083 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
3086 * Call the jit icall without a wrapper if possible.
3087 * The wrapper is needed for the following reasons:
3088 * - to handle exceptions thrown using mono_raise_exceptions () from the
3089 * icall function. The EH code needs the lmf frame pushed by the
3090 * wrapper to be able to unwind back to managed code.
3091 * - to be able to do stack walks for asynchronously suspended
3092 * threads when debugging.
3094 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* create-once cache of the icall wrapper method */
3098 if (!info->wrapper_method) {
3099 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3100 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* publish the wrapper before other threads may observe it */
3102 mono_memory_barrier ();
3106 * Inline the wrapper method, which is basically a call to the C icall, and
3107 * an exception check.
3109 costs = inline_method (cfg, info->wrapper_method, NULL,
3110 args, NULL, il_offset, TRUE);
3111 g_assert (costs > 0);
3112 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3116 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * Sign/zero-extend a small-integer call result (pinvoke or LLVM): native
 * code might leave the upper bits uninitialized, see the comment below.
 * Emits an ICONV widening op matched to the return type's load width.
 */
3121 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3123 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3124 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3128 * Native code might return non register sized integers
3129 * without initializing the upper bits.
3131 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3132 case OP_LOADI1_MEMBASE:
3133 widen_op = OP_ICONV_TO_I1;
3135 case OP_LOADU1_MEMBASE:
3136 widen_op = OP_ICONV_TO_U1;
3138 case OP_LOADI2_MEMBASE:
3139 widen_op = OP_ICONV_TO_I2;
3141 case OP_LOADU2_MEMBASE:
3142 widen_op = OP_ICONV_TO_U2;
3148 if (widen_op != -1) {
3149 int dreg = alloc_preg (cfg);
3152 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* the widened value keeps the original eval-stack type */
3153 widen->type = ins->type;
/*
 * emit_method_access_failure:
 * Emit code that throws a MethodAccessException at runtime: passes the
 * caller (METHOD) and callee (CIL_METHOD) as rgctx-resolved method
 * handles to the mono_throw_method_access icall.
 */
3164 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
3166 MonoInst *args [16];
3168 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
3169 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
3171 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 * Return (and cache in a static) corlib's String.memcpy helper;
 * errors out if a corlib without it is installed.
 */
3175 get_memcpy_method (void)
3177 static MonoMethod *memcpy_method = NULL;
3178 if (!memcpy_method) {
3179 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3181 g_error ("Old corlib found. Install a new one");
3183 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively set a bit in *WB_BITMAP for every pointer-sized slot of
 * KLASS (at base OFFSET, in units of SIZEOF_VOID_P) that holds a managed
 * reference. Used to drive mono_gc_wbarrier_value_copy_bitmap. Static
 * fields are skipped; nested value types with references recurse.
 */
3187 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3189 MonoClassField *field;
3190 gpointer iter = NULL;
3192 while ((field = mono_class_get_fields (klass, &iter))) {
3195 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) object header; strip it */
3197 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3198 if (mini_type_is_reference (mono_field_get_type (field))) {
3199 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3200 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3202 MonoClass *field_class = mono_class_from_mono_type (field->type);
3203 if (field_class->has_references)
3204 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for storing VALUE through PTR. Three paths, in
 * order of preference: a backend OP_CARD_TABLE_WBARRIER instruction, an
 * inline card-table mark (shift + optional mask + store of 1), or a call
 * to the generic GC write-barrier method. No-op if barriers are disabled.
 */
3210 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3212 int card_table_shift_bits;
3213 gpointer card_table_mask;
3215 MonoInst *dummy_use;
3216 int nursery_shift_bits;
3217 size_t nursery_size;
3219 if (!cfg->gen_write_barriers)
3222 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3224 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* fast path: backend-provided card table barrier op (JIT only) */
3226 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3229 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3230 wbarrier->sreg1 = ptr->dreg;
3231 wbarrier->sreg2 = value->dreg;
3232 MONO_ADD_INS (cfg->cbb, wbarrier);
/* inline card marking: card = table[ptr >> shift (& mask)] = 1 */
3233 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3234 int offset_reg = alloc_preg (cfg);
3238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3239 if (card_table_mask)
3240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3242 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3243 * IMM's larger than 32bits.
3245 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3246 card_reg = ins->dreg;
3248 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3249 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* slow path: call the GC's generic write-barrier method */
3251 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3252 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3255 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Copy a value type that contains managed references, emitting write
 * barriers where needed. Bails out (presumably returning FALSE — the
 * return statements are not visible in this extraction) when alignment
 * is below pointer size or the type exceeds 32 pointer slots; uses the
 * bitmap icall for sizes above 5 pointer words, otherwise unrolls
 * pointer-sized load/store pairs plus barriers, then copies the
 * sub-pointer tail with 4/2/1-byte moves.
 */
3259 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3261 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3262 unsigned need_wb = 0;
3267 /*types with references can't have alignment smaller than sizeof(void*) */
3268 if (align < SIZEOF_VOID_P)
3271 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3272 if (size > 32 * SIZEOF_VOID_P)
3275 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3277 /* We don't unroll more than 5 stores to avoid code bloat. */
3278 if (size > 5 * SIZEOF_VOID_P) {
3279 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3280 size += (SIZEOF_VOID_P - 1);
3281 size &= ~(SIZEOF_VOID_P - 1);
3283 EMIT_NEW_ICONST (cfg, iargs [2], size);
3284 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3285 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3289 destreg = iargs [0]->dreg;
3290 srcreg = iargs [1]->dreg;
3293 dest_ptr_reg = alloc_preg (cfg);
3294 tmp_reg = alloc_preg (cfg);
/* walking pointer into the destination, advanced per word below */
3297 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3299 while (size >= SIZEOF_VOID_P) {
3300 MonoInst *load_inst;
3301 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3302 load_inst->dreg = tmp_reg;
3303 load_inst->inst_basereg = srcreg;
3304 load_inst->inst_offset = offset;
3305 MONO_ADD_INS (cfg->cbb, load_inst);
3307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* presumably guarded by the need_wb bit for this slot — TODO confirm */
3310 emit_write_barrier (cfg, iargs [0], load_inst);
3312 offset += SIZEOF_VOID_P;
3313 size -= SIZEOF_VOID_P;
3316 /*tmp += sizeof (void*)*/
3317 if (size >= SIZEOF_VOID_P) {
3318 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3319 MONO_ADD_INS (cfg->cbb, iargs [0]);
3323 /* Those cannot be references since size < sizeof (void*) */
3325 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3326 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3332 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3339 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3349 * Emit code to copy a valuetype of type @klass whose address is stored in
3350 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 * Copy a value type of KLASS from *src->dreg to *dest->dreg (see header
 * comment above). Handles gsharedvt (runtime size/memcpy via rgctx),
 * write-barrier-aware copies for reference-bearing types (skipping
 * barriers for stack destinations), an inline memcpy fast path, and a
 * corlib memcpy call as the general fallback. NATIVE selects native
 * rather than managed layout size.
 */
3353 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3355 MonoInst *iargs [4];
3358 MonoMethod *memcpy_method;
3359 MonoInst *size_ins = NULL;
3360 MonoInst *memcpy_ins = NULL;
3364 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3367 * This check breaks with spilled vars... need to handle it during verification anyway.
3368 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime */
3371 if (mini_is_gsharedvt_klass (klass)) {
3373 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3374 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3378 n = mono_class_native_size (klass, &align);
3380 n = mono_class_value_size (klass, &align);
3382 /* if native is true there should be no references in the struct */
3383 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3384 /* Avoid barriers when storing to the stack */
3385 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3386 (dest->opcode == OP_LDADDR))) {
3392 context_used = mini_class_check_context_used (cfg, klass);
3394 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3395 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3397 } else if (context_used) {
3398 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3400 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3401 if (!cfg->compile_aot)
3402 mono_class_compute_gc_descriptor (klass);
3406 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3408 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-barrier path: inline memcpy for small fixed sizes */
3413 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3414 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3415 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3420 iargs [2] = size_ins;
3422 EMIT_NEW_ICONST (cfg, iargs [2], n);
3424 memcpy_method = get_memcpy_method ();
/* gsharedvt calls the runtime-resolved memcpy indirectly */
3426 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3428 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and cache in a static) corlib's String.memset helper;
 * errors out if a corlib without it is installed.
 */
3433 get_memset_method (void)
3435 static MonoMethod *memset_method = NULL;
3436 if (!memset_method) {
3437 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3439 g_error ("Old corlib found. Install a new one");
3441 return memset_method;
/*
 * mini_emit_initobj:
 * Zero-initialize the value type KLASS at *dest->dreg (CIL initobj).
 * gsharedvt types use a runtime-resolved bzero through the rgctx; small
 * fixed-size types use an inline memset; otherwise corlib's memset is
 * called with (dest, 0, n).
 */
3445 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3447 MonoInst *iargs [3];
3450 MonoMethod *memset_method;
3451 MonoInst *size_ins = NULL;
3452 MonoInst *bzero_ins = NULL;
3453 static MonoMethod *bzero_method;
3455 /* FIXME: Optimize this for the case when dest is an LDADDR */
3456 mono_class_init (klass);
3457 if (mini_is_gsharedvt_klass (klass)) {
3458 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3459 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* lazily resolve corlib's bzero_aligned_1 used as the calli signature */
3461 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3462 g_assert (bzero_method);
3464 iargs [1] = size_ins;
3465 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3469 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3471 n = mono_class_value_size (klass, &align);
/* small types: inline memset, no call */
3473 if (n <= sizeof (gpointer) * 8) {
3474 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3477 memset_method = get_memset_method ();
3479 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3480 EMIT_NEW_ICONST (cfg, iargs [2], n);
3481 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3488 * Emit IR to return either the this pointer for instance method,
3489 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 * Emit IR producing the runtime generic context for METHOD (see header
 * comment above): the mrgctx variable for method-context methods, the
 * vtable variable for static/valuetype methods (loading the class vtable
 * out of an mrgctx when needed), or a load of this->vtable otherwise.
 * Requires a gshared compile.
 */
3492 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3494 MonoInst *this_ins = NULL;
3496 g_assert (cfg->gshared);
/* instance methods on reference types can derive everything from 'this' */
3498 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3499 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3500 !method->klass->valuetype)
3501 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3503 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3504 MonoInst *mrgctx_loc, *mrgctx_var;
3506 g_assert (!this_ins);
3507 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* mrgctx is stored in the (reused) vtable variable slot */
3509 mrgctx_loc = mono_get_vtable_var (cfg);
3510 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3513 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3514 MonoInst *vtable_loc, *vtable_var;
3516 g_assert (!this_ins);
3518 vtable_loc = mono_get_vtable_var (cfg);
3519 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* slot actually holds an mrgctx here: load its class_vtable field */
3521 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3522 MonoInst *mrgctx_var = vtable_var;
3525 vtable_reg = alloc_preg (cfg);
3526 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3527 vtable_var->type = STACK_PTR;
/* default path: vtable = this->vtable */
3535 vtable_reg = alloc_preg (cfg);
3536 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3541 static MonoJumpInfoRgctxEntry *
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from MP, zeroed) an rgctx-entry patch record describing a
 * slot lookup: owning METHOD, whether it lives in an mrgctx, the nested
 * patch (PATCH_TYPE/PATCH_DATA) and the requested INFO_TYPE.
 */
3542 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3544 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3545 res->method = method;
3546 res->in_mrgctx = in_mrgctx;
3547 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3548 res->data->type = patch_type;
3549 res->data->data.target = patch_data;
3550 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit IR which fetches the value described by ENTRY from the runtime
 * generic context RGCTX, without going through the lazy-fetch trampoline.
 * Two strategies are visible below:
 *   - an icall-based path that passes the slot index as an AOT constant and
 *     calls mono_fill_method_rgctx/mono_fill_class_rgctx (no fastpath, since
 *     the slot is not a compile time constant there);
 *   - an inline fastpath that walks the chained rgctx arrays to the slot,
 *     branching to a slow icall block whenever a pointer or the slot itself
 *     is still NULL (i.e. not yet initialized by the runtime).
 */
3555 static inline MonoInst*
3556 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3558 MonoInst *args [16];
3561 // FIXME: No fastpath since the slot is not a compile time constant
3563 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
/* Method rgctx entries and class rgctx entries are filled by different
 * runtime helpers. */
3564 if (entry->in_mrgctx)
3565 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3567 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3571 * FIXME: This can be called during decompose, which is a problem since it creates
3573 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3575 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3577 MonoBasicBlock *is_null_bb, *end_bb;
3578 MonoInst *res, *ins, *call;
/* Resolve the abstract entry to a concrete slot, then split it into
 * array depth + index within that array. */
3581 slot = mini_get_rgctx_entry_slot (entry);
3583 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3584 index = MONO_RGCTX_SLOT_INDEX (slot);
/* For an mrgctx the first entries are occupied by the
 * MonoMethodRuntimeGenericContext header, so skip past it. */
3586 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many array hops are needed to reach the slot; each level's
 * last element links to the next array (hence "size - 1"). */
3587 for (depth = 0; ; ++depth) {
3588 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3590 if (index < size - 1)
3595 NEW_BBLOCK (cfg, end_bb);
3596 NEW_BBLOCK (cfg, is_null_bb);
3599 rgctx_reg = rgctx->dreg;
3601 rgctx_reg = alloc_preg (cfg);
/* Class rgctx: load the context pointer out of the vtable first. */
3603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3604 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3605 NEW_BBLOCK (cfg, is_null_bb);
3607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3608 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Chase the array chain down to the level holding the slot. */
3611 for (i = 0; i < depth; ++i) {
3612 int array_reg = alloc_preg (cfg);
3614 /* load ptr to next array */
3615 if (mrgctx && i == 0)
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3619 rgctx_reg = array_reg;
3620 /* is the ptr null? */
3621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3622 /* if yes, jump to actual trampoline */
3623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot value; slot 0 of each array is the link, hence index + 1. */
3627 val_reg = alloc_preg (cfg);
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3629 /* is the slot null? */
3630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3631 /* if yes, jump to actual trampoline */
3632 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fastpath succeeded: move the loaded value into the result reg. */
3635 res_reg = alloc_preg (cfg);
3636 MONO_INST_NEW (cfg, ins, OP_MOVE);
3637 ins->dreg = res_reg;
3638 ins->sreg1 = val_reg;
3639 MONO_ADD_INS (cfg->cbb, ins);
3641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: ask the runtime to fill (and return) the slot. */
3644 MONO_START_BB (cfg, is_null_bb);
3646 EMIT_NEW_ICONST (cfg, args [1], index);
3648 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3650 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
/* Both paths converge in end_bb with the value in res_reg. */
3651 MONO_INST_NEW (cfg, ins, OP_MOVE);
3652 ins->dreg = res_reg;
3653 ins->sreg1 = call->dreg;
3654 MONO_ADD_INS (cfg->cbb, ins);
3655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3657 MONO_START_BB (cfg, end_bb);
3666 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Dispatcher: either inline the fetch (emit_rgctx_fetch_inline) or emit a
 * call to the lazy-fetch trampoline, passing RGCTX as the single argument.
 * Which branch is taken depends on a condition elided here (presumably
 * cfg->llvm_only — TODO confirm against the full source).
 */
3669 static inline MonoInst*
3670 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3673 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3675 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR which loads the rgctx info of type RGCTX_TYPE for KLASS
 * (e.g. its vtable or element class) from the runtime generic context of
 * the current method.
 */
3679 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3680 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3682 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3683 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3685 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR which loads the rgctx info of type RGCTX_TYPE for signature SIG
 * from the runtime generic context of the current method.
 */
3689 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3690 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3692 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3693 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3695 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR fetching rgctx info of type RGCTX_TYPE describing a gsharedvt
 * call to CMETHOD with signature SIG.  The (sig, method) pair is packed into
 * a mempool-allocated MonoJumpInfoGSharedVtCall which becomes the patch data.
 */
3699 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3700 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3702 MonoJumpInfoGSharedVtCall *call_info;
3703 MonoJumpInfoRgctxEntry *entry;
3706 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3707 call_info->sig = sig;
3708 call_info->method = cmethod;
3710 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3711 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3713 return emit_rgctx_fetch (cfg, rgctx, entry);
3717 * emit_get_rgctx_virt_method:
3719 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * The (klass, virt_method) pair is packed into a mempool-allocated
 * MonoJumpInfoVirtMethod used as the patch data for the rgctx fetch.
 */
3722 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3723 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3725 MonoJumpInfoVirtMethod *info;
3726 MonoJumpInfoRgctxEntry *entry;
3729 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3730 info->klass = klass;
3731 info->method = virt_method;
3733 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3734 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3736 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR fetching the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry for
 * CMETHOD; INFO describes the gsharedvt info slots the method needs.
 */
3740 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3741 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3743 MonoJumpInfoRgctxEntry *entry;
3746 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3747 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3749 return emit_rgctx_fetch (cfg, rgctx, entry);
3753 * emit_get_rgctx_method:
3755 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3756 * normal constants, else emit a load from the rgctx.
3759 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3760 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic sharing in effect: the method identity is a compile-time
 * constant, so emit a direct constant instead of an rgctx load. */
3762 if (!context_used) {
3765 switch (rgctx_type) {
3766 case MONO_RGCTX_INFO_METHOD:
3767 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3769 case MONO_RGCTX_INFO_METHOD_RGCTX:
3770 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types are not expected on the constant path. */
3773 g_assert_not_reached ();
/* Shared code: fetch the info from the runtime generic context. */
3776 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3777 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3779 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR which loads the rgctx info of type RGCTX_TYPE for FIELD from the
 * runtime generic context of the current method.
 */
3784 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3785 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3787 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3788 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3790 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt info entry matching (DATA, RGCTX_TYPE)
 * in cfg->gsharedvt_info, registering a new entry if none exists yet.
 * Entries of type MONO_RGCTX_INFO_LOCAL_OFFSET are never deduplicated.
 */
3794 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3796 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3797 MonoRuntimeGenericContextInfoTemplate *template_;
/* Linear search for an existing equal entry so slots are shared. */
3802 for (i = 0; i < info->num_entries; ++i) {
3803 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3805 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, min 16); mempool allocations are never
 * freed, the old array is simply abandoned. */
3809 if (info->num_entries == info->count_entries) {
3810 MonoRuntimeGenericContextInfoTemplate *new_entries;
3811 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3813 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3815 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3816 info->entries = new_entries;
3817 info->count_entries = new_count_entries;
/* Append the new template and return its index. */
3820 idx = info->num_entries;
3821 template_ = &info->entries [idx];
3822 template_->info_type = rgctx_type;
3823 template_->data = data;
3825 info->num_entries ++;
3831 * emit_get_gsharedvt_info:
3833 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3836 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot for (data, rgctx_type), then emit a single
 * pointer load from the method's gsharedvt info variable at that slot. */
3841 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3842 /* Load info->entries [idx] */
3843 dreg = alloc_preg (cfg);
3844 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3850 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3852 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3856 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor / class initializer for KLASS if it has
 * not run yet.  The vtable is obtained either through the rgctx (shared
 * generic code) or as a compile-time constant.  If the backend supports
 * OP_GENERIC_CLASS_INIT the whole check+call is hidden inside one opcode;
 * otherwise an explicit "initialized bit already set?" fastpath is emitted
 * with a fallback icall to mono_generic_class_init.
 */
3859 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3861 MonoInst *vtable_arg;
3864 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable must come from the rgctx. */
3867 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3868 klass, MONO_RGCTX_INFO_VTABLE);
3870 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3874 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3877 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3881 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3882 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3884 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3885 ins->sreg1 = vtable_arg->dreg;
3886 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: test MonoVTable.initialized bitfield inline.  The bit's
 * byte offset/mask are computed once and cached in statics. */
3888 static int byte_offset = -1;
3889 static guint8 bitmask;
3890 int bits_reg, inited_reg;
3891 MonoBasicBlock *inited_bb;
3892 MonoInst *args [16];
3894 if (byte_offset < 0)
3895 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3897 bits_reg = alloc_ireg (cfg);
3898 inited_reg = alloc_ireg (cfg);
3900 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3903 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall. */
3905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3908 args [0] = vtable_arg;
3909 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3911 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger single-step location) at IL offset
 * IP for METHOD.  Only emitted when seq points are enabled and METHOD is the
 * method actually being compiled (not an inlinee).  INTR_LOC marks the point
 * as an interruption location; NONEMPTY_STACK tags points where the IL
 * evaluation stack is not empty.
 */
3916 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3920 if (cfg->gen_seq_points && cfg->method == method) {
3921 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3923 ins->flags |= MONO_INST_NONEMPTY_STACK;
3924 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When the --debug=casts option is active, emit IR which records the
 * source class (from OBJ_REG's vtable) and the target class KLASS into the
 * JIT TLS (class_cast_from / class_cast_to), so a later InvalidCastException
 * can report both types.  NULL_CHECK guards the recording with an obj != NULL
 * test.  No-op when better_cast_details is off.
 */
3929 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3931 if (mini_get_debug_options ()->better_cast_details) {
3932 int vtable_reg = alloc_preg (cfg);
3933 int klass_reg = alloc_preg (cfg);
3934 MonoBasicBlock *is_null_bb = NULL;
3936 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for a null object. */
3939 NEW_BBLOCK (cfg, is_null_bb);
3941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3945 tls_get = mono_get_jit_tls_intrinsic (cfg);
3947 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3951 MONO_ADD_INS (cfg->cbb, tls_get);
/* from = obj->vtable->klass */
3952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3955 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* to = KLASS, via rgctx in shared code, else as a constant. */
3957 context_used = mini_class_check_context_used (cfg, klass);
3959 MonoInst *class_ins;
3961 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3962 to_klass_reg = class_ins->dreg;
3964 to_klass_reg = alloc_preg (cfg);
3965 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3970 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Companion to save_cast_details (): clear the recorded cast info in the
 * JIT TLS after a successful cast.  No-op unless better_cast_details is on.
 */
3975 reset_cast_details (MonoCompile *cfg)
3977 /* Reset the variables holding the cast details */
3978 if (mini_get_debug_options ()->better_cast_details) {
3979 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3981 MONO_ADD_INS (cfg->cbb, tls_get);
3982 /* It is enough to reset the from field */
3983 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3988 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR verifying that OBJ is exactly of array type ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 * The comparison strategy depends on the compilation mode: class pointer
 * compare under MONO_OPT_SHARED, vtable-from-rgctx compare for shared
 * generic code, vtable constant (AOT) or immediate vtable pointer (JIT).
 */
3991 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3993 int vtable_reg = alloc_preg (cfg);
3996 context_used = mini_class_check_context_used (cfg, array_class);
3998 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
4000 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4002 if (cfg->opt & MONO_OPT_SHARED) {
4003 int class_reg = alloc_preg (cfg);
4006 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4007 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
4008 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
4009 } else if (context_used) {
4010 MonoInst *vtable_ins;
4012 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
4013 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
4015 if (cfg->compile_aot) {
/* mono_class_vtable () may fail on load errors; caller checks klass. */
4019 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4021 vt_reg = alloc_preg (cfg);
4022 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4023 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
4026 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4032 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4034 reset_cast_details (cfg);
4038 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4039 * generic code is generated.
/*
 * Implementation: calls the managed Nullable<T>.Unbox (VAL) helper.  In
 * shared code its address comes from the rgctx (calli); otherwise a direct
 * call is emitted, passing the vtable when method sharing requires it.
 */
4042 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4044 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4047 MonoInst *rgctx, *addr;
4049 /* FIXME: What if the class is shared? We might not
4050 have to get the address of the method from the
4052 addr = emit_get_rgctx_method (cfg, context_used, method,
4053 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4054 if (cfg->llvm_only) {
/* llvm-only mode has no calli trampolines: record the signature and use
 * the gsharedvt-aware calli helper. */
4055 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
4056 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4058 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4060 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable. */
4063 gboolean pass_vtable, pass_mrgctx;
4064 MonoInst *rgctx_arg = NULL;
4066 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4067 g_assert (!pass_mrgctx);
4070 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4073 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4076 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR unboxing the object on *SP to a managed pointer to its value
 * data: verifies the object is a non-array box of (the element class of)
 * KLASS, throwing InvalidCastException otherwise, then returns
 * obj + sizeof (MonoObject) typed as STACK_MP.
 */
4081 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4085 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4086 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4087 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4088 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4090 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4091 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4092 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4094 /* FIXME: generics */
4095 g_assert (klass->rank == 0);
/* Arrays can never be unboxed -> rank must be 0. */
4098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4099 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx. */
4105 MonoInst *element_class;
4107 /* This assertion is from the unboxcast insn */
4108 g_assert (klass->rank == 0);
4110 element_class = emit_get_rgctx_klass (cfg, context_used,
4111 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4113 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4114 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: constant class check with cast-detail bookkeeping. */
4116 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4117 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4118 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header. */
4121 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4122 MONO_ADD_INS (cfg->cbb, add);
4123 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete representation
 * (vtype, reference, or Nullable) is only known at runtime.  Branches on the
 * runtime MONO_RGCTX_INFO_CLASS_BOX_TYPE value and leaves ADDR_REG holding
 * either the address of the unboxed vtype data, the address of a temporary
 * holding the reference, or the address of the Nullable unbox result; the
 * value is then loaded from that address.
 */
4130 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4132 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4133 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4137 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* castclass-style check performed by the runtime helper. */
4143 args [1] = klass_inst;
4146 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4148 NEW_BBLOCK (cfg, is_ref_bb);
4149 NEW_BBLOCK (cfg, is_nullable_bb);
4150 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime box type. */
4151 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4158 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4159 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: data lives right after the MonoObject header. */
4163 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4164 MONO_ADD_INS (cfg->cbb, addr);
4166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4169 MONO_START_BB (cfg, is_ref_bb);
4171 /* Save the ref to a temporary */
4172 dreg = alloc_ireg (cfg);
4173 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4174 addr->dreg = addr_reg;
4175 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4176 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4179 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: calli Nullable<T>.Unbox through the rgctx; the signature
 * is hand-built because the concrete method cannot be constructed at
 * JIT time. */
4182 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4183 MonoInst *unbox_call;
4184 MonoMethodSignature *unbox_sig;
4186 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4187 unbox_sig->ret = &klass->byval_arg;
4188 unbox_sig->param_count = 1;
4189 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4192 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4194 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4196 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4197 addr->dreg = addr_reg;
4200 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4203 MONO_START_BB (cfg, end_bb);
/* Load the final value through the converged address. */
4206 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4212 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS (FOR_BOX when allocating for a
 * box operation).  Picks, in order: rgctx-based allocation for shared
 * generic code (managed allocator when available, else a generic icall),
 * domain+class icall under MONO_OPT_SHARED, a compact corlib helper for
 * out-of-line AOT paths, and finally vtable-based allocation (managed
 * allocator or the per-class allocation function).
 */
4215 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4217 MonoInst *iargs [2];
/* ---- shared generic code path (context_used) ---- */
4222 MonoRgctxInfoType rgctx_info;
4223 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-variable instance size. */
4224 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4226 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4228 if (cfg->opt & MONO_OPT_SHARED)
4229 rgctx_info = MONO_RGCTX_INFO_KLASS;
4231 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4232 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4234 if (cfg->opt & MONO_OPT_SHARED) {
4235 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4237 alloc_ftn = ves_icall_object_new;
4240 alloc_ftn = ves_icall_object_new_specific;
4243 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4244 if (known_instance_size) {
4245 int size = mono_class_instance_size (klass);
/* A valid object is never smaller than its header. */
4246 if (size < sizeof (MonoObject))
4247 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4249 EMIT_NEW_ICONST (cfg, iargs [1], size);
4251 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4254 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* ---- non-shared paths ---- */
4257 if (cfg->opt & MONO_OPT_SHARED) {
4258 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4259 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4261 alloc_ftn = ves_icall_object_new;
4262 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4263 /* This happens often in argument checking code, eg. throw new FooException... */
4264 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4265 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4266 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4268 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4269 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> report a type-load error via cfg. */
4273 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4274 cfg->exception_ptr = klass;
4278 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4280 if (managed_alloc) {
4281 int size = mono_class_instance_size (klass);
4282 if (size < sizeof (MonoObject))
4283 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4285 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4286 EMIT_NEW_ICONST (cfg, iargs [1], size);
4287 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4289 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first arg. */
4291 guint32 lw = vtable->klass->instance_size;
4292 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4293 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4294 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4297 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4301 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4305 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS into a new object.  Special cases:
 *   - Nullable<T>: call the managed Nullable<T>.Box helper (through the
 *     rgctx in shared code, directly otherwise);
 *   - gsharedvt KLASS: runtime three-way branch on the box type (vtype /
 *     reference / Nullable), mirroring handle_unbox_gsharedvt;
 *   - plain vtype: allocate and store the value after the object header.
 */
4308 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4310 MonoInst *alloc, *ins;
4312 if (mono_class_is_nullable (klass)) {
4313 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4316 if (cfg->llvm_only && cfg->gsharedvt) {
4317 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4318 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4319 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4321 /* FIXME: What if the class is shared? We might not
4322 have to get the method address from the RGCTX. */
4323 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4324 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4325 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4327 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, vtable passed when sharing
 * conventions require it. */
4330 gboolean pass_vtable, pass_mrgctx;
4331 MonoInst *rgctx_arg = NULL;
4333 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4334 g_assert (!pass_mrgctx);
4337 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4340 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4343 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4347 if (mini_is_gsharedvt_klass (klass)) {
4348 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4349 MonoInst *res, *is_ref, *src_var, *addr;
4352 dreg = alloc_ireg (cfg);
4354 NEW_BBLOCK (cfg, is_ref_bb);
4355 NEW_BBLOCK (cfg, is_nullable_bb);
4356 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime box type. */
4357 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4362 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the header. */
4365 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4368 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4369 ins->opcode = OP_STOREV_MEMBASE;
4371 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4372 res->type = STACK_OBJ;
4374 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4377 MONO_START_BB (cfg, is_ref_bb);
4379 /* val is a vtype, so has to load the value manually */
4380 src_var = get_vreg_to_inst (cfg, val->dreg);
4382 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4383 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4388 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: calli Nullable<T>.Box via the rgctx with a hand-built
 * signature. */
4391 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4392 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4394 MonoMethodSignature *box_sig;
4397 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4398 * construct that method at JIT time, so have to do things by hand.
4400 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4401 box_sig->ret = &mono_defaults.object_class->byval_arg;
4402 box_sig->param_count = 1;
4403 box_sig->params [0] = &klass->byval_arg;
4406 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4408 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4409 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4410 res->type = STACK_OBJ;
4414 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4416 MONO_START_BB (cfg, end_bb);
/* Plain (non-gsharedvt) vtype box. */
4420 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4424 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, a
 * generic type definition) with at least one covariant/contravariant type
 * parameter instantiated with a reference type.  Such casts need the full
 * variance-aware cast machinery rather than a simple class compare.
 */
4430 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4433 MonoGenericContainer *container;
4434 MonoGenericInst *ginst;
4436 if (mono_class_is_ginst (klass)) {
4437 container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
4438 ginst = mono_class_get_generic_class (klass)->context.class_inst;
4439 } else if (mono_class_is_gtd (klass) && context_used) {
4440 container = mono_class_get_generic_container (klass);
4441 ginst = container->context.class_inst;
/* Scan each variant parameter's actual argument for a reference type. */
4446 for (i = 0; i < container->type_argc; ++i) {
4448 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4450 type = ginst->type_argv [i];
4451 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls can be called
 * directly (see icall_is_direct_callable below). */
4457 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall may be invoked with a direct call rather
 * than through a wrapper.  Only possible when direct icalls are enabled for
 * this cfg and the icall is known not to raise exceptions — approximated by
 * a whitelist of corlib types (Math plus the hash below).
 */
4460 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4462 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4463 if (!direct_icalls_enabled (cfg))
4467 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4468 * Whitelist a few icalls for now.
4470 if (!direct_icall_type_hash) {
4471 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4473 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4474 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4475 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4476 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes
 * visible to other threads. */
4477 mono_memory_barrier ();
4478 direct_icall_type_hash = h;
4481 if (cmethod->klass == mono_defaults.math_class)
4483 /* No locking needed */
4484 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD performs a stack walk at runtime (and thus must
 * not be optimized in ways that hide the caller) — here, System.Type.GetType.
 */
4490 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4492 if (cmethod->klass == mono_defaults.systemtype_class) {
4493 if (!strcmp (cmethod->name, "GetType"))
/* A cast is "complex" (needs the cached-wrapper path instead of a plain
 * class compare) when KLASS is an interface, array, Nullable, MarshalByRef,
 * sealed, or a generic type/method parameter. */
4499 #define is_complex_isinst(klass) (mono_class_is_interface (klass) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || mono_class_is_sealed (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the isinst-with-cache marshalling wrapper.  ARGS is
 * (obj, klass, cache) — see emit_isinst_with_cache_nonshared. */
4502 emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4504 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4505 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/* Emit a call to the castclass-with-cache marshalling wrapper, recording
 * cast details around the call for --debug=casts diagnostics.  ARGS is
 * (obj, klass, cache). */
4509 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4511 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
4514 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4515 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4516 reset_cast_details (cfg);
4522 get_castclass_cache_idx (MonoCompile *cfg)
4524 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
/* Combine the method index (high 16 bits) with a per-method counter so
 * indexes are unique across the whole compilation. */
4525 cfg->castclass_cache_index ++;
4526 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_isinst_with_cache_nonshared:
 *
 *   Non-shared-code isinst through the cached wrapper: klass is a constant
 * and the per-callsite inline cache is a runtime constant identified by a
 * unique index.
 */
4531 emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4536 args [0] = obj; /* obj */
4537 EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
4539 idx = get_castclass_cache_idx (cfg); /* inline cache*/
4540 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4542 return emit_isinst_with_cache (cfg, klass, args);
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code castclass counterpart of
 * emit_isinst_with_cache_nonshared: constant klass plus a unique
 * per-callsite inline cache constant.
 */
4546 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4555 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4558 idx = get_castclass_cache_idx (cfg);
4559 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4561 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4562 return emit_castclass_with_cache (cfg, klass, args);
4566 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the IL 'castclass' opcode: cast SRC to KLASS, throwing
 * InvalidCastException on failure.  A null source passes through unchanged.
 * Complex casts (variance, interfaces, arrays, etc.) go through the cached
 * wrapper; the rest get inline vtable/class comparisons.
 */
4569 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4571 MonoBasicBlock *is_null_bb;
4572 int obj_reg = src->dreg;
4573 int vtable_reg = alloc_preg (cfg);
4574 MonoInst *klass_inst = NULL;
/* Constant null source: the cast trivially succeeds. */
4576 if (MONO_INS_IS_PCONST_NULL (src))
/* Complex casts go through the castclass-with-cache wrapper; in shared
 * code both klass and the cache come from the rgctx. */
4582 if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4583 MonoInst *cache_ins;
4585 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4590 /* klass - it's the second element of the cache entry*/
4591 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4594 args [2] = cache_ins;
4596 return emit_castclass_with_cache (cfg, klass, args);
4599 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime null check: skip all work for a null object. */
4602 NEW_BBLOCK (cfg, is_null_bb);
4604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4607 save_cast_details (cfg, klass, obj_reg, FALSE);
4609 if (mono_class_is_interface (klass)) {
4610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4611 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4613 int klass_reg = alloc_preg (cfg);
4615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array, non-AOT, non-shared: a single compare suffices
 * (vtable-pointer variant currently disabled, see FIXME). */
4617 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && mono_class_is_sealed (klass)) {
4618 /* the remoting code is broken, access the class for now */
4619 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4620 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4622 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4623 cfg->exception_ptr = klass;
4626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4631 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: hierarchical class check. */
4633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4634 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4638 MONO_START_BB (cfg, is_null_bb);
4640 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' test of SRC against KLASS: the result
 * register holds the object on success and 0 (null) on failure.  The output
 * is pre-assigned from the input so a failed test only needs to clear it.
 */
4646 * Returns NULL and sets the cfg exception on error.
4649 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4652 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4653 int obj_reg = src->dreg;
4654 int vtable_reg = alloc_preg (cfg);
4655 int res_reg = alloc_ireg_ref (cfg);
4656 MonoInst *klass_inst = NULL;
/* Variant-reference generic args and complex checks use the isinst cache helper */
4661 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4662 MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4664 args [0] = src; /* obj */
4666 /* klass - it's the second element of the cache entry*/
4667 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4669 args [2] = cache_ins; /* cache */
4670 return emit_isinst_with_cache (cfg, klass, args);
4673 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4676 NEW_BBLOCK (cfg, is_null_bb);
4677 NEW_BBLOCK (cfg, false_bb);
4678 NEW_BBLOCK (cfg, end_bb);
4680 /* Do the assignment at the beginning, so the other assignment can be if converted */
4681 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4682 ins->type = STACK_OBJ;
/* A null reference tests as "not an instance" but keeps the null result */
4685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4690 if (mono_class_is_interface (klass)) {
4691 g_assert (!context_used);
4692 /* the is_null_bb target simply copies the input register to the output */
4693 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4695 int klass_reg = alloc_preg (cfg);
/* Array case: check rank first, then the element class */
4698 int rank_reg = alloc_preg (cfg);
4699 int eclass_reg = alloc_preg (cfg);
4701 g_assert (!context_used);
4702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4704 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes where enum/parent identity matters */
4707 if (klass->cast_class == mono_defaults.object_class) {
4708 int parent_reg = alloc_preg (cfg);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4710 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4711 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4713 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4714 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4715 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4716 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4717 } else if (klass->cast_class == mono_defaults.enum_class) {
4718 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4719 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4720 } else if (mono_class_is_interface (klass->cast_class)) {
4721 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4723 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4724 /* Check that the object is a vector too */
4725 int bounds_reg = alloc_preg (cfg);
4726 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4731 /* the is_null_bb target simply copies the input register to the output */
4732 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4734 } else if (mono_class_is_nullable (klass)) {
4735 g_assert (!context_used);
4736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4737 /* the is_null_bb target simply copies the input register to the output */
4738 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: identity compare of the klass pointer is enough */
4740 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && mono_class_is_sealed (klass)) {
4741 g_assert (!context_used);
4742 /* the remoting code is broken, access the class for now */
4743 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
4744 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4746 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4747 cfg->exception_ptr = klass;
4750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4756 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4759 /* the is_null_bb target simply copies the input register to the output */
4760 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false: clear the result; is_null: keep the pre-assigned input value */
4765 MONO_START_BB (cfg, false_bb);
4767 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4770 MONO_START_BB (cfg, is_null_bb);
4772 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst opcode (see the value table in the
 * comment below).  With remoting enabled, transparent proxies get extra
 * checks against their remote/custom type info; with DISABLE_REMOTING the
 * proxy paths are compiled out.
 */
4778 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4780 /* This opcode takes as input an object reference and a class, and returns:
4781 0) if the object is an instance of the class,
4782 1) if the object is not instance of the class,
4783 2) if the object is a proxy whose type cannot be determined */
4786 #ifndef DISABLE_REMOTING
4787 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4789 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4791 int obj_reg = src->dreg;
4792 int dreg = alloc_ireg (cfg);
4794 #ifndef DISABLE_REMOTING
4795 int klass_reg = alloc_preg (cfg);
4798 NEW_BBLOCK (cfg, true_bb);
4799 NEW_BBLOCK (cfg, false_bb);
4800 NEW_BBLOCK (cfg, end_bb);
4801 #ifndef DISABLE_REMOTING
4802 NEW_BBLOCK (cfg, false2_bb);
4803 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1) */
4806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4807 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4809 if (mono_class_is_interface (klass)) {
4810 #ifndef DISABLE_REMOTING
4811 NEW_BBLOCK (cfg, interface_fail_bb);
4814 tmp_reg = alloc_preg (cfg);
4815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4816 #ifndef DISABLE_REMOTING
/* Interface miss may still be a proxy: check custom_type_info before failing */
4817 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4818 MONO_START_BB (cfg, interface_fail_bb);
4819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4821 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4823 tmp_reg = alloc_preg (cfg);
4824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4825 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4826 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4828 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4831 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish plain objects from transparent proxies */
4832 tmp_reg = alloc_preg (cfg);
4833 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4834 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4836 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4837 tmp_reg = alloc_preg (cfg);
4838 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4839 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4841 tmp_reg = alloc_preg (cfg);
4842 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4846 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4847 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4849 MONO_START_BB (cfg, no_proxy_bb);
4851 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4853 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the 0/1/2 result in dreg */
4857 MONO_START_BB (cfg, false_bb);
4859 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4860 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4862 #ifndef DISABLE_REMOTING
4863 MONO_START_BB (cfg, false2_bb);
4865 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4869 MONO_START_BB (cfg, true_bb);
4871 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4873 MONO_START_BB (cfg, end_bb);
4876 MONO_INST_NEW (cfg, ins, OP_ICONST);
4878 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass opcode (see the value table in
 * the comment below).  Like handle_cisinst, transparent proxies are given
 * special treatment when remoting is compiled in.
 */
4884 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4886 /* This opcode takes as input an object reference and a class, and returns:
4887 0) if the object is an instance of the class,
4888 1) if the object is a proxy whose type cannot be determined
4889 an InvalidCastException exception is thrown otherwise*/
4892 #ifndef DISABLE_REMOTING
4893 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4895 MonoBasicBlock *ok_result_bb;
4897 int obj_reg = src->dreg;
4898 int dreg = alloc_ireg (cfg);
4899 int tmp_reg = alloc_preg (cfg);
4901 #ifndef DISABLE_REMOTING
4902 int klass_reg = alloc_preg (cfg);
4903 NEW_BBLOCK (cfg, end_bb);
4906 NEW_BBLOCK (cfg, ok_result_bb);
/* Null always casts successfully (result 0) */
4908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4909 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4911 save_cast_details (cfg, klass, obj_reg, FALSE);
4913 if (mono_class_is_interface (klass)) {
4914 #ifndef DISABLE_REMOTING
4915 NEW_BBLOCK (cfg, interface_fail_bb);
4917 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4918 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4919 MONO_START_BB (cfg, interface_fail_bb);
4920 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy -> hard failure (InvalidCastException) */
4922 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4924 tmp_reg = alloc_preg (cfg);
4925 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4927 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 (type undetermined) */
4929 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4932 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4933 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4934 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4937 #ifndef DISABLE_REMOTING
4938 NEW_BBLOCK (cfg, no_proxy_bb);
4940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4941 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4942 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4944 tmp_reg = alloc_preg (cfg);
4945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4946 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4948 tmp_reg = alloc_preg (cfg);
4949 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4950 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4951 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4953 NEW_BBLOCK (cfg, fail_1_bb);
4955 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4957 MONO_START_BB (cfg, fail_1_bb);
4959 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4960 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4962 MONO_START_BB (cfg, no_proxy_bb);
4964 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4966 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4970 MONO_START_BB (cfg, ok_result_bb);
4972 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4974 #ifndef DISABLE_REMOTING
4975 MONO_START_BB (cfg, end_bb);
4979 MONO_INST_NEW (cfg, ins, OP_ICONST);
4981 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Inline Enum.HasFlag: load the enum value, AND it with the flag, and
 * compare the result back against the flag ((value & flag) == flag).
 * Integer width (I4 vs I8) follows the enum's underlying type.
 */
4986 static G_GNUC_UNUSED MonoInst*
4987 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4989 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4990 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4993 switch (enum_type->type) {
4996 #if SIZEOF_REGISTER == 8
5008 MonoInst *load, *and_, *cmp, *ceq;
5009 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5010 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5011 int dest_reg = alloc_ireg (cfg);
5013 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
5014 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
5015 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
5016 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
5018 ceq->type = STACK_I4;
/* Decompose so backends without these composite opcodes can handle them */
5021 load = mono_decompose_opcode (cfg, load);
5022 and_ = mono_decompose_opcode (cfg, and_);
5023 cmp = mono_decompose_opcode (cfg, cmp);
5024 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR constructing a delegate of type KLASS over METHOD with the given
 * TARGET, inlining the work of mono_delegate_ctor (): allocate the object,
 * store the target (with a write barrier), the method, an optional per-domain
 * code slot, and the invoke trampoline.  llvm-only mode calls an init icall
 * instead of installing trampolines.
 */
5032 * Returns NULL and sets the cfg exception on error.
5034 static G_GNUC_UNUSED MonoInst*
5035 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5039 gpointer trampoline;
5040 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke impl for the Invoke signature */
5044 if (virtual_ && !cfg->llvm_only) {
5045 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5048 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5052 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5056 /* Inline the contents of mono_delegate_ctor */
5058 /* Set target field */
5059 /* Optimize away setting of NULL target */
5060 if (!MONO_INS_IS_PCONST_NULL (target)) {
5061 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
5062 if (cfg->gen_write_barriers) {
5063 dreg = alloc_preg (cfg);
5064 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5065 emit_write_barrier (cfg, ptr, target);
5069 /* Set method field */
5070 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5071 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5074 * To avoid looking up the compiled code belonging to the target method
5075 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5076 * store it, and we fill it after the method has been compiled.
5078 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5079 MonoInst *code_slot_ins;
5082 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create (under the domain lock) a shared code slot per method */
5084 domain = mono_domain_get ();
5085 mono_domain_lock (domain);
5086 if (!domain_jit_info (domain)->method_code_hash)
5087 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5088 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5090 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5091 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5093 mono_domain_unlock (domain);
5095 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5097 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize through an icall instead of trampolines */
5100 if (cfg->llvm_only) {
5101 MonoInst *args [16];
5106 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5107 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5110 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: the trampoline is resolved at load time via a patch */
5116 if (cfg->compile_aot) {
5117 MonoDelegateClassMethodPair *del_tramp;
5119 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5120 del_tramp->klass = klass;
5121 del_tramp->method = context_used ? NULL : method;
5122 del_tramp->is_virtual = virtual_;
5123 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5126 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5128 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5129 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5132 /* Set invoke_impl field */
5134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5136 dreg = alloc_preg (cfg);
5137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5138 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5140 dreg = alloc_preg (cfg);
5141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5142 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate */
5145 dreg = alloc_preg (cfg);
5146 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5149 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va () icall for a
 * multi-dimensional 'newarr'.  The vararg calling convention makes this
 * path unsupported by LLVM, so LLVM compilation is disabled for the method.
 */
5155 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5157 MonoJitICallInfo *info;
5159 /* Need to register the icall so it gets an icall wrapper */
5160 info = mono_get_array_new_va_icall (rank);
5162 cfg->flags |= MONO_CFG_HAS_VARARGS;
5164 /* mono_array_new_va () needs a vararg calling convention */
5165 cfg->exception_message = g_strdup ("array-new");
5166 cfg->disable_llvm = TRUE;
5168 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5169 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5173 * handle_constrained_gsharedvt_call:
5175 * Handle constrained calls where the receiver is a gsharedvt type.
5176 * Return the instruction representing the call. Set the cfg exception on failure.
5179 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5180 gboolean *ref_emit_widen)
5182 MonoInst *ins = NULL;
5183 gboolean emit_widen = *ref_emit_widen;
5186 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5187 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5188 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of method/signature shapes is supported by the icall path */
5190 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5191 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5192 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5193 MonoInst *args [16];
5196 * This case handles calls to
5197 * - object:ToString()/Equals()/GetHashCode(),
5198 * - System.IComparable<T>:CompareTo()
5199 * - System.IEquatable<T>:Equals ()
5200 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
5204 if (mono_method_check_context_used (cmethod))
5205 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5207 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5208 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5210 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5211 if (fsig->hasthis && fsig->param_count) {
5212 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5213 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5214 ins->dreg = alloc_preg (cfg);
5215 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5216 MONO_ADD_INS (cfg->cbb, ins);
5219 if (mini_is_gsharedvt_type (fsig->params [0])) {
5220 int addr_reg, deref_arg_reg;
5222 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5223 deref_arg_reg = alloc_preg (cfg);
5224 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5225 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5227 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5228 addr_reg = ins->dreg;
5229 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5231 EMIT_NEW_ICONST (cfg, args [3], 0);
5232 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5235 EMIT_NEW_ICONST (cfg, args [3], 0);
5236 EMIT_NEW_ICONST (cfg, args [4], 0);
5238 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/deref depending on the return type */
5241 if (mini_is_gsharedvt_type (fsig->ret)) {
5242 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5243 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed payload */
5247 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5248 MONO_ADD_INS (cfg->cbb, add);
5250 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5251 MONO_ADD_INS (cfg->cbb, ins);
5252 /* ins represents the call result */
5255 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5258 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address
 * into cfg->got_var, placing it at the very start of the entry bblock, and
 * add a dummy use in the exit bblock so liveness keeps the variable alive
 * for the whole method.  No-op if the GOT var is absent or already allocated.
 */
5267 mono_emit_load_got_addr (MonoCompile *cfg)
5269 MonoInst *getaddr, *dummy_use;
5271 if (!cfg->got_var || cfg->got_var_allocated)
5274 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5275 getaddr->cil_code = cfg->header->code;
5276 getaddr->dreg = cfg->got_var->dreg;
5278 /* Add it to the start of the first bblock */
5279 if (cfg->bb_entry->code) {
5280 getaddr->next = cfg->bb_entry->code;
5281 cfg->bb_entry->code = getaddr;
5284 MONO_ADD_INS (cfg->bb_entry, getaddr);
5286 cfg->got_var_allocated = TRUE;
5289 * Add a dummy use to keep the got_var alive, since real uses might
5290 * only be generated by the back ends.
5291 * Add it to end_bblock, so the variable's lifetime covers the whole
5293 * It would be better to make the usage of the got var explicit in all
5294 * cases when the backend needs it (i.e. calls, throw etc.), so this
5295 * wouldn't be needed.
5297 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5298 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit, overridable via MONO_INLINELIMIT */
5301 static int inline_limit;
5302 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects on: inlining disabled, excessive inline depth, NoInlining /
 * synchronized methods, MarshalByRef classes, body size over the limit
 * (unless AggressiveInlining), classes whose cctor cannot be handled here,
 * soft-float R4 signatures, and methods on the dont_inline list.
 */
5305 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5307 MonoMethodHeaderSummary header;
5309 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5310 MonoMethodSignature *sig = mono_method_signature (method);
5314 if (cfg->disable_inline)
5319 if (cfg->inline_depth > 10)
5322 if (!mono_method_get_header_summary (method, &header))
5325 /*runtime, icall and pinvoke are checked by summary call*/
5326 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5327 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5328 (mono_class_is_marshalbyref (method->klass)) ||
5332 /* also consider num_locals? */
5333 /* Do the size check early to avoid creating vtables */
5334 if (!inline_limit_inited) {
5335 if (g_getenv ("MONO_INLINELIMIT"))
5336 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5338 inline_limit = INLINE_LENGTH_LIMIT;
5339 inline_limit_inited = TRUE;
5341 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5345 * if we can initialize the class of the method right away, we do,
5346 * otherwise we don't allow inlining if the class needs initialization,
5347 * since it would mean inserting a call to mono_runtime_class_init()
5348 * inside the inlined code
5350 if (!(cfg->opt & MONO_OPT_SHARED)) {
5351 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5352 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5353 vtable = mono_class_vtable (cfg->domain, method->klass);
5356 if (!cfg->compile_aot) {
5358 if (!mono_runtime_class_init_full (vtable, &error)) {
5359 mono_error_cleanup (&error);
5363 } else if (mono_class_is_before_field_init (method->klass)) {
5364 if (cfg->run_cctors && method->klass->has_cctor) {
5365 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5366 if (!method->klass->runtime_info)
5367 /* No vtable created yet */
5369 vtable = mono_class_vtable (cfg->domain, method->klass);
5372 /* This makes so that inline cannot trigger */
5373 /* .cctors: too many apps depend on them */
5374 /* running with a specific order... */
5375 if (! vtable->initialized)
5378 if (!mono_runtime_class_init_full (vtable, &error)) {
5379 mono_error_cleanup (&error);
5383 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5384 if (!method->klass->runtime_info)
5385 /* No vtable created yet */
5387 vtable = mono_class_vtable (cfg->domain, method->klass);
5390 if (!vtable->initialized)
5395 * If we're compiling for shared code
5396 * the cctor will need to be run at aot method load time, for example,
5397 * or at the end of the compilation of the inlining method.
5399 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft-float: R4 in the signature forces the non-inlined fallback path */
5403 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5404 if (mono_arch_is_soft_float ()) {
5406 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5408 for (i = 0; i < sig->param_count; ++i)
5409 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5414 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires emitting a
 * class-initialization check for KLASS (i.e. its cctor may not have run
 * by the time the access executes).
 */
5421 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5423 if (!cfg->compile_aot) {
5425 if (vtable->initialized)
5429 if (mono_class_is_before_field_init (klass)) {
5430 if (cfg->method == method)
5434 if (!mono_class_needs_cctor_run (klass, method))
5437 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5438 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for an element of a one-dimensional array:
 * &arr->vector + index * element_size, with an optional bounds check
 * (BCHECK).  On x86/amd64 power-of-two element sizes use a single LEA;
 * gsharedvt variable-size elements read the element size from the RGCTX.
 */
5445 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5449 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5452 if (mini_is_gsharedvt_variable_klass (klass)) {
5455 mono_class_init (klass);
5456 size = mono_class_array_element_size (klass);
5459 mult_reg = alloc_preg (cfg);
5460 array_reg = arr->dreg;
5461 index_reg = index->dreg;
5463 #if SIZEOF_REGISTER == 8
5464 /* The array reg is 64 bits but the index reg is only 32 */
5465 if (COMPILE_LLVM (cfg)) {
5467 index2_reg = index_reg;
5469 index2_reg = alloc_preg (cfg);
5470 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5473 if (index->type == STACK_I8) {
5474 index2_reg = alloc_preg (cfg);
5475 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5477 index2_reg = index_reg;
5482 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5484 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold scale and offset into one LEA */
5485 if (size == 1 || size == 2 || size == 4 || size == 8) {
5486 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5488 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5489 ins->klass = mono_class_get_element_class (klass);
5490 ins->type = STACK_MP;
5496 add_reg = alloc_ireg_mp (cfg);
5499 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it from the RGCTX */
5502 g_assert (cfg->gshared);
5503 context_used = mini_class_check_context_used (cfg, klass);
5504 g_assert (context_used);
5505 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5506 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5510 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5511 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5512 ins->klass = mono_class_get_element_class (klass);
5513 ins->type = STACK_MP;
5514 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for an element of a rank-2 array: subtract
 * each dimension's lower bound, range-check both adjusted indexes against
 * the bounds array (IndexOutOfRangeException on failure), then compute
 * (idx1 * len2 + idx2) * element_size + &arr->vector.
 */
5520 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5522 int bounds_reg = alloc_preg (cfg);
5523 int add_reg = alloc_ireg_mp (cfg);
5524 int mult_reg = alloc_preg (cfg);
5525 int mult2_reg = alloc_preg (cfg);
5526 int low1_reg = alloc_preg (cfg);
5527 int low2_reg = alloc_preg (cfg);
5528 int high1_reg = alloc_preg (cfg);
5529 int high2_reg = alloc_preg (cfg);
5530 int realidx1_reg = alloc_preg (cfg);
5531 int realidx2_reg = alloc_preg (cfg);
5532 int sum_reg = alloc_preg (cfg);
5533 int index1, index2, tmpreg;
5537 mono_class_init (klass);
5538 size = mono_class_array_element_size (klass);
5540 index1 = index_ins1->dreg;
5541 index2 = index_ins2->dreg;
5543 #if SIZEOF_REGISTER == 8
5544 /* The array reg is 64 bits but the index reg is only 32 */
5545 if (COMPILE_LLVM (cfg)) {
5548 tmpreg = alloc_preg (cfg);
5549 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5551 tmpreg = alloc_preg (cfg);
5552 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5556 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5560 /* range checking */
5561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5562 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; must be < length */
5564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5565 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5566 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5568 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5570 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives one MonoArrayBounds further in */
5572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5573 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5574 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5575 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5576 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5577 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5578 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize: ((realidx1 * len2) + realidx2) * size + vector offset */
5580 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5581 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5583 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5584 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5586 ins->type = STACK_MP;
5588 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an array element for the Address ()
 * method of a multi-dimensional array class. The rank is derived from the
 * method signature (one trailing value parameter is skipped for setters).
 * Rank 1 uses the inline ldelema_1 path; rank 2 uses the inline ldelema_2
 * path when the backend and options allow it; otherwise a call to the
 * marshalled Address helper is emitted.
 * NOTE(review): this view of the source is lossy — some lines of the function
 * body are not visible here.
 */
5594 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5598 MonoMethod *addr_method;
5600 MonoClass *eclass = cmethod->klass->element_class;
5602 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5605 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5607 /* emit_ldelema_2 depends on OP_LMUL */
5608 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5609 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5612 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the generated Array.Address helper */
5615 element_size = mono_class_array_element_size (eclass);
5616 addr_method = mono_marshal_get_array_address (rank, element_size);
5617 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint instruction. */
5622 static MonoBreakPolicy
5623 always_insert_breakpoint (MonoMethod *method)
5625 return MONO_BREAK_POLICY_ALWAYS;
/* Active break-policy callback; embedders replace it via mono_set_break_policy (). */
5628 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5631 * mono_set_break_policy:
5632 * policy_callback: the new callback function
5634 * Allow embedders to decide whether to actually obey breakpoint instructions
5635 * (both break IL instructions and Debugger.Break () method calls), for example
5636 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5637 * untrusted or semi-trusted code.
5639 * @policy_callback will be called every time a break point instruction needs to
5640 * be inserted with the method argument being the method that calls Debugger.Break()
5641 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5642 * if it wants the breakpoint to not be effective in the given method.
5643 * #MONO_BREAK_POLICY_ALWAYS is the default.
5646 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5648 if (policy_callback)
5649 break_policy_func = policy_callback;
/* A NULL callback resets the policy to the default (always break). */
5651 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — misspelled name kept; callers use it)
 *
 *   Ask the installed break policy whether a breakpoint should actually be
 * emitted for METHOD. MONO_BREAK_POLICY_ON_DBG is no longer supported and
 * only warns; an out-of-range return value from the callback also warns.
 * NOTE(review): the per-case return statements are not visible in this lossy
 * view of the source.
 */
5655 should_insert_brekpoint (MonoMethod *method) {
5656 switch (break_policy_func (method)) {
5657 case MONO_BREAK_POLICY_ALWAYS:
5659 case MONO_BREAK_POLICY_NEVER:
5661 case MONO_BREAK_POLICY_ON_DBG:
5662 g_warning ("mdb no longer supported");
5665 g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element copy for the Array Get/SetGenericValueImpl icalls:
 * one branch copies *args [2] into the element (emitting a write barrier for
 * reference element types), the other copies the element into *args [2].
 * Bounds checking is the caller's responsibility (see comment below).
 * NOTE(review): the is_set branch selector line is not visible in this lossy
 * view of the source.
 */
5672 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5674 MonoInst *addr, *store, *load;
5675 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5677 /* the bounds check is already done by the callers */
5678 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5680 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5681 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5682 if (mini_type_is_reference (&eklass->byval_arg))
5683 emit_write_barrier (cfg, addr, load);
5685 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5686 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS's by-value type is a reference type. */
5693 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5695 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR storing sp [2] into element sp [1] of array sp [0] with element
 * class KLASS. Reference-type stores of possibly non-null values go through
 * the virtual stelemref helper (which performs the array covariance check);
 * gsharedvt element types use OP_STOREV_MEMBASE through a ldelema address;
 * constant indexes get an inline bounds check plus a direct membase store;
 * everything else goes through ldelema + store + write barrier for refs.
 * NOTE(review): this view of the source is lossy — some lines of the function
 * body are not visible here.
 */
5699 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5701 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5702 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
5703 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5704 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5705 MonoInst *iargs [3];
5708 mono_class_setup_vtable (obj_array);
5709 g_assert (helper->slot);
5711 if (sp [0]->type != STACK_OBJ)
5713 if (sp [2]->type != STACK_OBJ)
5720 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5724 if (mini_is_gsharedvt_variable_klass (klass)) {
5727 // FIXME-VT: OP_ICONST optimization
5728 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5729 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5730 ins->opcode = OP_STOREV_MEMBASE;
5731 } else if (sp [1]->opcode == OP_ICONST) {
5732 int array_reg = sp [0]->dreg;
5733 int index_reg = sp [1]->dreg;
/* The element offset can be folded into the store when the index is constant */
5734 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5736 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5737 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5740 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5741 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5743 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5744 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5745 if (generic_class_is_reference_type (cfg, klass))
5746 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeStore/UnsafeLoad intrinsics: a store delegates to
 * emit_array_store () with safety checks disabled; a load computes the element
 * address via ldelema (no bounds check) and loads from it. The element class
 * comes from params [2] for stores and from the return type for loads.
 * NOTE(review): this view of the source is lossy — some lines of the function
 * body are not visible here.
 */
5753 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5758 eklass = mono_class_from_mono_type (fsig->params [2]);
5760 eklass = mono_class_from_mono_type (fsig->ret);
5763 return emit_array_store (cfg, eklass, args, FALSE);
5765 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5766 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic may reinterpret a value
 * of PARAM_KLASS as RETURN_KLASS without an actual conversion. Rejects
 * valuetype/reference mixes, reference types, types containing GC references,
 * struct/scalar mixes, floats, and size-mismatched structs; accepts equal
 * sizes and scalar pairs in the same register-size class.
 * NOTE(review): this view of the source is lossy — the per-case return
 * statements are not visible here. The code change below only restores the
 * '&param_klass' expressions that were corrupted into '¶m_klass' by an
 * HTML-entity ('&para') mangling; all other tokens are unchanged.
 */
5772 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5775 int param_size, return_size;
5777 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5778 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5780 if (cfg->verbose_level > 3)
5781 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5783 //Don't allow mixing reference types with value types
5784 if (param_klass->valuetype != return_klass->valuetype) {
5785 if (cfg->verbose_level > 3)
5786 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5790 if (!param_klass->valuetype) {
5791 if (cfg->verbose_level > 3)
5792 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types with embedded GC references cannot be blindly reinterpreted */
5797 if (param_klass->has_references || return_klass->has_references)
5800 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5801 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5802 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5803 if (cfg->verbose_level > 3)
5804 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5808 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5809 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5810 if (cfg->verbose_level > 3)
5811 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5815 param_size = mono_class_value_size (param_klass, &align);
5816 return_size = mono_class_value_size (return_klass, &align);
5818 //We can do it if sizes match
5819 if (param_size == return_size) {
5820 if (cfg->verbose_level > 3)
5821 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5825 //No simple way to handle struct if sizes don't match
5826 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5827 if (cfg->verbose_level > 3)
5828 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5833 * Same reg size category.
5834 * A quick note on why we don't require widening here.
5835 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5837 * Since the source value comes from a function argument, the JIT will already have
5838 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5840 if (param_size <= 4 && return_size <= 4) {
5841 if (cfg->verbose_level > 3)
5842 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline the Array.UnsafeMov<S,R> intrinsic when the source and destination
 * types (or their element classes, for rank-1 arrays) are bit-compatible per
 * is_unsafe_mov_compatible (). Bails out for gsharedvt return types.
 * NOTE(review): this view of the source is lossy — the emitted move / return
 * statements are not visible here.
 */
5850 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5852 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5853 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5855 if (mini_is_gsharedvt_variable_type (fsig->ret))
5858 //Valuetypes that are semantically equivalent or numbers than can be widened to
5859 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5862 //Arrays of valuetypes that are semantically equivalent
5863 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to emit an intrinsic for the constructor CMETHOD: SIMD intrinsics
 * first (when MONO_OPT_SIMD is enabled and the arch supports them), then
 * native-types intrinsics. Returns the emitted instruction, or presumably
 * NULL when no intrinsic applies — the fall-through lines are not visible
 * in this lossy view of the source.
 */
5870 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5872 #ifdef MONO_ARCH_SIMD_INTRINSICS
5873 MonoInst *ins = NULL;
5875 if (cfg->opt & MONO_OPT_SIMD) {
5876 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5882 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5886 emit_memory_barrier (MonoCompile *cfg, int kind)
5888 MonoInst *ins = NULL;
5889 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5890 MONO_ADD_INS (cfg->cbb, ins);
5891 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsics that only the LLVM backend supports: unary System.Math
 * Sin/Cos/Sqrt/Abs(double) as single opcodes, and Min/Max over I4/U4/I8/U8
 * as IMIN/IMAX/LMIN/LMAX (plus _UN variants) when MONO_OPT_CMOV is enabled.
 * NOTE(review): this view of the source is lossy — the opcode assignments for
 * several cases are not visible here.
 */
5897 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5899 MonoInst *ins = NULL;
5902 /* The LLVM backend supports these intrinsics */
5903 if (cmethod->klass == mono_defaults.math_class) {
5904 if (strcmp (cmethod->name, "Sin") == 0) {
5906 } else if (strcmp (cmethod->name, "Cos") == 0) {
5908 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5910 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double -> double intrinsic */
5914 if (opcode && fsig->param_count == 1) {
5915 MONO_INST_NEW (cfg, ins, opcode);
5916 ins->type = STACK_R8;
5917 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5918 ins->sreg1 = args [0]->dreg;
5919 MONO_ADD_INS (cfg->cbb, ins);
5923 if (cfg->opt & MONO_OPT_CMOV) {
5924 if (strcmp (cmethod->name, "Min") == 0) {
5925 if (fsig->params [0]->type == MONO_TYPE_I4)
5927 if (fsig->params [0]->type == MONO_TYPE_U4)
5928 opcode = OP_IMIN_UN;
5929 else if (fsig->params [0]->type == MONO_TYPE_I8)
5931 else if (fsig->params [0]->type == MONO_TYPE_U8)
5932 opcode = OP_LMIN_UN;
5933 } else if (strcmp (cmethod->name, "Max") == 0) {
5934 if (fsig->params [0]->type == MONO_TYPE_I4)
5936 if (fsig->params [0]->type == MONO_TYPE_U4)
5937 opcode = OP_IMAX_UN;
5938 else if (fsig->params [0]->type == MONO_TYPE_I8)
5940 else if (fsig->params [0]->type == MONO_TYPE_U8)
5941 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic; stack type follows the operand width */
5945 if (opcode && fsig->param_count == 2) {
5946 MONO_INST_NEW (cfg, ins, opcode);
5947 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5948 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5949 ins->sreg1 = args [0]->dreg;
5950 ins->sreg2 = args [1]->dreg;
5951 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Emit intrinsics that are safe under generic sharing: the System.Array
 * UnsafeStore/UnsafeLoad/UnsafeMov internal helpers. Returns the emitted
 * instruction; the non-matching fall-through is not visible in this lossy
 * view of the source.
 */
5959 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5961 if (cmethod->klass == mono_defaults.array_class) {
5962 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5963 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5964 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5965 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5966 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5967 return emit_array_unsafe_mov (cfg, fsig, args);
5974 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5976 MonoInst *ins = NULL;
5978 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5980 if (cmethod->klass == mono_defaults.string_class) {
5981 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5982 int dreg = alloc_ireg (cfg);
5983 int index_reg = alloc_preg (cfg);
5984 int add_reg = alloc_preg (cfg);
5986 #if SIZEOF_REGISTER == 8
5987 if (COMPILE_LLVM (cfg)) {
5988 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5990 /* The array reg is 64 bits but the index reg is only 32 */
5991 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5994 index_reg = args [1]->dreg;
5996 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5998 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5999 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
6000 add_reg = ins->dreg;
6001 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6004 int mult_reg = alloc_preg (cfg);
6005 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
6006 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
6007 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6008 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
6010 type_from_op (cfg, ins, NULL, NULL);
6012 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6013 int dreg = alloc_ireg (cfg);
6014 /* Decompose later to allow more optimizations */
6015 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
6016 ins->type = STACK_I4;
6017 ins->flags |= MONO_INST_FAULT;
6018 cfg->cbb->has_array_access = TRUE;
6019 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
6024 } else if (cmethod->klass == mono_defaults.object_class) {
6025 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
6026 int dreg = alloc_ireg_ref (cfg);
6027 int vt_reg = alloc_preg (cfg);
6028 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6029 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6030 type_from_op (cfg, ins, NULL, NULL);
6033 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6034 int dreg = alloc_ireg (cfg);
6035 int t1 = alloc_ireg (cfg);
6037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6038 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6039 ins->type = STACK_I4;
6042 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6043 MONO_INST_NEW (cfg, ins, OP_NOP);
6044 MONO_ADD_INS (cfg->cbb, ins);
6048 } else if (cmethod->klass == mono_defaults.array_class) {
6049 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6050 return emit_array_generic_access (cfg, fsig, args, FALSE);
6051 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6052 return emit_array_generic_access (cfg, fsig, args, TRUE);
6054 #ifndef MONO_BIG_ARRAYS
6056 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6059 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6060 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6061 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6062 int dreg = alloc_ireg (cfg);
6063 int bounds_reg = alloc_ireg_mp (cfg);
6064 MonoBasicBlock *end_bb, *szarray_bb;
6065 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6067 NEW_BBLOCK (cfg, end_bb);
6068 NEW_BBLOCK (cfg, szarray_bb);
6070 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6071 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6072 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6073 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6074 /* Non-szarray case */
6076 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6077 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6079 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6080 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6081 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6082 MONO_START_BB (cfg, szarray_bb);
6085 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6086 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6088 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6089 MONO_START_BB (cfg, end_bb);
6091 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6092 ins->type = STACK_I4;
6098 if (cmethod->name [0] != 'g')
6101 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6102 int dreg = alloc_ireg (cfg);
6103 int vtable_reg = alloc_preg (cfg);
6104 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6105 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6106 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6107 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6108 type_from_op (cfg, ins, NULL, NULL);
6111 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6112 int dreg = alloc_ireg (cfg);
6114 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6115 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6116 type_from_op (cfg, ins, NULL, NULL);
6121 } else if (cmethod->klass == runtime_helpers_class) {
6122 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6123 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6127 } else if (cmethod->klass == mono_defaults.monitor_class) {
6128 gboolean is_enter = FALSE;
6129 gboolean is_v4 = FALSE;
6131 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
6135 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
6140 * To make async stack traces work, icalls which can block should have a wrapper.
6141 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6143 MonoBasicBlock *end_bb;
6145 NEW_BBLOCK (cfg, end_bb);
6147 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6150 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6151 MONO_START_BB (cfg, end_bb);
6154 } else if (cmethod->klass == mono_defaults.thread_class) {
6155 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6156 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6157 MONO_ADD_INS (cfg->cbb, ins);
6159 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6160 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6161 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6163 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6165 if (fsig->params [0]->type == MONO_TYPE_I1)
6166 opcode = OP_LOADI1_MEMBASE;
6167 else if (fsig->params [0]->type == MONO_TYPE_U1)
6168 opcode = OP_LOADU1_MEMBASE;
6169 else if (fsig->params [0]->type == MONO_TYPE_I2)
6170 opcode = OP_LOADI2_MEMBASE;
6171 else if (fsig->params [0]->type == MONO_TYPE_U2)
6172 opcode = OP_LOADU2_MEMBASE;
6173 else if (fsig->params [0]->type == MONO_TYPE_I4)
6174 opcode = OP_LOADI4_MEMBASE;
6175 else if (fsig->params [0]->type == MONO_TYPE_U4)
6176 opcode = OP_LOADU4_MEMBASE;
6177 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6178 opcode = OP_LOADI8_MEMBASE;
6179 else if (fsig->params [0]->type == MONO_TYPE_R4)
6180 opcode = OP_LOADR4_MEMBASE;
6181 else if (fsig->params [0]->type == MONO_TYPE_R8)
6182 opcode = OP_LOADR8_MEMBASE;
6183 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6184 opcode = OP_LOAD_MEMBASE;
6187 MONO_INST_NEW (cfg, ins, opcode);
6188 ins->inst_basereg = args [0]->dreg;
6189 ins->inst_offset = 0;
6190 MONO_ADD_INS (cfg->cbb, ins);
6192 switch (fsig->params [0]->type) {
6199 ins->dreg = mono_alloc_ireg (cfg);
6200 ins->type = STACK_I4;
6204 ins->dreg = mono_alloc_lreg (cfg);
6205 ins->type = STACK_I8;
6209 ins->dreg = mono_alloc_ireg (cfg);
6210 #if SIZEOF_REGISTER == 8
6211 ins->type = STACK_I8;
6213 ins->type = STACK_I4;
6218 ins->dreg = mono_alloc_freg (cfg);
6219 ins->type = STACK_R8;
6222 g_assert (mini_type_is_reference (fsig->params [0]));
6223 ins->dreg = mono_alloc_ireg_ref (cfg);
6224 ins->type = STACK_OBJ;
6228 if (opcode == OP_LOADI8_MEMBASE)
6229 ins = mono_decompose_opcode (cfg, ins);
6231 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6235 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6237 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6239 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6240 opcode = OP_STOREI1_MEMBASE_REG;
6241 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6242 opcode = OP_STOREI2_MEMBASE_REG;
6243 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6244 opcode = OP_STOREI4_MEMBASE_REG;
6245 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6246 opcode = OP_STOREI8_MEMBASE_REG;
6247 else if (fsig->params [0]->type == MONO_TYPE_R4)
6248 opcode = OP_STORER4_MEMBASE_REG;
6249 else if (fsig->params [0]->type == MONO_TYPE_R8)
6250 opcode = OP_STORER8_MEMBASE_REG;
6251 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6252 opcode = OP_STORE_MEMBASE_REG;
6255 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6257 MONO_INST_NEW (cfg, ins, opcode);
6258 ins->sreg1 = args [1]->dreg;
6259 ins->inst_destbasereg = args [0]->dreg;
6260 ins->inst_offset = 0;
6261 MONO_ADD_INS (cfg->cbb, ins);
6263 if (opcode == OP_STOREI8_MEMBASE_REG)
6264 ins = mono_decompose_opcode (cfg, ins);
6269 } else if (cmethod->klass->image == mono_defaults.corlib &&
6270 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6271 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6274 #if SIZEOF_REGISTER == 8
6275 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6276 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6277 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6278 ins->dreg = mono_alloc_preg (cfg);
6279 ins->sreg1 = args [0]->dreg;
6280 ins->type = STACK_I8;
6281 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6282 MONO_ADD_INS (cfg->cbb, ins);
6286 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6288 /* 64 bit reads are already atomic */
6289 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6290 load_ins->dreg = mono_alloc_preg (cfg);
6291 load_ins->inst_basereg = args [0]->dreg;
6292 load_ins->inst_offset = 0;
6293 load_ins->type = STACK_I8;
6294 MONO_ADD_INS (cfg->cbb, load_ins);
6296 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6303 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6304 MonoInst *ins_iconst;
6307 if (fsig->params [0]->type == MONO_TYPE_I4) {
6308 opcode = OP_ATOMIC_ADD_I4;
6309 cfg->has_atomic_add_i4 = TRUE;
6311 #if SIZEOF_REGISTER == 8
6312 else if (fsig->params [0]->type == MONO_TYPE_I8)
6313 opcode = OP_ATOMIC_ADD_I8;
6316 if (!mono_arch_opcode_supported (opcode))
6318 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6319 ins_iconst->inst_c0 = 1;
6320 ins_iconst->dreg = mono_alloc_ireg (cfg);
6321 MONO_ADD_INS (cfg->cbb, ins_iconst);
6323 MONO_INST_NEW (cfg, ins, opcode);
6324 ins->dreg = mono_alloc_ireg (cfg);
6325 ins->inst_basereg = args [0]->dreg;
6326 ins->inst_offset = 0;
6327 ins->sreg2 = ins_iconst->dreg;
6328 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6329 MONO_ADD_INS (cfg->cbb, ins);
6331 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6332 MonoInst *ins_iconst;
6335 if (fsig->params [0]->type == MONO_TYPE_I4) {
6336 opcode = OP_ATOMIC_ADD_I4;
6337 cfg->has_atomic_add_i4 = TRUE;
6339 #if SIZEOF_REGISTER == 8
6340 else if (fsig->params [0]->type == MONO_TYPE_I8)
6341 opcode = OP_ATOMIC_ADD_I8;
6344 if (!mono_arch_opcode_supported (opcode))
6346 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6347 ins_iconst->inst_c0 = -1;
6348 ins_iconst->dreg = mono_alloc_ireg (cfg);
6349 MONO_ADD_INS (cfg->cbb, ins_iconst);
6351 MONO_INST_NEW (cfg, ins, opcode);
6352 ins->dreg = mono_alloc_ireg (cfg);
6353 ins->inst_basereg = args [0]->dreg;
6354 ins->inst_offset = 0;
6355 ins->sreg2 = ins_iconst->dreg;
6356 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6357 MONO_ADD_INS (cfg->cbb, ins);
6359 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6362 if (fsig->params [0]->type == MONO_TYPE_I4) {
6363 opcode = OP_ATOMIC_ADD_I4;
6364 cfg->has_atomic_add_i4 = TRUE;
6366 #if SIZEOF_REGISTER == 8
6367 else if (fsig->params [0]->type == MONO_TYPE_I8)
6368 opcode = OP_ATOMIC_ADD_I8;
6371 if (!mono_arch_opcode_supported (opcode))
6373 MONO_INST_NEW (cfg, ins, opcode);
6374 ins->dreg = mono_alloc_ireg (cfg);
6375 ins->inst_basereg = args [0]->dreg;
6376 ins->inst_offset = 0;
6377 ins->sreg2 = args [1]->dreg;
6378 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6379 MONO_ADD_INS (cfg->cbb, ins);
6382 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6383 MonoInst *f2i = NULL, *i2f;
6384 guint32 opcode, f2i_opcode, i2f_opcode;
6385 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6386 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6388 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6389 fsig->params [0]->type == MONO_TYPE_R4) {
6390 opcode = OP_ATOMIC_EXCHANGE_I4;
6391 f2i_opcode = OP_MOVE_F_TO_I4;
6392 i2f_opcode = OP_MOVE_I4_TO_F;
6393 cfg->has_atomic_exchange_i4 = TRUE;
6395 #if SIZEOF_REGISTER == 8
6397 fsig->params [0]->type == MONO_TYPE_I8 ||
6398 fsig->params [0]->type == MONO_TYPE_R8 ||
6399 fsig->params [0]->type == MONO_TYPE_I) {
6400 opcode = OP_ATOMIC_EXCHANGE_I8;
6401 f2i_opcode = OP_MOVE_F_TO_I8;
6402 i2f_opcode = OP_MOVE_I8_TO_F;
6405 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6406 opcode = OP_ATOMIC_EXCHANGE_I4;
6407 cfg->has_atomic_exchange_i4 = TRUE;
6413 if (!mono_arch_opcode_supported (opcode))
6417 /* TODO: Decompose these opcodes instead of bailing here. */
6418 if (COMPILE_SOFT_FLOAT (cfg))
6421 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6422 f2i->dreg = mono_alloc_ireg (cfg);
6423 f2i->sreg1 = args [1]->dreg;
6424 if (f2i_opcode == OP_MOVE_F_TO_I4)
6425 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6426 MONO_ADD_INS (cfg->cbb, f2i);
6429 MONO_INST_NEW (cfg, ins, opcode);
6430 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6431 ins->inst_basereg = args [0]->dreg;
6432 ins->inst_offset = 0;
6433 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6434 MONO_ADD_INS (cfg->cbb, ins);
6436 switch (fsig->params [0]->type) {
6438 ins->type = STACK_I4;
6441 ins->type = STACK_I8;
6444 #if SIZEOF_REGISTER == 8
6445 ins->type = STACK_I8;
6447 ins->type = STACK_I4;
6452 ins->type = STACK_R8;
6455 g_assert (mini_type_is_reference (fsig->params [0]));
6456 ins->type = STACK_OBJ;
6461 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6462 i2f->dreg = mono_alloc_freg (cfg);
6463 i2f->sreg1 = ins->dreg;
6464 i2f->type = STACK_R8;
6465 if (i2f_opcode == OP_MOVE_I4_TO_F)
6466 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6467 MONO_ADD_INS (cfg->cbb, i2f);
6472 if (cfg->gen_write_barriers && is_ref)
6473 emit_write_barrier (cfg, args [0], args [1]);
6475 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6476 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6477 guint32 opcode, f2i_opcode, i2f_opcode;
6478 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6479 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6481 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6482 fsig->params [1]->type == MONO_TYPE_R4) {
6483 opcode = OP_ATOMIC_CAS_I4;
6484 f2i_opcode = OP_MOVE_F_TO_I4;
6485 i2f_opcode = OP_MOVE_I4_TO_F;
6486 cfg->has_atomic_cas_i4 = TRUE;
6488 #if SIZEOF_REGISTER == 8
6490 fsig->params [1]->type == MONO_TYPE_I8 ||
6491 fsig->params [1]->type == MONO_TYPE_R8 ||
6492 fsig->params [1]->type == MONO_TYPE_I) {
6493 opcode = OP_ATOMIC_CAS_I8;
6494 f2i_opcode = OP_MOVE_F_TO_I8;
6495 i2f_opcode = OP_MOVE_I8_TO_F;
6498 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6499 opcode = OP_ATOMIC_CAS_I4;
6500 cfg->has_atomic_cas_i4 = TRUE;
6506 if (!mono_arch_opcode_supported (opcode))
6510 /* TODO: Decompose these opcodes instead of bailing here. */
6511 if (COMPILE_SOFT_FLOAT (cfg))
6514 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6515 f2i_new->dreg = mono_alloc_ireg (cfg);
6516 f2i_new->sreg1 = args [1]->dreg;
6517 if (f2i_opcode == OP_MOVE_F_TO_I4)
6518 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6519 MONO_ADD_INS (cfg->cbb, f2i_new);
6521 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6522 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6523 f2i_cmp->sreg1 = args [2]->dreg;
6524 if (f2i_opcode == OP_MOVE_F_TO_I4)
6525 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6526 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6529 MONO_INST_NEW (cfg, ins, opcode);
6530 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6531 ins->sreg1 = args [0]->dreg;
6532 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6533 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6534 MONO_ADD_INS (cfg->cbb, ins);
6536 switch (fsig->params [1]->type) {
6538 ins->type = STACK_I4;
6541 ins->type = STACK_I8;
6544 #if SIZEOF_REGISTER == 8
6545 ins->type = STACK_I8;
6547 ins->type = STACK_I4;
6551 ins->type = cfg->r4_stack_type;
6554 ins->type = STACK_R8;
6557 g_assert (mini_type_is_reference (fsig->params [1]));
6558 ins->type = STACK_OBJ;
6563 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6564 i2f->dreg = mono_alloc_freg (cfg);
6565 i2f->sreg1 = ins->dreg;
6566 i2f->type = STACK_R8;
6567 if (i2f_opcode == OP_MOVE_I4_TO_F)
6568 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6569 MONO_ADD_INS (cfg->cbb, i2f);
6574 if (cfg->gen_write_barriers && is_ref)
6575 emit_write_barrier (cfg, args [0], args [1]);
6577 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6578 fsig->params [1]->type == MONO_TYPE_I4) {
6579 MonoInst *cmp, *ceq;
6581 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6584 /* int32 r = CAS (location, value, comparand); */
6585 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6586 ins->dreg = alloc_ireg (cfg);
6587 ins->sreg1 = args [0]->dreg;
6588 ins->sreg2 = args [1]->dreg;
6589 ins->sreg3 = args [2]->dreg;
6590 ins->type = STACK_I4;
6591 MONO_ADD_INS (cfg->cbb, ins);
6593 /* bool result = r == comparand; */
6594 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6595 cmp->sreg1 = ins->dreg;
6596 cmp->sreg2 = args [2]->dreg;
6597 cmp->type = STACK_I4;
6598 MONO_ADD_INS (cfg->cbb, cmp);
6600 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6601 ceq->dreg = alloc_ireg (cfg);
6602 ceq->type = STACK_I4;
6603 MONO_ADD_INS (cfg->cbb, ceq);
6605 /* *success = result; */
6606 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6608 cfg->has_atomic_cas_i4 = TRUE;
6610 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6611 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6615 } else if (cmethod->klass->image == mono_defaults.corlib &&
6616 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6617 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6620 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6622 MonoType *t = fsig->params [0];
6624 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6626 g_assert (t->byref);
6627 /* t is a byref type, so the reference check is more complicated */
6628 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6629 if (t->type == MONO_TYPE_I1)
6630 opcode = OP_ATOMIC_LOAD_I1;
6631 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6632 opcode = OP_ATOMIC_LOAD_U1;
6633 else if (t->type == MONO_TYPE_I2)
6634 opcode = OP_ATOMIC_LOAD_I2;
6635 else if (t->type == MONO_TYPE_U2)
6636 opcode = OP_ATOMIC_LOAD_U2;
6637 else if (t->type == MONO_TYPE_I4)
6638 opcode = OP_ATOMIC_LOAD_I4;
6639 else if (t->type == MONO_TYPE_U4)
6640 opcode = OP_ATOMIC_LOAD_U4;
6641 else if (t->type == MONO_TYPE_R4)
6642 opcode = OP_ATOMIC_LOAD_R4;
6643 else if (t->type == MONO_TYPE_R8)
6644 opcode = OP_ATOMIC_LOAD_R8;
6645 #if SIZEOF_REGISTER == 8
6646 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6647 opcode = OP_ATOMIC_LOAD_I8;
6648 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6649 opcode = OP_ATOMIC_LOAD_U8;
6651 else if (t->type == MONO_TYPE_I)
6652 opcode = OP_ATOMIC_LOAD_I4;
6653 else if (is_ref || t->type == MONO_TYPE_U)
6654 opcode = OP_ATOMIC_LOAD_U4;
6658 if (!mono_arch_opcode_supported (opcode))
6661 MONO_INST_NEW (cfg, ins, opcode);
6662 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6663 ins->sreg1 = args [0]->dreg;
6664 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6665 MONO_ADD_INS (cfg->cbb, ins);
6668 case MONO_TYPE_BOOLEAN:
6675 ins->type = STACK_I4;
6679 ins->type = STACK_I8;
6683 #if SIZEOF_REGISTER == 8
6684 ins->type = STACK_I8;
6686 ins->type = STACK_I4;
6690 ins->type = cfg->r4_stack_type;
6693 ins->type = STACK_R8;
6697 ins->type = STACK_OBJ;
6703 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6705 MonoType *t = fsig->params [0];
6708 g_assert (t->byref);
6709 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6710 if (t->type == MONO_TYPE_I1)
6711 opcode = OP_ATOMIC_STORE_I1;
6712 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6713 opcode = OP_ATOMIC_STORE_U1;
6714 else if (t->type == MONO_TYPE_I2)
6715 opcode = OP_ATOMIC_STORE_I2;
6716 else if (t->type == MONO_TYPE_U2)
6717 opcode = OP_ATOMIC_STORE_U2;
6718 else if (t->type == MONO_TYPE_I4)
6719 opcode = OP_ATOMIC_STORE_I4;
6720 else if (t->type == MONO_TYPE_U4)
6721 opcode = OP_ATOMIC_STORE_U4;
6722 else if (t->type == MONO_TYPE_R4)
6723 opcode = OP_ATOMIC_STORE_R4;
6724 else if (t->type == MONO_TYPE_R8)
6725 opcode = OP_ATOMIC_STORE_R8;
6726 #if SIZEOF_REGISTER == 8
6727 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6728 opcode = OP_ATOMIC_STORE_I8;
6729 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6730 opcode = OP_ATOMIC_STORE_U8;
6732 else if (t->type == MONO_TYPE_I)
6733 opcode = OP_ATOMIC_STORE_I4;
6734 else if (is_ref || t->type == MONO_TYPE_U)
6735 opcode = OP_ATOMIC_STORE_U4;
6739 if (!mono_arch_opcode_supported (opcode))
6742 MONO_INST_NEW (cfg, ins, opcode);
6743 ins->dreg = args [0]->dreg;
6744 ins->sreg1 = args [1]->dreg;
6745 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6746 MONO_ADD_INS (cfg->cbb, ins);
6748 if (cfg->gen_write_barriers && is_ref)
6749 emit_write_barrier (cfg, args [0], args [1]);
6755 } else if (cmethod->klass->image == mono_defaults.corlib &&
6756 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6757 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6758 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6759 if (should_insert_brekpoint (cfg->method)) {
6760 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6762 MONO_INST_NEW (cfg, ins, OP_NOP);
6763 MONO_ADD_INS (cfg->cbb, ins);
6767 } else if (cmethod->klass->image == mono_defaults.corlib &&
6768 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6769 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6770 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6772 EMIT_NEW_ICONST (cfg, ins, 1);
6774 EMIT_NEW_ICONST (cfg, ins, 0);
6777 } else if (cmethod->klass->image == mono_defaults.corlib &&
6778 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6779 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6780 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6781 /* No stack walks are currently available, so implement this as an intrinsic */
6782 MonoInst *assembly_ins;
6784 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6785 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6788 } else if (cmethod->klass->image == mono_defaults.corlib &&
6789 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6790 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6791 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6792 /* No stack walks are currently available, so implement this as an intrinsic */
6793 MonoInst *method_ins;
6794 MonoMethod *declaring = cfg->method;
6796 /* This returns the declaring generic method */
6797 if (declaring->is_inflated)
6798 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6799 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6800 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6801 cfg->no_inline = TRUE;
6802 if (cfg->method != cfg->current_method)
6803 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6806 } else if (cmethod->klass == mono_defaults.math_class) {
6808 * There is general branchless code for Min/Max, but it does not work for
6810 * http://everything2.com/?node_id=1051618
6812 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6813 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6814 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6815 ins->dreg = alloc_preg (cfg);
6816 ins->type = STACK_I4;
6817 MONO_ADD_INS (cfg->cbb, ins);
6819 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6820 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6821 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6822 !strcmp (cmethod->klass->name, "Selector")) ||
6823 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6824 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6825 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6826 !strcmp (cmethod->klass->name, "Selector"))
6828 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6829 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6830 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6833 MonoJumpInfoToken *ji;
6836 if (args [0]->opcode == OP_GOT_ENTRY) {
6837 pi = (MonoInst *)args [0]->inst_p1;
6838 g_assert (pi->opcode == OP_PATCH_INFO);
6839 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6840 ji = (MonoJumpInfoToken *)pi->inst_p0;
6842 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6843 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6846 NULLIFY_INS (args [0]);
6848 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6849 return_val_if_nok (&cfg->error, NULL);
6851 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6852 ins->dreg = mono_alloc_ireg (cfg);
6855 MONO_ADD_INS (cfg->cbb, ins);
6860 #ifdef MONO_ARCH_SIMD_INTRINSICS
6861 if (cfg->opt & MONO_OPT_SIMD) {
6862 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6868 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6872 if (COMPILE_LLVM (cfg)) {
6873 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6878 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6882 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to alternative implementations at IR
 * emission time.  Currently only String:InternalAllocateStr is handled: when
 * allocation profiling is off and MONO_OPT_SHARED is not set, the call is
 * replaced with a call to the GC's managed string allocator (vtable +
 * length as arguments).  Returns the emitted call instruction when a
 * redirection applies.
 */
6885 inline static MonoInst*
6886 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6887 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6889 if (method->klass == mono_defaults.string_class) {
6890 /* managed string allocation support */
6891 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6892 MonoInst *iargs [2];
6893 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6894 MonoMethod *managed_alloc = NULL;
6896 g_assert (vtable); /* Should not fail since it is System.String */
/* Managed allocators are unavailable when cross-compiling. */
6897 #ifndef MONO_CROSS_COMPILE
6898 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6902 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6903 iargs [1] = args [0];
6904 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created local variables so an
 * inlined callee can access them as its own arguments.  One OP_LOCAL var is
 * created per argument (including the implicit 'this' when sig->hasthis)
 * and an ARGSTORE is emitted for each.
 */
6911 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6913 MonoInst *store, *temp;
6916 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static type is taken from the stack entry, not the signature. */
6917 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6920 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6921 * would be different than the MonoInst's used to represent arguments, and
6922 * the ldelema implementation can't deal with that.
6923 * Solution: When ldelema is used on an inline argument, create a var for
6924 * it, emit ldelema on that var, and emit the saving code below in
6925 * inline_method () if needed.
6927 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6928 cfg->args [i] = temp;
6929 /* This uses cfg->args [i] which is set by the preceding line */
6930 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6931 store->cil_code = sp [0]->cil_code;
6936 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6937 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6939 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The value is read once and cached in a static.
 * An empty limit means no filtering.
 */
6941 check_inline_called_method_name_limit (MonoMethod *called_method)
6944 static const char *limit = NULL;
6946 if (limit == NULL) {
6947 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6949 if (limit_string != NULL)
6950 limit = limit_string;
6955 if (limit [0] != '\0') {
6956 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: the callee name must begin with 'limit'. */
6958 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6959 g_free (called_method_name);
6961 //return (strncmp_result <= 0);
6962 return (strncmp_result == 0);
6969 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Same debugging aid as check_inline_called_method_name_limit (), but
 * filtering on the CALLER's full name via the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 */
6971 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6974 static const char *limit = NULL;
6976 if (limit == NULL) {
6977 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6978 if (limit_string != NULL) {
6979 limit = limit_string;
6985 if (limit [0] != '\0') {
6986 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: the caller name must begin with 'limit'. */
6988 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6989 g_free (caller_method_name);
6991 //return (strncmp_result <= 0);
6992 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG to the zero value of RTYPE:
 * NULL for pointers/references, 0 for integers, 0.0 for floats and
 * VZERO for value types.  Used e.g. to give an inline return variable a
 * defined value on paths that never set it.
 */
7000 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zero constants so OP_R4CONST/OP_R8CONST can point at stable storage. */
7002 static double r8_0 = 0.0;
7003 static float r4_0 = 0.0;
7007 rtype = mini_get_underlying_type (rtype);
7011 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
7012 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7013 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
7014 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7015 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
7016 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay in single precision: use OP_R4CONST. */
7017 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7018 ins->type = STACK_R4;
7019 ins->inst_p0 = (void*)&r4_0;
7021 MONO_ADD_INS (cfg->cbb, ins);
7022 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7023 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7024 ins->type = STACK_R8;
7025 ins->inst_p0 = (void*)&r8_0;
7027 MONO_ADD_INS (cfg->cbb, ins);
7028 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7029 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7030 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
7031 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type variables known to be valuetypes are zeroed like VTs. */
7032 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
7034 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* placeholder initializations
 * that keep the IR/SSA form valid without generating real code.  Falls back
 * to emit_init_rvar () for types with no dummy opcode.
 */
7039 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7043 rtype = mini_get_underlying_type (rtype);
7047 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7048 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7049 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7050 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7051 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7052 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7053 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7054 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7055 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7056 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7057 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7058 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7059 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7060 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero initialization. */
7062 emit_init_rvar (cfg, dreg, rtype);
7066 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float a fresh
 * dreg is initialized and then stored to the local, so the store goes
 * through the usual soft-float decomposition; otherwise the local's dreg is
 * initialized directly.
 */
7068 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7070 MonoInst *var = cfg->locals [local];
7071 if (COMPILE_SOFT_FLOAT (cfg)) {
7073 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7074 emit_init_rvar (cfg, reg, type);
/* The value to store is the last instruction emitted by emit_init_rvar (). */
7075 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7078 emit_init_rvar (cfg, var->dreg, type);
7080 emit_dummy_init_rvar (cfg, var->dreg, type);
7087 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  The caller's
 * compilation state (locals, args, bblock maps, generic context, ...) is
 * saved, mono_method_to_ir () is run on the callee between a fresh start
 * block (sbblock) and end block (ebblock), and the state is restored
 * afterwards.  On success the new blocks are linked/merged into the caller
 * CFG and the return value (if any) is loaded from RVAR; on failure the
 * newly added blocks are discarded.  INLINE_ALWAYS bypasses the cost limit
 * and turns header-loading failures into compilation errors.
 */
7090 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7091 guchar *ip, guint real_offset, gboolean inline_always)
7094 MonoInst *ins, *rvar = NULL;
7095 MonoMethodHeader *cheader;
7096 MonoBasicBlock *ebblock, *sbblock;
7098 MonoMethod *prev_inlined_method;
7099 MonoInst **prev_locals, **prev_args;
7100 MonoType **prev_arg_types;
7101 guint prev_real_offset;
7102 GHashTable *prev_cbb_hash;
7103 MonoBasicBlock **prev_cil_offset_to_bb;
7104 MonoBasicBlock *prev_cbb;
7105 const unsigned char *prev_ip;
7106 unsigned char *prev_cil_start;
7107 guint32 prev_cil_offset_to_bb_len;
7108 MonoMethod *prev_current_method;
7109 MonoGenericContext *prev_generic_context;
7110 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7112 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based inlining filters, see the check_* helpers above. */
7114 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7115 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7118 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7119 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7124 fsig = mono_method_signature (cmethod);
7126 if (cfg->verbose_level > 2)
7127 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7129 if (!cmethod->inline_info) {
7130 cfg->stat_inlineable_methods++;
7131 cmethod->inline_info = 1;
7134 /* allocate local variables */
7135 cheader = mono_method_get_header_checked (cmethod, &error);
7137 if (inline_always) {
/* When inlining is mandatory, a missing header is a hard compile error. */
7138 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7139 mono_error_move (&cfg->error, &error);
7141 mono_error_cleanup (&error);
7146 /*Must verify before creating locals as it can cause the JIT to assert.*/
7147 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7148 mono_metadata_free_mh (cheader);
7152 /* allocate space to store the return value */
7153 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7154 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
7157 prev_locals = cfg->locals;
7158 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7159 for (i = 0; i < cheader->num_locals; ++i)
7160 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7162 /* allocate start and end blocks */
7163 /* This is needed so if the inline is aborted, we can clean up */
7164 NEW_BBLOCK (cfg, sbblock);
7165 sbblock->real_offset = real_offset;
7167 NEW_BBLOCK (cfg, ebblock);
7168 ebblock->block_num = cfg->num_bblocks++;
7169 ebblock->real_offset = real_offset;
/* Save the remaining per-method emission state before recursing. */
7171 prev_args = cfg->args;
7172 prev_arg_types = cfg->arg_types;
7173 prev_inlined_method = cfg->inlined_method;
7174 cfg->inlined_method = cmethod;
7175 cfg->ret_var_set = FALSE;
7176 cfg->inline_depth ++;
7177 prev_real_offset = cfg->real_offset;
7178 prev_cbb_hash = cfg->cbb_hash;
7179 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7180 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7181 prev_cil_start = cfg->cil_start;
7183 prev_cbb = cfg->cbb;
7184 prev_current_method = cfg->current_method;
7185 prev_generic_context = cfg->generic_context;
7186 prev_ret_var_set = cfg->ret_var_set;
7187 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method keeps virtual-call semantics (null check). */
7189 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7192 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7194 ret_var_set = cfg->ret_var_set;
/* Restore the caller's emission state. */
7196 cfg->inlined_method = prev_inlined_method;
7197 cfg->real_offset = prev_real_offset;
7198 cfg->cbb_hash = prev_cbb_hash;
7199 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7200 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7201 cfg->cil_start = prev_cil_start;
7203 cfg->locals = prev_locals;
7204 cfg->args = prev_args;
7205 cfg->arg_types = prev_arg_types;
7206 cfg->current_method = prev_current_method;
7207 cfg->generic_context = prev_generic_context;
7208 cfg->ret_var_set = prev_ret_var_set;
7209 cfg->disable_inline = prev_disable_inline;
7210 cfg->inline_depth --;
/* Accept: small cost, forced inline, or AggressiveInlining (costs >= 0 means no abort). */
7212 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7213 if (cfg->verbose_level > 2)
7214 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7216 cfg->stat_inlined_methods++;
7218 /* always add some code to avoid block split failures */
7219 MONO_INST_NEW (cfg, ins, OP_NOP);
7220 MONO_ADD_INS (prev_cbb, ins);
7222 prev_cbb->next_bb = sbblock;
7223 link_bblock (cfg, prev_cbb, sbblock);
7226 * Get rid of the begin and end bblocks if possible to aid local
7229 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7231 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7232 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7234 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7235 MonoBasicBlock *prev = ebblock->in_bb [0];
7237 if (prev->next_bb == ebblock) {
7238 mono_merge_basic_blocks (cfg, prev, ebblock);
7240 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7241 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7242 cfg->cbb = prev_cbb;
7245 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7250 * It's possible that the rvar is set in some prev bblock, but not in others.
7256 for (i = 0; i < ebblock->in_count; ++i) {
7257 bb = ebblock->in_bb [i];
7259 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7262 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7272 * If the inlined method contains only a throw, then the ret var is not
7273 * set, so set it to a dummy value.
7276 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7278 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7281 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7284 if (cfg->verbose_level > 2)
7285 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7286 cfg->exception_type = MONO_EXCEPTION_NONE;
7288 /* This gets rid of the newly added bblocks */
7289 cfg->cbb = prev_cbb;
7291 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7296 * Some of these comments may well be out-of-date.
7297 * Design decisions: we do a single pass over the IL code (and we do bblock
7298 * splitting/merging in the few cases when it's required: a back jump to an IL
7299 * address that was not already seen as bblock starting point).
7300 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7301 * Complex operations are decomposed in simpler ones right away. We need to let the
7302 * arch-specific code peek and poke inside this process somehow (except when the
7303 * optimizations can take advantage of the full semantic info of coarse opcodes).
7304 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7305 * MonoInst->opcode initially is the IL opcode or some simplification of that
7306 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7307 * opcode with value bigger than OP_LAST.
7308 * At this point the IR can be handed over to an interpreter, a dumb code generator
7309 * or to the optimizing code generator that will translate it to SSA form.
7311 * Profiling directed optimizations.
7312 * We may compile by default with few or no optimizations and instrument the code
7313 * or the user may indicate what methods to optimize the most either in a config file
7314 * or through repeated runs where the compiler applies offline the optimizations to
7315 * each method and then decides if it was worth it.
7318 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7319 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7320 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7321 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7322 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7323 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7324 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7325 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7327 /* offset from br.s -> br like opcodes */
7328 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL offset IP falls inside basic block BB, i.e. the
 * offset either maps to no block start (interior of a block) or to BB itself.
 */
7331 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7333 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7335 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the CIL stream in [START, END) and create a basic block (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch/switch.  Blocks ending in CEE_THROW are marked out_of_line so they
 * can be placed at the end of the method.
 */
7339 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7341 unsigned char *ip = start;
7342 unsigned char *target;
7345 MonoBasicBlock *bblock;
7346 const MonoOpcode *opcode;
7349 cli_addr = ip - start;
7350 i = mono_opcode_value ((const guint8 **)&ip, end);
7353 opcode = &mono_opcodes [i];
/* Advance by operand size and register block boundaries for branches. */
7354 switch (opcode->argument) {
7355 case MonoInlineNone:
7358 case MonoInlineString:
7359 case MonoInlineType:
7360 case MonoInlineField:
7361 case MonoInlineMethod:
7364 case MonoShortInlineR:
7371 case MonoShortInlineVar:
7372 case MonoShortInlineI:
7375 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
7376 target = start + cli_addr + 2 + (signed char)ip [1];
7377 GET_BBLOCK (cfg, bblock, target);
7380 GET_BBLOCK (cfg, bblock, ip);
7382 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
7383 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7384 GET_BBLOCK (cfg, bblock, target);
7387 GET_BBLOCK (cfg, bblock, ip);
7389 case MonoInlineSwitch: {
7390 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
7393 cli_addr += 5 + 4 * n;
7394 target = start + cli_addr;
7395 GET_BBLOCK (cfg, bblock, target);
7397 for (j = 0; j < n; ++j) {
7398 target = start + cli_addr + (gint32)read32 (ip);
7399 GET_BBLOCK (cfg, bblock, target);
7409 g_assert_not_reached ();
7412 if (i == CEE_THROW) {
7413 unsigned char *bb_start = ip - 1;
7415 /* Find the start of the bblock containing the throw */
7417 while ((bb_start >= start) && !bblock) {
7418 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7422 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in method M to a MonoMethod, allowing open constructed
 * types.  For wrapper methods the token indexes the wrapper data (and the
 * result is inflated with CONTEXT if generic); otherwise the token is looked
 * up in M's image.  Errors are reported through ERROR.
 */
7432 static inline MonoMethod *
7433 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7437 mono_error_init (error);
7439 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7440 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7442 method = mono_class_inflate_generic_method_checked (method, context, error);
7445 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared code
 * a resolved method on an open constructed type is rejected as a bad-image
 * error.  CFG may be NULL (errors are then swallowed into a local MonoError).
 */
7451 static inline MonoMethod *
7452 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7455 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7457 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7458 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7462 if (!method && !cfg)
7463 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in METHOD's context, inflating with CONTEXT
 * for generic instantiations.  For wrappers the class comes from the wrapper
 * data.  The class is initialized before returning; lookup errors are
 * currently swallowed (see FIXMEs).
 */
7468 static inline MonoClass*
7469 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7474 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7475 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7477 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
7478 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7481 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7482 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7485 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in METHOD's context, inflating
 * with CONTEXT when generic.  For wrappers the signature comes from the
 * wrapper data.  Errors are reported through ERROR; returns NULL on failure.
 */
7489 static inline MonoMethodSignature*
7490 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
7492 MonoMethodSignature *fsig;
7494 mono_error_init (error);
7495 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7496 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7498 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
7499 return_val_if_nok (error, NULL);
7502 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and lazily cache) the SecurityManager.ThrowException managed
 * method used to raise CoreCLR security exceptions from JITted code.
 */
7508 throw_exception (void)
7510 static MonoMethod *method = NULL;
7513 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7514 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point, raising the pre-built exception object EX at runtime.
 */
7521 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7523 MonoMethod *thrower = throw_exception ();
7526 EMIT_NEW_PCONST (cfg, args [0], ex);
7527 mono_emit_method_call (cfg, thrower, args, NULL);
7531 * Return the original method if a wrapper is specified.  We can only access
7532 * the custom attributes from the original method.
7535 get_original_method (MonoMethod *method)
7537 if (method->wrapper_type == MONO_WRAPPER_NONE)
/* Non-wrappers are already the original method. */
7540 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7541 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7544 /* in other cases we need to find the original method */
7545 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the security exception at runtime (checked against the original
 * method when CALLER is a wrapper).
 */
7549 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7551 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7552 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7554 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the security exception at runtime (checked against the original
 * method when CALLER is a wrapper).
 */
7558 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7560 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7561 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7563 emit_throw_exception (cfg, ex);
7567 * Check that the IL instructions at ip are the array initialization
7568 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence following a newarr and, if it
 * matches, return a pointer to the static field data so the JIT can copy it
 * directly.  OUT_SIZE receives the data size and OUT_FIELD_TOKEN the field
 * token.  For AOT the field RVA is returned instead so the lookup happens
 * at load time.
 */
7571 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7574 * newarr[System.Int32]
7576 * ldtoken field valuetype ...
7577 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's table id (a Field token). */
7579 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7581 guint32 token = read32 (ip + 7);
7582 guint32 field_token = read32 (ip + 2);
7583 guint32 field_index = field_token & 0xffffff;
7585 const char *data_ptr;
7587 MonoMethod *cmethod;
7588 MonoClass *dummy_class;
7589 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7593 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7597 *out_field_token = field_token;
7599 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The call must really be corlib's RuntimeHelpers::InitializeArray. */
7602 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7604 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7605 case MONO_TYPE_BOOLEAN:
7609 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7610 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7611 case MONO_TYPE_CHAR:
/* Sanity check: the computed array size must fit in the field's data. */
7628 if (size > mono_type_size (field->type, &dummy_align))
7631 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7632 if (!image_is_dynamic (method->klass->image)) {
7633 field_index = read32 (ip + 2) & 0xffffff;
7634 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7635 data_ptr = mono_image_rva_map (method->klass->image, rva);
7636 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7637 /* for aot code we do the lookup on load */
7638 if (aot && data_ptr)
7639 return (const char *)GUINT_TO_POINTER (rva);
7641 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7643 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP in METHOD, including a disassembly of the offending instruction when
 * the method body is available.
 */
7651 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7654 char *method_fname = mono_method_full_name (method, TRUE);
7656 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7659 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7660 mono_error_cleanup (&error);
7661 } else if (header->code_size == 0)
7662 method_code = g_strdup ("method body is empty.");
7664 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7665 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7666 g_free (method_fname);
7667 g_free (method_code);
/* Header is freed with the cfg's mempool, not here. */
7668 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value *SP into local variable N. When the store would
 * be a plain register move and *SP is the constant instruction just emitted
 * into the current bblock, retarget that instruction's dreg to the local's
 * vreg instead of emitting a separate move.
 */
7672 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7675 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* Only safe for ICONST/I8CONST: they are guaranteed to be a single instruction */
7676 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7677 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7678 /* Optimize reg-reg moves away */
7680 * Can't optimize other opcodes, since sp[0] might point to
7681 * the last ins of a decomposed opcode.
7683 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* Fallback: emit a regular local store */
7685 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7690 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Pattern-match "ldloca <local>; initobj <type>" and replace it with a
 * direct initialization of the local, avoiding the address-taken local.
 * Returns the advanced IL pointer on success (not visible in this fragment).
 */
7693 static inline unsigned char *
7694 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7704 local = read16 (ip + 2);
/* Look ahead for CEE_PREFIX1 CEE_INITOBJ <token> in the same bblock */
7708 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7709 /* From the INITOBJ case */
7710 token = read32 (ip + 2);
7711 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7712 CHECK_TYPELOAD (klass);
7713 type = mini_get_underlying_type (&klass->byval_arg);
/* Zero-initialize the local in place instead of taking its address */
7714 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call in llvm-only mode, where vtable and
 * IMT slots hold function descriptors (addr + arg pairs) instead of raw code
 * addresses. Handles four cases: plain virtual calls, simple interface calls,
 * generic-virtual/variant-interface calls, and a slow path for gsharedvt.
 * NOTE(review): fragmentary view — some lines of this function are not visible.
 */
7722 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7724 MonoInst *icall_args [16];
7725 MonoInst *call_target, *ins, *vtable_ins;
7726 int arg_reg, this_reg, vtable_reg;
7727 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7728 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7729 gboolean variant_iface = FALSE;
7734 * In llvm-only mode, vtables contain function descriptors instead of
7735 * method addresses/trampolines.
/* Explicit null check on the receiver since there is no trampoline to fault */
7737 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots; classes use the vtable index */
7740 slot = mono_method_get_imt_slot (cmethod);
7742 slot = mono_method_get_vtable_index (cmethod);
7744 this_reg = sp [0]->dreg;
7746 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7747 variant_iface = TRUE;
/* Case 1: a plain (non-generic, non-interface, non-gsharedvt) virtual call */
7749 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7751 * The simplest case, a normal virtual call.
7753 int slot_reg = alloc_preg (cfg);
7754 int addr_reg = alloc_preg (cfg);
7755 int arg_reg = alloc_preg (cfg);
7756 MonoBasicBlock *non_null_bb;
7758 vtable_reg = alloc_preg (cfg);
7759 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7760 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7762 /* Load the vtable slot, which contains a function descriptor. */
7763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7765 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (expected case, hence MONO_INST_LIKELY) */
7767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7768 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: initialize the vtable slot via an icall */
7772 // FIXME: Make the wrapper use the preserveall cconv
7773 // FIXME: Use one icall per slot for small slot numbers ?
7774 icall_args [0] = vtable_ins;
7775 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7776 /* Make the icall return the vtable slot value to save some code space */
7777 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7778 ins->dreg = slot_reg;
7779 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7782 MONO_START_BB (cfg, non_null_bb);
7783 /* Load the address + arg from the vtable slot */
7784 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7787 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: a simple (non-generic, non-variant) interface call through the IMT */
7790 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7792 * A simple interface call
7794 * We make a call through an imt slot to obtain the function descriptor we need to call.
7795 * The imt slot contains a function descriptor for a runtime function + arg.
7797 int slot_reg = alloc_preg (cfg);
7798 int addr_reg = alloc_preg (cfg);
7799 int arg_reg = alloc_preg (cfg);
7800 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7802 vtable_reg = alloc_preg (cfg);
7803 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper */
7804 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7807 * The slot is already initialized when the vtable is created so there is no need
7811 /* Load the imt slot, which contains a function descriptor. */
7812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7814 /* Load the address + arg of the imt thunk from the imt slot */
7815 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7816 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7818 * IMT thunks in llvm-only mode are C functions which take an info argument
7819 * plus the imt method and return the ftndesc to call.
7821 icall_args [0] = thunk_arg_ins;
7822 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7823 cmethod, MONO_RGCTX_INFO_METHOD);
7824 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7826 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual methods and variant interface calls */
7829 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7831 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7832 * dynamically extended as more instantiations are discovered.
7833 * This handles generic virtual methods both on classes and interfaces.
7835 int slot_reg = alloc_preg (cfg);
7836 int addr_reg = alloc_preg (cfg);
7837 int arg_reg = alloc_preg (cfg);
7838 int ftndesc_reg = alloc_preg (cfg);
7839 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7840 MonoBasicBlock *slowpath_bb, *end_bb;
7842 NEW_BBLOCK (cfg, slowpath_bb);
7843 NEW_BBLOCK (cfg, end_bb);
7845 vtable_reg = alloc_preg (cfg);
7846 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7848 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7850 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7852 /* Load the slot, which contains a function descriptor. */
7853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7855 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7856 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7857 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7858 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7861 /* Same as with iface calls */
7862 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7863 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7864 icall_args [0] = thunk_arg_ins;
7865 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7866 cmethod, MONO_RGCTX_INFO_METHOD);
7867 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7868 ftndesc_ins->dreg = ftndesc_reg;
7870 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7871 * they don't know about yet. Fall back to the slowpath in that case.
7873 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7874 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the ftndesc through a runtime icall */
7879 MONO_START_BB (cfg, slowpath_bb);
7880 icall_args [0] = vtable_ins;
7881 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7882 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7883 cmethod, MONO_RGCTX_INFO_METHOD);
7885 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7887 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7888 ftndesc_ins->dreg = ftndesc_reg;
7889 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7892 MONO_START_BB (cfg, end_bb);
7893 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: gsharedvt fallback — resolve the call target through an icall */
7897 * Non-optimized cases
7899 icall_args [0] = sp [0];
7900 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7902 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7903 cmethod, MONO_RGCTX_INFO_METHOD);
/* Pass the address of arg_reg so the icall can return the extra argument */
7905 arg_reg = alloc_preg (cfg);
7906 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7907 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7909 g_assert (is_gsharedvt);
7911 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7913 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7916 * Pass the extra argument even if the callee doesn't receive it, most
7917 * calling conventions allow this.
7919 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * up the parent chain. (Loop structure not fully visible in this fragment.)
 */
7923 is_exception_class (MonoClass *klass)
7926 if (klass == mono_defaults.exception_class)
7928 klass = klass->parent;
7934 * is_jit_optimizer_disabled:
/* Determine whether M's assembly has a DebuggableAttribute with the */
7937 * IsJITOptimizerDisabled flag set.
7940 is_jit_optimizer_disabled (MonoMethod *m)
7943 MonoAssembly *ass = m->klass->image->assembly;
7944 MonoCustomAttrInfo* attrs;
7947 gboolean val = FALSE;
/* Result is cached per assembly; the barrier below orders value before flag */
7950 if (ass->jit_optimizer_disabled_inited)
7951 return ass->jit_optimizer_disabled;
7953 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class not present: optimizer cannot be disabled */
7957 ass->jit_optimizer_disabled = FALSE;
7958 mono_memory_barrier ();
7959 ass->jit_optimizer_disabled_inited = TRUE;
7963 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7964 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7966 for (i = 0; i < attrs->num_attrs; ++i) {
7967 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7969 MonoMethodSignature *sig;
7971 if (!attr->ctor || attr->ctor->klass != klass)
7973 /* Decode the attribute. See reflection.c */
7974 p = (const char*)attr->data;
/* Custom attribute blobs start with the prolog 0x0001 (ECMA-335 II.23.3) */
7975 g_assert (read16 (p) == 0x0001);
7978 // FIXME: Support named parameters
7979 sig = mono_method_signature (attr->ctor);
/* Only handle the (bool, bool) ctor: (isJITTrackingEnabled, isJITOptimizerDisabled) — TODO confirm */
7980 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7982 /* Two boolean arguments */
7986 mono_custom_attrs_free (attrs);
/* Publish the cached result: value first, then the inited flag */
7989 ass->jit_optimizer_disabled = val;
7990 mono_memory_barrier ();
7991 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a call from METHOD to CMETHOD with signature FSIG made by
 * CALL_OPCODE can be compiled as a tail call. Starts from the arch-specific
 * answer, then applies a series of disqualifying conditions.
 */
7997 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7999 gboolean supported_tail_call;
8002 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
8004 for (i = 0; i < fsig->param_count; ++i) {
8005 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
8006 /* These can point to the current method's stack */
8007 supported_tail_call = FALSE;
8009 if (fsig->hasthis && cmethod->klass->valuetype)
8010 /* this might point to the current method's stack */
8011 supported_tail_call = FALSE;
8012 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
8013 supported_tail_call = FALSE;
/* The LMF would be popped before the callee runs */
8014 if (cfg->method->save_lmf)
8015 supported_tail_call = FALSE;
8016 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
8017 supported_tail_call = FALSE;
/* Only plain CEE_CALL is eligible here */
8018 if (call_opcode != CEE_CALL)
8019 supported_tail_call = FALSE;
8021 /* Debugging support */
8023 if (supported_tail_call) {
8024 if (!mono_debug_count ())
8025 supported_tail_call = FALSE;
8029 return supported_tail_call;
8035 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation for a NEWOBJ. Depending on generic
 * sharing, inlining eligibility and gsharedvt, this either inlines the ctor,
 * emits a direct call, or emits an indirect call with an rgctx/vtable arg.
 * NOTE(review): fragmentary view — some lines of this function are not visible.
 */
8038 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
8039 MonoInst **sp, guint8 *ip, int *inline_costs)
8041 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an extra vtable/mrgctx argument */
8043 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8044 mono_method_is_generic_sharable (cmethod, TRUE)) {
8045 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8046 mono_class_vtable (cfg->domain, cmethod->klass);
8047 CHECK_TYPELOAD (cmethod->klass);
8049 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8050 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8053 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8054 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8056 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8058 CHECK_TYPELOAD (cmethod->klass);
8059 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8064 /* Avoid virtual calls to ctors if possible */
8065 if (mono_class_is_marshalbyref (cmethod->klass))
8066 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first */
8068 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8069 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8070 CHECK_CFG_EXCEPTION;
/* Then try inlining (not for exception subclasses) */
8071 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8072 mono_method_check_inlining (cfg, cmethod) &&
8073 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8076 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8077 cfg->real_offset += 5;
/* discount the call cost we avoided */
8079 *inline_costs += costs - 5;
8081 INLINE_FAILURE ("inline failure");
8082 // FIXME-VT: Clean this up
8083 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8084 GSHAREDVT_FAILURE(*ip);
8085 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt: call through the out trampoline */
8087 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8090 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8092 if (cfg->llvm_only) {
8093 // FIXME: Avoid initializing vtable_arg
8094 emit_llvmonly_calli (cfg, fsig, sp, addr);
8096 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
8098 } else if (context_used &&
8099 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8100 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8101 MonoInst *cmethod_addr;
8103 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8105 if (cfg->llvm_only) {
8106 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8107 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8108 emit_llvmonly_calli (cfg, fsig, sp, addr);
8110 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8111 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8113 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a direct (non-inlined) call */
8116 INLINE_FAILURE ("ctor call");
8117 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8118 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR to return VAL from the current method. Valuetype returns go
 * through the return variable or the vret address; soft-float R4 returns are
 * converted through an icall; everything else uses the arch setret hook.
 */
8125 emit_setret (MonoCompile *cfg, MonoInst *val)
8127 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype returned by reference (STOBJ) */
8130 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8133 if (!cfg->vret_addr) {
8134 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Store through the hidden vret address argument */
8136 EMIT_NEW_RETLOADA (cfg, ret_addr);
8138 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8139 ins->klass = mono_class_from_mono_type (ret_type);
8142 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: R4 return values need an explicit conversion icall */
8143 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8144 MonoInst *iargs [1];
8148 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8149 mono_arch_emit_setret (cfg, cfg->method, conv);
8151 mono_arch_emit_setret (cfg, cfg->method, val);
8154 mono_arch_emit_setret (cfg, cfg->method, val);
8160 * mono_method_to_ir:
8162 * Translate the .net IL into linear IR.
8164 * @start_bblock: if not NULL, the starting basic block, used during inlining.
8165 * @end_bblock: if not NULL, the ending basic block, used during inlining.
8166 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
8167 * @inline_args: if not NULL, contains the arguments to the inline call
8168 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
8169 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
8171 * This method is used to turn ECMA IL into Mono's internal Linear IR
8172 * representation. It is used both for entire methods, as well as
8173 * inlining existing methods. In the former case, the @start_bblock,
8174 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
8175 * inline_offset is set to zero.
8177 * Returns: the inline cost, or -1 if there was an error processing this method.
8180 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8181 MonoInst *return_var, MonoInst **inline_args,
8182 guint inline_offset, gboolean is_virtual_call)
8185 MonoInst *ins, **sp, **stack_start;
8186 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8187 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8188 MonoMethod *cmethod, *method_definition;
8189 MonoInst **arg_array;
8190 MonoMethodHeader *header;
8192 guint32 token, ins_flag;
8194 MonoClass *constrained_class = NULL;
8195 unsigned char *ip, *end, *target, *err_pos;
8196 MonoMethodSignature *sig;
8197 MonoGenericContext *generic_context = NULL;
8198 MonoGenericContainer *generic_container = NULL;
8199 MonoType **param_types;
8200 int i, n, start_new_bblock, dreg;
8201 int num_calls = 0, inline_costs = 0;
8202 int breakpoint_id = 0;
8204 GSList *class_inits = NULL;
8205 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8207 gboolean init_locals, seq_points, skip_dead_blocks;
8208 gboolean sym_seq_points = FALSE;
8209 MonoDebugMethodInfo *minfo;
8210 MonoBitSet *seq_point_locs = NULL;
8211 MonoBitSet *seq_point_set_locs = NULL;
8213 cfg->disable_inline = is_jit_optimizer_disabled (method);
8215 /* serialization and xdomain stuff may need access to private fields and methods */
8216 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8217 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8218 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8219 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8220 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8221 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8223 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8224 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8225 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8226 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8227 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8229 image = method->klass->image;
8230 header = mono_method_get_header_checked (method, &cfg->error);
8232 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8233 goto exception_exit;
8235 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
8238 generic_container = mono_method_get_generic_container (method);
8239 sig = mono_method_signature (method);
8240 num_args = sig->hasthis + sig->param_count;
8241 ip = (unsigned char*)header->code;
8242 cfg->cil_start = ip;
8243 end = ip + header->code_size;
8244 cfg->stat_cil_code_size += header->code_size;
8246 seq_points = cfg->gen_seq_points && cfg->method == method;
8248 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8249 /* We could hit a seq point before attaching to the JIT (#8338) */
8253 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8254 minfo = mono_debug_lookup_method (method);
8256 MonoSymSeqPoint *sps;
8257 int i, n_il_offsets;
8259 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8260 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8261 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8262 sym_seq_points = TRUE;
8263 for (i = 0; i < n_il_offsets; ++i) {
8264 if (sps [i].il_offset < header->code_size)
8265 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8268 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8269 /* Methods without line number info like auto-generated property accessors */
8270 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8271 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8272 sym_seq_points = TRUE;
8277 * Methods without init_locals set could cause asserts in various passes
8278 * (#497220). To work around this, we emit dummy initialization opcodes
8279 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8280 * on some platforms.
8282 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8283 init_locals = header->init_locals;
8287 method_definition = method;
8288 while (method_definition->is_inflated) {
8289 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8290 method_definition = imethod->declaring;
8293 /* SkipVerification is not allowed if core-clr is enabled */
8294 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8296 dont_verify_stloc = TRUE;
8299 if (sig->is_inflated)
8300 generic_context = mono_method_get_context (method);
8301 else if (generic_container)
8302 generic_context = &generic_container->context;
8303 cfg->generic_context = generic_context;
8306 g_assert (!sig->has_type_parameters);
8308 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8309 g_assert (method->is_inflated);
8310 g_assert (mono_method_get_context (method)->method_inst);
8312 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8313 g_assert (sig->generic_param_count);
8315 if (cfg->method == method) {
8316 cfg->real_offset = 0;
8318 cfg->real_offset = inline_offset;
8321 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8322 cfg->cil_offset_to_bb_len = header->code_size;
8324 cfg->current_method = method;
8326 if (cfg->verbose_level > 2)
8327 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8329 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8331 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8332 for (n = 0; n < sig->param_count; ++n)
8333 param_types [n + sig->hasthis] = sig->params [n];
8334 cfg->arg_types = param_types;
8336 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8337 if (cfg->method == method) {
8339 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8340 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8343 NEW_BBLOCK (cfg, start_bblock);
8344 cfg->bb_entry = start_bblock;
8345 start_bblock->cil_code = NULL;
8346 start_bblock->cil_length = 0;
8349 NEW_BBLOCK (cfg, end_bblock);
8350 cfg->bb_exit = end_bblock;
8351 end_bblock->cil_code = NULL;
8352 end_bblock->cil_length = 0;
8353 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8354 g_assert (cfg->num_bblocks == 2);
8356 arg_array = cfg->args;
8358 if (header->num_clauses) {
8359 cfg->spvars = g_hash_table_new (NULL, NULL);
8360 cfg->exvars = g_hash_table_new (NULL, NULL);
8362 /* handle exception clauses */
8363 for (i = 0; i < header->num_clauses; ++i) {
8364 MonoBasicBlock *try_bb;
8365 MonoExceptionClause *clause = &header->clauses [i];
8366 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8368 try_bb->real_offset = clause->try_offset;
8369 try_bb->try_start = TRUE;
8370 try_bb->region = ((i + 1) << 8) | clause->flags;
8371 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8372 tblock->real_offset = clause->handler_offset;
8373 tblock->flags |= BB_EXCEPTION_HANDLER;
8376 * Linking the try block with the EH block hinders inlining as we won't be able to
8377 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8379 if (COMPILE_LLVM (cfg))
8380 link_bblock (cfg, try_bb, tblock);
8382 if (*(ip + clause->handler_offset) == CEE_POP)
8383 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8385 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8386 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8387 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8388 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8389 MONO_ADD_INS (tblock, ins);
8391 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8392 /* finally clauses already have a seq point */
8393 /* seq points for filter clauses are emitted below */
8394 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8395 MONO_ADD_INS (tblock, ins);
8398 /* todo: is a fault block unsafe to optimize? */
8399 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8400 tblock->flags |= BB_EXCEPTION_UNSAFE;
8403 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8405 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8407 /* catch and filter blocks get the exception object on the stack */
8408 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8409 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8411 /* mostly like handle_stack_args (), but just sets the input args */
8412 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8413 tblock->in_scount = 1;
8414 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8415 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8419 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8420 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8421 if (!cfg->compile_llvm) {
8422 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8423 ins->dreg = tblock->in_stack [0]->dreg;
8424 MONO_ADD_INS (tblock, ins);
8427 MonoInst *dummy_use;
8430 * Add a dummy use for the exvar so its liveness info will be
8433 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8436 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8437 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8438 MONO_ADD_INS (tblock, ins);
8441 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8442 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8443 tblock->flags |= BB_EXCEPTION_HANDLER;
8444 tblock->real_offset = clause->data.filter_offset;
8445 tblock->in_scount = 1;
8446 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8447 /* The filter block shares the exvar with the handler block */
8448 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8449 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8450 MONO_ADD_INS (tblock, ins);
8454 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8455 clause->data.catch_class &&
8457 mono_class_check_context_used (clause->data.catch_class)) {
8459 * In shared generic code with catch
8460 * clauses containing type variables
8461 * the exception handling code has to
8462 * be able to get to the rgctx.
8463 * Therefore we have to make sure that
8464 * the vtable/mrgctx argument (for
8465 * static or generic methods) or the
8466 * "this" argument (for non-static
8467 * methods) are live.
8469 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8470 mini_method_get_context (method)->method_inst ||
8471 method->klass->valuetype) {
8472 mono_get_vtable_var (cfg);
8474 MonoInst *dummy_use;
8476 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8481 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8482 cfg->cbb = start_bblock;
8483 cfg->args = arg_array;
8484 mono_save_args (cfg, sig, inline_args);
8487 /* FIRST CODE BLOCK */
8488 NEW_BBLOCK (cfg, tblock);
8489 tblock->cil_code = ip;
8493 ADD_BBLOCK (cfg, tblock);
8495 if (cfg->method == method) {
8496 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8497 if (breakpoint_id) {
8498 MONO_INST_NEW (cfg, ins, OP_BREAK);
8499 MONO_ADD_INS (cfg->cbb, ins);
8503 /* we use a separate basic block for the initialization code */
8504 NEW_BBLOCK (cfg, init_localsbb);
8505 if (cfg->method == method)
8506 cfg->bb_init = init_localsbb;
8507 init_localsbb->real_offset = cfg->real_offset;
8508 start_bblock->next_bb = init_localsbb;
8509 init_localsbb->next_bb = cfg->cbb;
8510 link_bblock (cfg, start_bblock, init_localsbb);
8511 link_bblock (cfg, init_localsbb, cfg->cbb);
8513 cfg->cbb = init_localsbb;
8515 if (cfg->gsharedvt && cfg->method == method) {
8516 MonoGSharedVtMethodInfo *info;
8517 MonoInst *var, *locals_var;
8520 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8521 info->method = cfg->method;
8522 info->count_entries = 16;
8523 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8524 cfg->gsharedvt_info = info;
8526 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8527 /* prevent it from being register allocated */
8528 //var->flags |= MONO_INST_VOLATILE;
8529 cfg->gsharedvt_info_var = var;
8531 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8532 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8534 /* Allocate locals */
8535 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8536 /* prevent it from being register allocated */
8537 //locals_var->flags |= MONO_INST_VOLATILE;
8538 cfg->gsharedvt_locals_var = locals_var;
8540 dreg = alloc_ireg (cfg);
8541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8543 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8544 ins->dreg = locals_var->dreg;
8546 MONO_ADD_INS (cfg->cbb, ins);
8547 cfg->gsharedvt_locals_var_ins = ins;
8549 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8552 ins->flags |= MONO_INST_INIT;
8556 if (mono_security_core_clr_enabled ()) {
8557 /* check if this is native code, e.g. an icall or a p/invoke */
8558 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8559 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8561 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8562 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8564 /* if this is a native call then it can only be JITted from platform code */
8565 if ((icall || pinvk) && method->klass && method->klass->image) {
8566 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8567 MonoException *ex = icall ? mono_get_exception_security () :
8568 mono_get_exception_method_access ();
8569 emit_throw_exception (cfg, ex);
8576 CHECK_CFG_EXCEPTION;
8578 if (header->code_size == 0)
8581 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8586 if (cfg->method == method)
8587 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8589 for (n = 0; n < header->num_locals; ++n) {
8590 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8595 /* We force the vtable variable here for all shared methods
8596 for the possibility that they might show up in a stack
8597 trace where their exact instantiation is needed. */
8598 if (cfg->gshared && method == cfg->method) {
8599 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8600 mini_method_get_context (method)->method_inst ||
8601 method->klass->valuetype) {
8602 mono_get_vtable_var (cfg);
8604 /* FIXME: Is there a better way to do this?
8605 We need the variable live for the duration
8606 of the whole method. */
8607 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8611 /* add a check for this != NULL to inlined methods */
8612 if (is_virtual_call) {
8615 NEW_ARGLOAD (cfg, arg_ins, 0);
8616 MONO_ADD_INS (cfg->cbb, arg_ins);
8617 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8620 skip_dead_blocks = !dont_verify;
8621 if (skip_dead_blocks) {
8622 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8627 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8628 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8631 start_new_bblock = 0;
8633 if (cfg->method == method)
8634 cfg->real_offset = ip - header->code;
8636 cfg->real_offset = inline_offset;
8641 if (start_new_bblock) {
8642 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8643 if (start_new_bblock == 2) {
8644 g_assert (ip == tblock->cil_code);
8646 GET_BBLOCK (cfg, tblock, ip);
8648 cfg->cbb->next_bb = tblock;
8650 start_new_bblock = 0;
8651 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8652 if (cfg->verbose_level > 3)
8653 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8654 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8658 g_slist_free (class_inits);
8661 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8662 link_bblock (cfg, cfg->cbb, tblock);
8663 if (sp != stack_start) {
8664 handle_stack_args (cfg, stack_start, sp - stack_start);
8666 CHECK_UNVERIFIABLE (cfg);
8668 cfg->cbb->next_bb = tblock;
8670 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8671 if (cfg->verbose_level > 3)
8672 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8673 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8676 g_slist_free (class_inits);
8681 if (skip_dead_blocks) {
8682 int ip_offset = ip - header->code;
8684 if (ip_offset == bb->end)
8688 int op_size = mono_opcode_size (ip, end);
8689 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8691 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8693 if (ip_offset + op_size == bb->end) {
8694 MONO_INST_NEW (cfg, ins, OP_NOP);
8695 MONO_ADD_INS (cfg->cbb, ins);
8696 start_new_bblock = 1;
8704 * Sequence points are points where the debugger can place a breakpoint.
8705 * Currently, we generate these automatically at points where the IL
8708 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8710 * Make methods interruptable at the beginning, and at the targets of
8711 * backward branches.
8712 * Also, do this at the start of every bblock in methods with clauses too,
8713 * to be able to handle instructions with imprecise control flow like
8715 * Backward branches are handled at the end of method-to-ir ().
8717 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8718 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8720 /* Avoid sequence points on empty IL like .volatile */
8721 // FIXME: Enable this
8722 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8723 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8724 if ((sp != stack_start) && !sym_seq_point)
8725 ins->flags |= MONO_INST_NONEMPTY_STACK;
8726 MONO_ADD_INS (cfg->cbb, ins);
8729 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8732 cfg->cbb->real_offset = cfg->real_offset;
8734 if ((cfg->method == method) && cfg->coverage_info) {
8735 guint32 cil_offset = ip - header->code;
8736 cfg->coverage_info->data [cil_offset].cil_code = ip;
8738 /* TODO: Use an increment here */
8739 #if defined(TARGET_X86)
8740 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8741 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8743 MONO_ADD_INS (cfg->cbb, ins);
8745 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8746 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8750 if (cfg->verbose_level > 3)
8751 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8755 if (seq_points && !sym_seq_points && sp != stack_start) {
8757 * The C# compiler uses these nops to notify the JIT that it should
8758 * insert seq points.
8760 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8761 MONO_ADD_INS (cfg->cbb, ins);
8763 if (cfg->keep_cil_nops)
8764 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8766 MONO_INST_NEW (cfg, ins, OP_NOP);
8768 MONO_ADD_INS (cfg->cbb, ins);
8771 if (should_insert_brekpoint (cfg->method)) {
8772 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8774 MONO_INST_NEW (cfg, ins, OP_NOP);
8777 MONO_ADD_INS (cfg->cbb, ins);
8783 CHECK_STACK_OVF (1);
8784 n = (*ip)-CEE_LDARG_0;
8786 EMIT_NEW_ARGLOAD (cfg, ins, n);
8794 CHECK_STACK_OVF (1);
8795 n = (*ip)-CEE_LDLOC_0;
8797 EMIT_NEW_LOCLOAD (cfg, ins, n);
8806 n = (*ip)-CEE_STLOC_0;
8809 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8811 emit_stloc_ir (cfg, sp, header, n);
8818 CHECK_STACK_OVF (1);
8821 EMIT_NEW_ARGLOAD (cfg, ins, n);
8827 CHECK_STACK_OVF (1);
8830 NEW_ARGLOADA (cfg, ins, n);
8831 MONO_ADD_INS (cfg->cbb, ins);
8841 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8843 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8848 CHECK_STACK_OVF (1);
8851 EMIT_NEW_LOCLOAD (cfg, ins, n);
8855 case CEE_LDLOCA_S: {
8856 unsigned char *tmp_ip;
8858 CHECK_STACK_OVF (1);
8859 CHECK_LOCAL (ip [1]);
8861 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8867 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8876 CHECK_LOCAL (ip [1]);
8877 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8879 emit_stloc_ir (cfg, sp, header, ip [1]);
8884 CHECK_STACK_OVF (1);
8885 EMIT_NEW_PCONST (cfg, ins, NULL);
8886 ins->type = STACK_OBJ;
8891 CHECK_STACK_OVF (1);
8892 EMIT_NEW_ICONST (cfg, ins, -1);
8905 CHECK_STACK_OVF (1);
8906 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8912 CHECK_STACK_OVF (1);
8914 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8920 CHECK_STACK_OVF (1);
8921 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8927 CHECK_STACK_OVF (1);
8928 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8929 ins->type = STACK_I8;
8930 ins->dreg = alloc_dreg (cfg, STACK_I8);
8932 ins->inst_l = (gint64)read64 (ip);
8933 MONO_ADD_INS (cfg->cbb, ins);
8939 gboolean use_aotconst = FALSE;
8941 #ifdef TARGET_POWERPC
8942 /* FIXME: Clean this up */
8943 if (cfg->compile_aot)
8944 use_aotconst = TRUE;
8947 /* FIXME: we should really allocate this only late in the compilation process */
8948 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8950 CHECK_STACK_OVF (1);
8956 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8958 dreg = alloc_freg (cfg);
8959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8960 ins->type = cfg->r4_stack_type;
8962 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8963 ins->type = cfg->r4_stack_type;
8964 ins->dreg = alloc_dreg (cfg, STACK_R8);
8966 MONO_ADD_INS (cfg->cbb, ins);
8976 gboolean use_aotconst = FALSE;
8978 #ifdef TARGET_POWERPC
8979 /* FIXME: Clean this up */
8980 if (cfg->compile_aot)
8981 use_aotconst = TRUE;
8984 /* FIXME: we should really allocate this only late in the compilation process */
8985 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8987 CHECK_STACK_OVF (1);
8993 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8995 dreg = alloc_freg (cfg);
8996 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8997 ins->type = STACK_R8;
8999 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9000 ins->type = STACK_R8;
9001 ins->dreg = alloc_dreg (cfg, STACK_R8);
9003 MONO_ADD_INS (cfg->cbb, ins);
9012 MonoInst *temp, *store;
9014 CHECK_STACK_OVF (1);
9018 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
9019 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
9021 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9024 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9037 if (sp [0]->type == STACK_R8)
9038 /* we need to pop the value from the x86 FP stack */
9039 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
9044 MonoMethodSignature *fsig;
9047 INLINE_FAILURE ("jmp");
9048 GSHAREDVT_FAILURE (*ip);
9051 if (stack_start != sp)
9053 token = read32 (ip + 1);
9054 /* FIXME: check the signature matches */
9055 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9058 if (cfg->gshared && mono_method_check_context_used (cmethod))
9059 GENERIC_SHARING_FAILURE (CEE_JMP);
9061 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9063 fsig = mono_method_signature (cmethod);
9064 n = fsig->param_count + fsig->hasthis;
9065 if (cfg->llvm_only) {
9068 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9069 for (i = 0; i < n; ++i)
9070 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9071 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9073 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9074 * have to emit a normal return since llvm expects it.
9077 emit_setret (cfg, ins);
9078 MONO_INST_NEW (cfg, ins, OP_BR);
9079 ins->inst_target_bb = end_bblock;
9080 MONO_ADD_INS (cfg->cbb, ins);
9081 link_bblock (cfg, cfg->cbb, end_bblock);
9084 } else if (cfg->backend->have_op_tail_call) {
9085 /* Handle tail calls similarly to calls */
9088 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9089 call->method = cmethod;
9090 call->tail_call = TRUE;
9091 call->signature = mono_method_signature (cmethod);
9092 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9093 call->inst.inst_p0 = cmethod;
9094 for (i = 0; i < n; ++i)
9095 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9097 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
9098 call->vret_var = cfg->vret_addr;
9100 mono_arch_emit_call (cfg, call);
9101 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9102 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9104 for (i = 0; i < num_args; ++i)
9105 /* Prevent arguments from being optimized away */
9106 arg_array [i]->flags |= MONO_INST_VOLATILE;
9108 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9109 ins = (MonoInst*)call;
9110 ins->inst_p0 = cmethod;
9111 MONO_ADD_INS (cfg->cbb, ins);
9115 start_new_bblock = 1;
9120 MonoMethodSignature *fsig;
9123 token = read32 (ip + 1);
9127 //GSHAREDVT_FAILURE (*ip);
9132 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9135 if (method->dynamic && fsig->pinvoke) {
9139 * This is a call through a function pointer using a pinvoke
9140 * signature. Have to create a wrapper and call that instead.
9141 * FIXME: This is very slow, need to create a wrapper at JIT time
9142 * instead based on the signature.
9144 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9145 EMIT_NEW_PCONST (cfg, args [1], fsig);
9147 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9150 n = fsig->param_count + fsig->hasthis;
9154 //g_assert (!virtual_ || fsig->hasthis);
9158 inline_costs += 10 * num_calls++;
9161 * Making generic calls out of gsharedvt methods.
9162 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9163 * patching gshared method addresses into a gsharedvt method.
9165 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9167 * We pass the address to the gsharedvt trampoline in the rgctx reg
9169 MonoInst *callee = addr;
9171 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9173 GSHAREDVT_FAILURE (*ip);
9177 GSHAREDVT_FAILURE (*ip);
9179 addr = emit_get_rgctx_sig (cfg, context_used,
9180 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9181 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9185 /* Prevent inlining of methods with indirect calls */
9186 INLINE_FAILURE ("indirect call");
9188 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9189 MonoJumpInfoType info_type;
9193 * Instead of emitting an indirect call, emit a direct call
9194 * with the contents of the aotconst as the patch info.
9196 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9197 info_type = (MonoJumpInfoType)addr->inst_c1;
9198 info_data = addr->inst_p0;
9200 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9201 info_data = addr->inst_right->inst_left;
9204 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9205 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9208 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9209 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9214 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9218 /* End of call, INS should contain the result of the call, if any */
9220 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9222 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9225 CHECK_CFG_EXCEPTION;
9229 constrained_class = NULL;
9233 case CEE_CALLVIRT: {
9234 MonoInst *addr = NULL;
9235 MonoMethodSignature *fsig = NULL;
9237 int virtual_ = *ip == CEE_CALLVIRT;
9238 gboolean pass_imt_from_rgctx = FALSE;
9239 MonoInst *imt_arg = NULL;
9240 MonoInst *keep_this_alive = NULL;
9241 gboolean pass_vtable = FALSE;
9242 gboolean pass_mrgctx = FALSE;
9243 MonoInst *vtable_arg = NULL;
9244 gboolean check_this = FALSE;
9245 gboolean supported_tail_call = FALSE;
9246 gboolean tail_call = FALSE;
9247 gboolean need_seq_point = FALSE;
9248 guint32 call_opcode = *ip;
9249 gboolean emit_widen = TRUE;
9250 gboolean push_res = TRUE;
9251 gboolean skip_ret = FALSE;
9252 gboolean delegate_invoke = FALSE;
9253 gboolean direct_icall = FALSE;
9254 gboolean constrained_partial_call = FALSE;
9255 MonoMethod *cil_method;
9258 token = read32 (ip + 1);
9262 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9265 cil_method = cmethod;
9267 if (constrained_class) {
9268 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9269 if (!mini_is_gsharedvt_klass (constrained_class)) {
9270 g_assert (!cmethod->klass->valuetype);
9271 if (!mini_type_is_reference (&constrained_class->byval_arg))
9272 constrained_partial_call = TRUE;
9276 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9277 if (cfg->verbose_level > 2)
9278 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9279 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9280 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9282 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9286 if (cfg->verbose_level > 2)
9287 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9289 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9291 * This is needed since get_method_constrained can't find
9292 * the method in klass representing a type var.
9293 * The type var is guaranteed to be a reference type in this
9296 if (!mini_is_gsharedvt_klass (constrained_class))
9297 g_assert (!cmethod->klass->valuetype);
9299 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9305 if (!dont_verify && !cfg->skip_visibility) {
9306 MonoMethod *target_method = cil_method;
9307 if (method->is_inflated) {
9308 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9311 if (!mono_method_can_access_method (method_definition, target_method) &&
9312 !mono_method_can_access_method (method, cil_method))
9313 emit_method_access_failure (cfg, method, cil_method);
9316 if (mono_security_core_clr_enabled ())
9317 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9319 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9320 /* MS.NET seems to silently convert this to a callvirt */
9325 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9326 * converts to a callvirt.
9328 * tests/bug-515884.il is an example of this behavior
9330 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9331 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9332 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9336 if (!cmethod->klass->inited)
9337 if (!mono_class_init (cmethod->klass))
9338 TYPE_LOAD_ERROR (cmethod->klass);
9340 fsig = mono_method_signature (cmethod);
9343 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9344 mini_class_is_system_array (cmethod->klass)) {
9345 array_rank = cmethod->klass->rank;
9346 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9347 direct_icall = TRUE;
9348 } else if (fsig->pinvoke) {
9349 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9350 fsig = mono_method_signature (wrapper);
9351 } else if (constrained_class) {
9353 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9357 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9358 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9360 /* See code below */
9361 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9362 MonoBasicBlock *tbb;
9364 GET_BBLOCK (cfg, tbb, ip + 5);
9365 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9367 * We want to extend the try block to cover the call, but we can't do it if the
9368 * call is made directly since it's followed by an exception check.
9370 direct_icall = FALSE;
9374 mono_save_token_info (cfg, image, token, cil_method);
9376 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9377 need_seq_point = TRUE;
9379 /* Don't support calls made using type arguments for now */
9381 if (cfg->gsharedvt) {
9382 if (mini_is_gsharedvt_signature (fsig))
9383 GSHAREDVT_FAILURE (*ip);
9387 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9388 g_assert_not_reached ();
9390 n = fsig->param_count + fsig->hasthis;
9392 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
9396 g_assert (!mono_method_check_context_used (cmethod));
9400 //g_assert (!virtual_ || fsig->hasthis);
9405 * We have the `constrained.' prefix opcode.
9407 if (constrained_class) {
9408 if (mini_is_gsharedvt_klass (constrained_class)) {
9409 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9410 /* The 'Own method' case below */
9411 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
9412 /* 'The type parameter is instantiated as a reference type' case below. */
9414 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9415 CHECK_CFG_EXCEPTION;
9421 if (constrained_partial_call) {
9422 gboolean need_box = TRUE;
9425 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9426 * called method is not known at compile time either. The called method could end up being
9427 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9428 * to box the receiver.
9429 * A simple solution would be to box always and make a normal virtual call, but that would
9430 * be bad performance wise.
9432 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
9434 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9439 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9440 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9441 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9442 ins->klass = constrained_class;
9443 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9444 CHECK_CFG_EXCEPTION;
9445 } else if (need_box) {
9447 MonoBasicBlock *is_ref_bb, *end_bb;
9448 MonoInst *nonbox_call;
9451 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9453 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9454 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9456 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9458 NEW_BBLOCK (cfg, is_ref_bb);
9459 NEW_BBLOCK (cfg, end_bb);
9461 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9463 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9466 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9471 MONO_START_BB (cfg, is_ref_bb);
9472 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9473 ins->klass = constrained_class;
9474 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9475 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9479 MONO_START_BB (cfg, end_bb);
9482 nonbox_call->dreg = ins->dreg;
9485 g_assert (mono_class_is_interface (cmethod->klass));
9486 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9487 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9490 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9492 * The type parameter is instantiated as a valuetype,
9493 * but that type doesn't override the method we're
9494 * calling, so we need to box `this'.
9496 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9497 ins->klass = constrained_class;
9498 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9499 CHECK_CFG_EXCEPTION;
9500 } else if (!constrained_class->valuetype) {
9501 int dreg = alloc_ireg_ref (cfg);
9504 * The type parameter is instantiated as a reference
9505 * type. We have a managed pointer on the stack, so
9506 * we need to dereference it here.
9508 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9509 ins->type = STACK_OBJ;
9512 if (cmethod->klass->valuetype) {
9515 /* Interface method */
9518 mono_class_setup_vtable (constrained_class);
9519 CHECK_TYPELOAD (constrained_class);
9520 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9522 TYPE_LOAD_ERROR (constrained_class);
9523 slot = mono_method_get_vtable_slot (cmethod);
9525 TYPE_LOAD_ERROR (cmethod->klass);
9526 cmethod = constrained_class->vtable [ioffset + slot];
9528 if (cmethod->klass == mono_defaults.enum_class) {
9529 /* Enum implements some interfaces, so treat this as the first case */
9530 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9531 ins->klass = constrained_class;
9532 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9533 CHECK_CFG_EXCEPTION;
9538 constrained_class = NULL;
9541 if (check_call_signature (cfg, fsig, sp))
9544 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9545 delegate_invoke = TRUE;
9547 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9548 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9549 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9557 * If the callee is a shared method, then its static cctor
9558 * might not get called after the call was patched.
9560 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9561 emit_class_init (cfg, cmethod->klass);
9562 CHECK_TYPELOAD (cmethod->klass);
9565 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9568 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9570 context_used = mini_method_check_context_used (cfg, cmethod);
9572 if (context_used && mono_class_is_interface (cmethod->klass)) {
9573 /* Generic method interface
9574 calls are resolved via a
9575 helper function and don't
9577 if (!cmethod_context || !cmethod_context->method_inst)
9578 pass_imt_from_rgctx = TRUE;
9582 * If a shared method calls another
9583 * shared method then the caller must
9584 * have a generic sharing context
9585 * because the magic trampoline
9586 * requires it. FIXME: We shouldn't
9587 * have to force the vtable/mrgctx
9588 * variable here. Instead there
9589 * should be a flag in the cfg to
9590 * request a generic sharing context.
9593 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9594 mono_get_vtable_var (cfg);
9599 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9601 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9603 CHECK_TYPELOAD (cmethod->klass);
9604 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9609 g_assert (!vtable_arg);
9611 if (!cfg->compile_aot) {
9613 * emit_get_rgctx_method () calls mono_class_vtable () so check
9614 * for type load errors before.
9616 mono_class_setup_vtable (cmethod->klass);
9617 CHECK_TYPELOAD (cmethod->klass);
9620 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9622 /* !marshalbyref is needed to properly handle generic methods + remoting */
9623 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9624 MONO_METHOD_IS_FINAL (cmethod)) &&
9625 !mono_class_is_marshalbyref (cmethod->klass)) {
9632 if (pass_imt_from_rgctx) {
9633 g_assert (!pass_vtable);
9635 imt_arg = emit_get_rgctx_method (cfg, context_used,
9636 cmethod, MONO_RGCTX_INFO_METHOD);
9640 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9642 /* Calling virtual generic methods */
9643 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9644 !(MONO_METHOD_IS_FINAL (cmethod) &&
9645 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9646 fsig->generic_param_count &&
9647 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9649 MonoInst *this_temp, *this_arg_temp, *store;
9650 MonoInst *iargs [4];
9652 g_assert (fsig->is_inflated);
9654 /* Prevent inlining of methods that contain indirect calls */
9655 INLINE_FAILURE ("virtual generic call");
9657 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9658 GSHAREDVT_FAILURE (*ip);
9660 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9661 g_assert (!imt_arg);
9663 g_assert (cmethod->is_inflated);
9664 imt_arg = emit_get_rgctx_method (cfg, context_used,
9665 cmethod, MONO_RGCTX_INFO_METHOD);
9666 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9668 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9669 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9670 MONO_ADD_INS (cfg->cbb, store);
9672 /* FIXME: This should be a managed pointer */
9673 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9675 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9676 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9677 cmethod, MONO_RGCTX_INFO_METHOD);
9678 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9679 addr = mono_emit_jit_icall (cfg,
9680 mono_helper_compile_generic_method, iargs);
9682 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9684 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9691 * Implement a workaround for the inherent races involved in locking:
9697 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9698 * try block, the Exit () won't be executed, see:
9699 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9700 * To work around this, we extend such try blocks to include the last x bytes
9701 * of the Monitor.Enter () call.
9703 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9704 MonoBasicBlock *tbb;
9706 GET_BBLOCK (cfg, tbb, ip + 5);
9708 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9709 * from Monitor.Enter like ArgumentNullException.
9711 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9712 /* Mark this bblock as needing to be extended */
9713 tbb->extend_try_block = TRUE;
9717 /* Conversion to a JIT intrinsic */
9718 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9719 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9720 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9728 if ((cfg->opt & MONO_OPT_INLINE) &&
9729 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9730 mono_method_check_inlining (cfg, cmethod)) {
9732 gboolean always = FALSE;
9734 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9735 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9736 /* Prevent inlining of methods that call wrappers */
9737 INLINE_FAILURE ("wrapper call");
9738 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9742 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9744 cfg->real_offset += 5;
9746 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9747 /* *sp is already set by inline_method */
9752 inline_costs += costs;
9758 /* Tail recursion elimination */
9759 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9760 gboolean has_vtargs = FALSE;
9763 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9764 INLINE_FAILURE ("tail call");
9766 /* keep it simple */
9767 for (i = fsig->param_count - 1; i >= 0; i--) {
9768 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9773 if (need_seq_point) {
9774 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9775 need_seq_point = FALSE;
9777 for (i = 0; i < n; ++i)
9778 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9779 MONO_INST_NEW (cfg, ins, OP_BR);
9780 MONO_ADD_INS (cfg->cbb, ins);
9781 tblock = start_bblock->out_bb [0];
9782 link_bblock (cfg, cfg->cbb, tblock);
9783 ins->inst_target_bb = tblock;
9784 start_new_bblock = 1;
9786 /* skip the CEE_RET, too */
9787 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9794 inline_costs += 10 * num_calls++;
9797 * Synchronized wrappers.
9798 * It's hard to determine where to replace a method with its synchronized
9799 * wrapper without causing an infinite recursion. The current solution is
9800 * to add the synchronized wrapper in the trampolines, and to
9801 * change the called method to a dummy wrapper, and resolve that wrapper
9802 * to the real method in mono_jit_compile_method ().
9804 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9805 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9806 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9807 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9811 * Making generic calls out of gsharedvt methods.
9812 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9813 * patching gshared method addresses into a gsharedvt method.
9815 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9816 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9817 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9818 MonoRgctxInfoType info_type;
9821 //if (mono_class_is_interface (cmethod->klass))
9822 //GSHAREDVT_FAILURE (*ip);
9823 // disable for possible remoting calls
9824 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9825 GSHAREDVT_FAILURE (*ip);
9826 if (fsig->generic_param_count) {
9827 /* virtual generic call */
9828 g_assert (!imt_arg);
9829 /* Same as the virtual generic case above */
9830 imt_arg = emit_get_rgctx_method (cfg, context_used,
9831 cmethod, MONO_RGCTX_INFO_METHOD);
9832 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9834 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9835 /* This can happen when we call a fully instantiated iface method */
9836 imt_arg = emit_get_rgctx_method (cfg, context_used,
9837 cmethod, MONO_RGCTX_INFO_METHOD);
9842 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9843 keep_this_alive = sp [0];
9845 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9846 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9848 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9849 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9851 if (cfg->llvm_only) {
9852 // FIXME: Avoid initializing vtable_arg
9853 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9855 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9860 /* Generic sharing */
9863 * Use this if the callee is gsharedvt sharable too, since
9864 * at runtime we might find an instantiation so the call cannot
9865 * be patched (the 'no_patch' code path in mini-trampolines.c).
9867 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9868 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9869 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9870 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9871 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9872 INLINE_FAILURE ("gshared");
9874 g_assert (cfg->gshared && cmethod);
9878 * We are compiling a call to a
9879 * generic method from shared code,
9880 * which means that we have to look up
9881 * the method in the rgctx and do an
9885 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9887 if (cfg->llvm_only) {
9888 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9889 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9891 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9892 // FIXME: Avoid initializing imt_arg/vtable_arg
9893 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9895 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9896 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9901 /* Direct calls to icalls */
9903 MonoMethod *wrapper;
9906 /* Inline the wrapper */
9907 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9909 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9910 g_assert (costs > 0);
9911 cfg->real_offset += 5;
9913 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9914 /* *sp is already set by inline_method */
9919 inline_costs += costs;
9928 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9929 MonoInst *val = sp [fsig->param_count];
9931 if (val->type == STACK_OBJ) {
9932 MonoInst *iargs [2];
9937 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9940 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9941 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9942 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9943 emit_write_barrier (cfg, addr, val);
9944 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9945 GSHAREDVT_FAILURE (*ip);
9946 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9947 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9949 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9950 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9951 if (!cmethod->klass->element_class->valuetype && !readonly)
9952 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9953 CHECK_TYPELOAD (cmethod->klass);
9956 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9959 g_assert_not_reached ();
9966 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9970 /* Tail prefix / tail call optimization */
9972 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9973 /* FIXME: runtime generic context pointer for jumps? */
9974 /* FIXME: handle this for generic sharing eventually */
9975 if ((ins_flag & MONO_INST_TAILCALL) &&
9976 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9977 supported_tail_call = TRUE;
9979 if (supported_tail_call) {
9982 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9983 INLINE_FAILURE ("tail call");
9985 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9987 if (cfg->backend->have_op_tail_call) {
9988 /* Handle tail calls similarly to normal calls */
9991 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9993 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9994 call->tail_call = TRUE;
9995 call->method = cmethod;
9996 call->signature = mono_method_signature (cmethod);
9999 * We implement tail calls by storing the actual arguments into the
10000 * argument variables, then emitting a CEE_JMP.
10002 for (i = 0; i < n; ++i) {
10003 /* Prevent argument from being register allocated */
10004 arg_array [i]->flags |= MONO_INST_VOLATILE;
10005 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
10007 ins = (MonoInst*)call;
10008 ins->inst_p0 = cmethod;
10009 ins->inst_p1 = arg_array [0];
10010 MONO_ADD_INS (cfg->cbb, ins);
10011 link_bblock (cfg, cfg->cbb, end_bblock);
10012 start_new_bblock = 1;
10014 // FIXME: Eliminate unreachable epilogs
10017 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10018 * only reachable from this call.
10020 GET_BBLOCK (cfg, tblock, ip + 5);
10021 if (tblock == cfg->cbb || tblock->in_count == 0)
10030 * Virtual calls in llvm-only mode.
10032 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
10033 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
10038 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
10039 INLINE_FAILURE ("call");
10040 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
10041 imt_arg, vtable_arg);
10043 if (tail_call && !cfg->llvm_only) {
10044 link_bblock (cfg, cfg->cbb, end_bblock);
10045 start_new_bblock = 1;
10047 // FIXME: Eliminate unreachable epilogs
10050 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10051 * only reachable from this call.
10053 GET_BBLOCK (cfg, tblock, ip + 5);
10054 if (tblock == cfg->cbb || tblock->in_count == 0)
10061 /* End of call, INS should contain the result of the call, if any */
10063 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10066 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10071 if (keep_this_alive) {
10072 MonoInst *dummy_use;
10074 /* See mono_emit_method_call_full () */
10075 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10078 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
10080 * Clang can convert these calls to tail calls which screw up the stack
10081 * walk. This happens even when the -fno-optimize-sibling-calls
10082 * option is passed to clang.
10083 * Work around this by emitting a dummy call.
10085 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
10088 CHECK_CFG_EXCEPTION;
10092 g_assert (*ip == CEE_RET);
10096 constrained_class = NULL;
10097 if (need_seq_point)
10098 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10102 if (cfg->method != method) {
10103 /* return from inlined method */
10105 * If in_count == 0, that means the ret is unreachable due to
10106 * being preceded by a throw. In that case, inline_method () will
10107 * handle setting the return value
10108 * (test case: test_0_inline_throw ()).
10110 if (return_var && cfg->cbb->in_count) {
10111 MonoType *ret_type = mono_method_signature (method)->ret;
10117 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10120 //g_assert (returnvar != -1);
10121 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10122 cfg->ret_var_set = TRUE;
10125 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10127 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10128 emit_pop_lmf (cfg);
10131 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10133 if (seq_points && !sym_seq_points) {
10135 * Place a seq point here too even though the IL stack is not
10136 * empty, so a step over on
10139 * will work correctly.
10141 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10142 MONO_ADD_INS (cfg->cbb, ins);
10145 g_assert (!return_var);
10149 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10152 emit_setret (cfg, *sp);
10155 if (sp != stack_start)
10157 MONO_INST_NEW (cfg, ins, OP_BR);
10159 ins->inst_target_bb = end_bblock;
10160 MONO_ADD_INS (cfg->cbb, ins);
10161 link_bblock (cfg, cfg->cbb, end_bblock);
10162 start_new_bblock = 1;
10166 MONO_INST_NEW (cfg, ins, OP_BR);
10168 target = ip + 1 + (signed char)(*ip);
10170 GET_BBLOCK (cfg, tblock, target);
10171 link_bblock (cfg, cfg->cbb, tblock);
10172 ins->inst_target_bb = tblock;
10173 if (sp != stack_start) {
10174 handle_stack_args (cfg, stack_start, sp - stack_start);
10176 CHECK_UNVERIFIABLE (cfg);
10178 MONO_ADD_INS (cfg->cbb, ins);
10179 start_new_bblock = 1;
10180 inline_costs += BRANCH_COST;
10194 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10196 target = ip + 1 + *(signed char*)ip;
10199 ADD_BINCOND (NULL);
10202 inline_costs += BRANCH_COST;
10206 MONO_INST_NEW (cfg, ins, OP_BR);
10209 target = ip + 4 + (gint32)read32(ip);
10211 GET_BBLOCK (cfg, tblock, target);
10212 link_bblock (cfg, cfg->cbb, tblock);
10213 ins->inst_target_bb = tblock;
10214 if (sp != stack_start) {
10215 handle_stack_args (cfg, stack_start, sp - stack_start);
10217 CHECK_UNVERIFIABLE (cfg);
10220 MONO_ADD_INS (cfg->cbb, ins);
10222 start_new_bblock = 1;
10223 inline_costs += BRANCH_COST;
10225 case CEE_BRFALSE_S:
10230 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10231 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10232 guint32 opsize = is_short ? 1 : 4;
10234 CHECK_OPSIZE (opsize);
10236 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10239 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10244 GET_BBLOCK (cfg, tblock, target);
10245 link_bblock (cfg, cfg->cbb, tblock);
10246 GET_BBLOCK (cfg, tblock, ip);
10247 link_bblock (cfg, cfg->cbb, tblock);
10249 if (sp != stack_start) {
10250 handle_stack_args (cfg, stack_start, sp - stack_start);
10251 CHECK_UNVERIFIABLE (cfg);
10254 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10255 cmp->sreg1 = sp [0]->dreg;
10256 type_from_op (cfg, cmp, sp [0], NULL);
10259 #if SIZEOF_REGISTER == 4
10260 if (cmp->opcode == OP_LCOMPARE_IMM) {
10261 /* Convert it to OP_LCOMPARE */
10262 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10263 ins->type = STACK_I8;
10264 ins->dreg = alloc_dreg (cfg, STACK_I8);
10266 MONO_ADD_INS (cfg->cbb, ins);
10267 cmp->opcode = OP_LCOMPARE;
10268 cmp->sreg2 = ins->dreg;
10271 MONO_ADD_INS (cfg->cbb, cmp);
10273 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10274 type_from_op (cfg, ins, sp [0], NULL);
10275 MONO_ADD_INS (cfg->cbb, ins);
10276 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10277 GET_BBLOCK (cfg, tblock, target);
10278 ins->inst_true_bb = tblock;
10279 GET_BBLOCK (cfg, tblock, ip);
10280 ins->inst_false_bb = tblock;
10281 start_new_bblock = 2;
10284 inline_costs += BRANCH_COST;
10299 MONO_INST_NEW (cfg, ins, *ip);
10301 target = ip + 4 + (gint32)read32(ip);
10304 ADD_BINCOND (NULL);
10307 inline_costs += BRANCH_COST;
10311 MonoBasicBlock **targets;
10312 MonoBasicBlock *default_bblock;
10313 MonoJumpInfoBBTable *table;
10314 int offset_reg = alloc_preg (cfg);
10315 int target_reg = alloc_preg (cfg);
10316 int table_reg = alloc_preg (cfg);
10317 int sum_reg = alloc_preg (cfg);
10318 gboolean use_op_switch;
10322 n = read32 (ip + 1);
10325 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10329 CHECK_OPSIZE (n * sizeof (guint32));
10330 target = ip + n * sizeof (guint32);
10332 GET_BBLOCK (cfg, default_bblock, target);
10333 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10335 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10336 for (i = 0; i < n; ++i) {
10337 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10338 targets [i] = tblock;
10339 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10343 if (sp != stack_start) {
10345 * Link the current bb with the targets as well, so handle_stack_args
10346 * will set their in_stack correctly.
10348 link_bblock (cfg, cfg->cbb, default_bblock);
10349 for (i = 0; i < n; ++i)
10350 link_bblock (cfg, cfg->cbb, targets [i]);
10352 handle_stack_args (cfg, stack_start, sp - stack_start);
10354 CHECK_UNVERIFIABLE (cfg);
10356 /* Undo the links */
10357 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10358 for (i = 0; i < n; ++i)
10359 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10363 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10365 for (i = 0; i < n; ++i)
10366 link_bblock (cfg, cfg->cbb, targets [i]);
10368 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10369 table->table = targets;
10370 table->table_size = n;
10372 use_op_switch = FALSE;
10374 /* ARM implements SWITCH statements differently */
10375 /* FIXME: Make it use the generic implementation */
10376 if (!cfg->compile_aot)
10377 use_op_switch = TRUE;
10380 if (COMPILE_LLVM (cfg))
10381 use_op_switch = TRUE;
10383 cfg->cbb->has_jump_table = 1;
10385 if (use_op_switch) {
10386 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10387 ins->sreg1 = src1->dreg;
10388 ins->inst_p0 = table;
10389 ins->inst_many_bb = targets;
10390 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10391 MONO_ADD_INS (cfg->cbb, ins);
10393 if (sizeof (gpointer) == 8)
10394 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10398 #if SIZEOF_REGISTER == 8
10399 /* The upper word might not be zero, and we add it to a 64 bit address later */
10400 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10403 if (cfg->compile_aot) {
10404 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10406 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10407 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10408 ins->inst_p0 = table;
10409 ins->dreg = table_reg;
10410 MONO_ADD_INS (cfg->cbb, ins);
10413 /* FIXME: Use load_memindex */
10414 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10416 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10418 start_new_bblock = 1;
10419 inline_costs += (BRANCH_COST * 2);
10432 case CEE_LDIND_REF:
10439 dreg = alloc_freg (cfg);
10442 dreg = alloc_lreg (cfg);
10444 case CEE_LDIND_REF:
10445 dreg = alloc_ireg_ref (cfg);
10448 dreg = alloc_preg (cfg);
10451 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10452 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10453 if (*ip == CEE_LDIND_R4)
10454 ins->type = cfg->r4_stack_type;
10455 ins->flags |= ins_flag;
10456 MONO_ADD_INS (cfg->cbb, ins);
10458 if (ins_flag & MONO_INST_VOLATILE) {
10459 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10460 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10465 case CEE_STIND_REF:
10476 if (ins_flag & MONO_INST_VOLATILE) {
10477 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10478 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10481 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10482 ins->flags |= ins_flag;
10485 MONO_ADD_INS (cfg->cbb, ins);
10487 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
10488 emit_write_barrier (cfg, sp [0], sp [1]);
10497 MONO_INST_NEW (cfg, ins, (*ip));
10499 ins->sreg1 = sp [0]->dreg;
10500 ins->sreg2 = sp [1]->dreg;
10501 type_from_op (cfg, ins, sp [0], sp [1]);
10503 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10505 /* Use the immediate opcodes if possible */
10506 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10507 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10508 if (imm_opcode != -1) {
10509 ins->opcode = imm_opcode;
10510 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10513 NULLIFY_INS (sp [1]);
10517 MONO_ADD_INS ((cfg)->cbb, (ins));
10519 *sp++ = mono_decompose_opcode (cfg, ins);
10536 MONO_INST_NEW (cfg, ins, (*ip));
10538 ins->sreg1 = sp [0]->dreg;
10539 ins->sreg2 = sp [1]->dreg;
10540 type_from_op (cfg, ins, sp [0], sp [1]);
10542 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10543 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10545 /* FIXME: Pass opcode to is_inst_imm */
10547 /* Use the immediate opcodes if possible */
10548 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10549 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10550 if (imm_opcode != -1) {
10551 ins->opcode = imm_opcode;
10552 if (sp [1]->opcode == OP_I8CONST) {
10553 #if SIZEOF_REGISTER == 8
10554 ins->inst_imm = sp [1]->inst_l;
10556 ins->inst_ls_word = sp [1]->inst_ls_word;
10557 ins->inst_ms_word = sp [1]->inst_ms_word;
10561 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10564 /* Might be followed by an instruction added by add_widen_op */
10565 if (sp [1]->next == NULL)
10566 NULLIFY_INS (sp [1]);
10569 MONO_ADD_INS ((cfg)->cbb, (ins));
10571 *sp++ = mono_decompose_opcode (cfg, ins);
10584 case CEE_CONV_OVF_I8:
10585 case CEE_CONV_OVF_U8:
10586 case CEE_CONV_R_UN:
10589 /* Special case this earlier so we have long constants in the IR */
10590 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10591 int data = sp [-1]->inst_c0;
10592 sp [-1]->opcode = OP_I8CONST;
10593 sp [-1]->type = STACK_I8;
10594 #if SIZEOF_REGISTER == 8
10595 if ((*ip) == CEE_CONV_U8)
10596 sp [-1]->inst_c0 = (guint32)data;
10598 sp [-1]->inst_c0 = data;
10600 sp [-1]->inst_ls_word = data;
10601 if ((*ip) == CEE_CONV_U8)
10602 sp [-1]->inst_ms_word = 0;
10604 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10606 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10613 case CEE_CONV_OVF_I4:
10614 case CEE_CONV_OVF_I1:
10615 case CEE_CONV_OVF_I2:
10616 case CEE_CONV_OVF_I:
10617 case CEE_CONV_OVF_U:
10620 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10621 ADD_UNOP (CEE_CONV_OVF_I8);
10628 case CEE_CONV_OVF_U1:
10629 case CEE_CONV_OVF_U2:
10630 case CEE_CONV_OVF_U4:
10633 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10634 ADD_UNOP (CEE_CONV_OVF_U8);
10641 case CEE_CONV_OVF_I1_UN:
10642 case CEE_CONV_OVF_I2_UN:
10643 case CEE_CONV_OVF_I4_UN:
10644 case CEE_CONV_OVF_I8_UN:
10645 case CEE_CONV_OVF_U1_UN:
10646 case CEE_CONV_OVF_U2_UN:
10647 case CEE_CONV_OVF_U4_UN:
10648 case CEE_CONV_OVF_U8_UN:
10649 case CEE_CONV_OVF_I_UN:
10650 case CEE_CONV_OVF_U_UN:
10657 CHECK_CFG_EXCEPTION;
10661 case CEE_ADD_OVF_UN:
10663 case CEE_MUL_OVF_UN:
10665 case CEE_SUB_OVF_UN:
10671 GSHAREDVT_FAILURE (*ip);
10674 token = read32 (ip + 1);
10675 klass = mini_get_class (method, token, generic_context);
10676 CHECK_TYPELOAD (klass);
10678 if (generic_class_is_reference_type (cfg, klass)) {
10679 MonoInst *store, *load;
10680 int dreg = alloc_ireg_ref (cfg);
10682 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10683 load->flags |= ins_flag;
10684 MONO_ADD_INS (cfg->cbb, load);
10686 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10687 store->flags |= ins_flag;
10688 MONO_ADD_INS (cfg->cbb, store);
10690 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10691 emit_write_barrier (cfg, sp [0], sp [1]);
10693 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10699 int loc_index = -1;
10705 token = read32 (ip + 1);
10706 klass = mini_get_class (method, token, generic_context);
10707 CHECK_TYPELOAD (klass);
10709 /* Optimize the common ldobj+stloc combination */
10712 loc_index = ip [6];
10719 loc_index = ip [5] - CEE_STLOC_0;
10726 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10727 CHECK_LOCAL (loc_index);
10729 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10730 ins->dreg = cfg->locals [loc_index]->dreg;
10731 ins->flags |= ins_flag;
10734 if (ins_flag & MONO_INST_VOLATILE) {
10735 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10736 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10742 /* Optimize the ldobj+stobj combination */
10743 /* The reference case ends up being a load+store anyway */
10744 /* Skip this if the operation is volatile. */
10745 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10750 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10757 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10758 ins->flags |= ins_flag;
10761 if (ins_flag & MONO_INST_VOLATILE) {
10762 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10763 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10772 CHECK_STACK_OVF (1);
10774 n = read32 (ip + 1);
10776 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10777 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10778 ins->type = STACK_OBJ;
10781 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10782 MonoInst *iargs [1];
10783 char *str = (char *)mono_method_get_wrapper_data (method, n);
10785 if (cfg->compile_aot)
10786 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10788 EMIT_NEW_PCONST (cfg, iargs [0], str);
10789 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10791 if (cfg->opt & MONO_OPT_SHARED) {
10792 MonoInst *iargs [3];
10794 if (cfg->compile_aot) {
10795 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10797 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10798 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10799 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10800 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10801 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10804 if (cfg->cbb->out_of_line) {
10805 MonoInst *iargs [2];
10807 if (image == mono_defaults.corlib) {
10809 * Avoid relocations in AOT and save some space by using a
10810 * version of helper_ldstr specialized to mscorlib.
10812 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10813 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10815 /* Avoid creating the string object */
10816 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10817 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10818 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10822 if (cfg->compile_aot) {
10823 NEW_LDSTRCONST (cfg, ins, image, n);
10825 MONO_ADD_INS (cfg->cbb, ins);
10828 NEW_PCONST (cfg, ins, NULL);
10829 ins->type = STACK_OBJ;
10830 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10834 OUT_OF_MEMORY_FAILURE;
10837 MONO_ADD_INS (cfg->cbb, ins);
10846 MonoInst *iargs [2];
10847 MonoMethodSignature *fsig;
10850 MonoInst *vtable_arg = NULL;
10853 token = read32 (ip + 1);
10854 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10857 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10860 mono_save_token_info (cfg, image, token, cmethod);
10862 if (!mono_class_init (cmethod->klass))
10863 TYPE_LOAD_ERROR (cmethod->klass);
10865 context_used = mini_method_check_context_used (cfg, cmethod);
10867 if (mono_security_core_clr_enabled ())
10868 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10870 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10871 emit_class_init (cfg, cmethod->klass);
10872 CHECK_TYPELOAD (cmethod->klass);
10876 if (cfg->gsharedvt) {
10877 if (mini_is_gsharedvt_variable_signature (sig))
10878 GSHAREDVT_FAILURE (*ip);
10882 n = fsig->param_count;
10886 * Generate smaller code for the common newobj <exception> instruction in
10887 * argument checking code.
10889 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10890 is_exception_class (cmethod->klass) && n <= 2 &&
10891 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10892 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10893 MonoInst *iargs [3];
10897 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10900 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10903 iargs [1] = sp [0];
10904 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10907 iargs [1] = sp [0];
10908 iargs [2] = sp [1];
10909 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10912 g_assert_not_reached ();
10920 /* move the args to allow room for 'this' in the first position */
10926 /* check_call_signature () requires sp[0] to be set */
10927 this_ins.type = STACK_OBJ;
10928 sp [0] = &this_ins;
10929 if (check_call_signature (cfg, fsig, sp))
10934 if (mini_class_is_system_array (cmethod->klass)) {
10935 *sp = emit_get_rgctx_method (cfg, context_used,
10936 cmethod, MONO_RGCTX_INFO_METHOD);
10938 /* Avoid varargs in the common case */
10939 if (fsig->param_count == 1)
10940 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10941 else if (fsig->param_count == 2)
10942 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10943 else if (fsig->param_count == 3)
10944 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10945 else if (fsig->param_count == 4)
10946 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10948 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10949 } else if (cmethod->string_ctor) {
10950 g_assert (!context_used);
10951 g_assert (!vtable_arg);
10952 /* we simply pass a null pointer */
10953 EMIT_NEW_PCONST (cfg, *sp, NULL);
10954 /* now call the string ctor */
10955 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10957 if (cmethod->klass->valuetype) {
10958 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10959 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10960 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10965 * The code generated by mini_emit_virtual_call () expects
10966 * iargs [0] to be a boxed instance, but luckily the vcall
10967 * will be transformed into a normal call there.
10969 } else if (context_used) {
10970 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10973 MonoVTable *vtable = NULL;
10975 if (!cfg->compile_aot)
10976 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10977 CHECK_TYPELOAD (cmethod->klass);
10980 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10981 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10982 * As a workaround, we call class cctors before allocating objects.
10984 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10985 emit_class_init (cfg, cmethod->klass);
10986 if (cfg->verbose_level > 2)
10987 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10988 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10991 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10994 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10997 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10999 /* Now call the actual ctor */
11000 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
11001 CHECK_CFG_EXCEPTION;
11004 if (alloc == NULL) {
11006 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
11007 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
11015 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
11016 emit_seq_point (cfg, method, ip, FALSE, TRUE);
11019 case CEE_CASTCLASS:
11024 token = read32 (ip + 1);
11025 klass = mini_get_class (method, token, generic_context);
11026 CHECK_TYPELOAD (klass);
11027 if (sp [0]->type != STACK_OBJ)
11030 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
11031 ins->dreg = alloc_preg (cfg);
11032 ins->sreg1 = (*sp)->dreg;
11033 ins->klass = klass;
11034 ins->type = STACK_OBJ;
11035 MONO_ADD_INS (cfg->cbb, ins);
11037 CHECK_CFG_EXCEPTION;
11041 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11044 case CEE_UNBOX_ANY: {
11045 MonoInst *res, *addr;
11050 token = read32 (ip + 1);
11051 klass = mini_get_class (method, token, generic_context);
11052 CHECK_TYPELOAD (klass);
11054 mono_save_token_info (cfg, image, token, klass);
11056 context_used = mini_class_check_context_used (cfg, klass);
11058 if (mini_is_gsharedvt_klass (klass)) {
11059 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11061 } else if (generic_class_is_reference_type (cfg, klass)) {
11062 if (MONO_INS_IS_PCONST_NULL (*sp)) {
11063 EMIT_NEW_PCONST (cfg, res, NULL);
11064 res->type = STACK_OBJ;
11066 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
11067 res->dreg = alloc_preg (cfg);
11068 res->sreg1 = (*sp)->dreg;
11069 res->klass = klass;
11070 res->type = STACK_OBJ;
11071 MONO_ADD_INS (cfg->cbb, res);
11072 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11074 } else if (mono_class_is_nullable (klass)) {
11075 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11077 addr = handle_unbox (cfg, klass, sp, context_used);
11079 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11090 MonoClass *enum_class;
11091 MonoMethod *has_flag;
11097 token = read32 (ip + 1);
11098 klass = mini_get_class (method, token, generic_context);
11099 CHECK_TYPELOAD (klass);
11101 mono_save_token_info (cfg, image, token, klass);
11103 context_used = mini_class_check_context_used (cfg, klass);
11105 if (generic_class_is_reference_type (cfg, klass)) {
11111 if (klass == mono_defaults.void_class)
11113 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11115 /* frequent check in generic code: box (struct), brtrue */
11120 * <push int/long ptr>
11123 * constrained. MyFlags
11124 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11126 * If we find this sequence and the operand types on box and constrained
11127 * are equal, we can emit a specialized instruction sequence instead of
11128 * the very slow HasFlag () call.
11130 if ((cfg->opt & MONO_OPT_INTRINS) &&
11131 /* Cheap checks first. */
11132 ip + 5 + 6 + 5 < end &&
11133 ip [5] == CEE_PREFIX1 &&
11134 ip [6] == CEE_CONSTRAINED_ &&
11135 ip [11] == CEE_CALLVIRT &&
11136 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11137 mono_class_is_enum (klass) &&
11138 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11139 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11140 has_flag->klass == mono_defaults.enum_class &&
11141 !strcmp (has_flag->name, "HasFlag") &&
11142 has_flag->signature->hasthis &&
11143 has_flag->signature->param_count == 1) {
11144 CHECK_TYPELOAD (enum_class);
11146 if (enum_class == klass) {
11147 MonoInst *enum_this, *enum_flag;
11152 enum_this = sp [0];
11153 enum_flag = sp [1];
11155 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11160 // FIXME: LLVM can't handle the inconsistent bb linking
11161 if (!mono_class_is_nullable (klass) &&
11162 !mini_is_gsharedvt_klass (klass) &&
11163 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11164 (ip [5] == CEE_BRTRUE ||
11165 ip [5] == CEE_BRTRUE_S ||
11166 ip [5] == CEE_BRFALSE ||
11167 ip [5] == CEE_BRFALSE_S)) {
11168 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11170 MonoBasicBlock *true_bb, *false_bb;
11174 if (cfg->verbose_level > 3) {
11175 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11176 printf ("<box+brtrue opt>\n");
11181 case CEE_BRFALSE_S:
11184 target = ip + 1 + (signed char)(*ip);
11191 target = ip + 4 + (gint)(read32 (ip));
11195 g_assert_not_reached ();
11199 * We need to link both bblocks, since it is needed for handling stack
11200 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11201 * Branching to only one of them would lead to inconsistencies, so
11202 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11204 GET_BBLOCK (cfg, true_bb, target);
11205 GET_BBLOCK (cfg, false_bb, ip);
11207 mono_link_bblock (cfg, cfg->cbb, true_bb);
11208 mono_link_bblock (cfg, cfg->cbb, false_bb);
11210 if (sp != stack_start) {
11211 handle_stack_args (cfg, stack_start, sp - stack_start);
11213 CHECK_UNVERIFIABLE (cfg);
11216 if (COMPILE_LLVM (cfg)) {
11217 dreg = alloc_ireg (cfg);
11218 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11219 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11221 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11223 /* The JIT can't eliminate the iconst+compare */
11224 MONO_INST_NEW (cfg, ins, OP_BR);
11225 ins->inst_target_bb = is_true ? true_bb : false_bb;
11226 MONO_ADD_INS (cfg->cbb, ins);
11229 start_new_bblock = 1;
11233 *sp++ = handle_box (cfg, val, klass, context_used);
11235 CHECK_CFG_EXCEPTION;
11244 token = read32 (ip + 1);
11245 klass = mini_get_class (method, token, generic_context);
11246 CHECK_TYPELOAD (klass);
11248 mono_save_token_info (cfg, image, token, klass);
11250 context_used = mini_class_check_context_used (cfg, klass);
11252 if (mono_class_is_nullable (klass)) {
11255 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11256 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11260 ins = handle_unbox (cfg, klass, sp, context_used);
11273 MonoClassField *field;
11274 #ifndef DISABLE_REMOTING
11278 gboolean is_instance;
11280 gpointer addr = NULL;
11281 gboolean is_special_static;
11283 MonoInst *store_val = NULL;
11284 MonoInst *thread_ins;
11287 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11289 if (op == CEE_STFLD) {
11292 store_val = sp [1];
11297 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11299 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11302 if (op == CEE_STSFLD) {
11305 store_val = sp [0];
11310 token = read32 (ip + 1);
11311 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11312 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11313 klass = field->parent;
11316 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11319 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11320 FIELD_ACCESS_FAILURE (method, field);
11321 mono_class_init (klass);
11323 /* if the class is Critical then transparent code cannot access its fields */
11324 if (!is_instance && mono_security_core_clr_enabled ())
11325 ensure_method_is_allowed_to_access_field (cfg, method, field);
11327 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11328 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11329 if (mono_security_core_clr_enabled ())
11330 ensure_method_is_allowed_to_access_field (cfg, method, field);
11333 ftype = mono_field_get_type (field);
11336 * LDFLD etc. is usable on static fields as well, so convert those cases to
11339 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11351 g_assert_not_reached ();
11353 is_instance = FALSE;
11356 context_used = mini_class_check_context_used (cfg, klass);
11358 /* INSTANCE CASE */
11360 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11361 if (op == CEE_STFLD) {
11362 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11364 #ifndef DISABLE_REMOTING
11365 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11366 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11367 MonoInst *iargs [5];
11369 GSHAREDVT_FAILURE (op);
11371 iargs [0] = sp [0];
11372 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11373 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11374 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11376 iargs [4] = sp [1];
11378 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11379 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11380 iargs, ip, cfg->real_offset, TRUE);
11381 CHECK_CFG_EXCEPTION;
11382 g_assert (costs > 0);
11384 cfg->real_offset += 5;
11386 inline_costs += costs;
11388 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11393 MonoInst *store, *wbarrier_ptr_ins = NULL;
11395 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11397 if (ins_flag & MONO_INST_VOLATILE) {
11398 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11399 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11402 if (mini_is_gsharedvt_klass (klass)) {
11403 MonoInst *offset_ins;
11405 context_used = mini_class_check_context_used (cfg, klass);
11407 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11408 /* The value is offset by 1 */
11409 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11410 dreg = alloc_ireg_mp (cfg);
11411 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11412 wbarrier_ptr_ins = ins;
11413 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11414 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11416 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11418 if (sp [0]->opcode != OP_LDADDR)
11419 store->flags |= MONO_INST_FAULT;
11421 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11422 if (mini_is_gsharedvt_klass (klass)) {
11423 g_assert (wbarrier_ptr_ins);
11424 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11426 /* insert call to write barrier */
11430 dreg = alloc_ireg_mp (cfg);
11431 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11432 emit_write_barrier (cfg, ptr, sp [1]);
11436 store->flags |= ins_flag;
11443 #ifndef DISABLE_REMOTING
11444 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11445 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11446 MonoInst *iargs [4];
11448 GSHAREDVT_FAILURE (op);
11450 iargs [0] = sp [0];
11451 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11452 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11453 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11454 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11455 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11456 iargs, ip, cfg->real_offset, TRUE);
11457 CHECK_CFG_EXCEPTION;
11458 g_assert (costs > 0);
11460 cfg->real_offset += 5;
11464 inline_costs += costs;
11466 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11472 if (sp [0]->type == STACK_VTYPE) {
11475 /* Have to compute the address of the variable */
11477 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11479 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11481 g_assert (var->klass == klass);
11483 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11487 if (op == CEE_LDFLDA) {
11488 if (sp [0]->type == STACK_OBJ) {
11489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11490 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11493 dreg = alloc_ireg_mp (cfg);
11495 if (mini_is_gsharedvt_klass (klass)) {
11496 MonoInst *offset_ins;
11498 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11499 /* The value is offset by 1 */
11500 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11501 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11503 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11505 ins->klass = mono_class_from_mono_type (field->type);
11506 ins->type = STACK_MP;
11511 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11513 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
11514 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
11523 if (mini_is_gsharedvt_klass (klass)) {
11524 MonoInst *offset_ins;
11526 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11527 /* The value is offset by 1 */
11528 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11529 dreg = alloc_ireg_mp (cfg);
11530 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11531 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11533 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11535 load->flags |= ins_flag;
11536 if (sp [0]->opcode != OP_LDADDR)
11537 load->flags |= MONO_INST_FAULT;
11549 context_used = mini_class_check_context_used (cfg, klass);
11551 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11552 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11556 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11557 * to be called here.
11559 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11560 mono_class_vtable (cfg->domain, klass);
11561 CHECK_TYPELOAD (klass);
11563 mono_domain_lock (cfg->domain);
11564 if (cfg->domain->special_static_fields)
11565 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11566 mono_domain_unlock (cfg->domain);
11568 is_special_static = mono_class_field_is_special_static (field);
11570 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11571 thread_ins = mono_get_thread_intrinsic (cfg);
11575 /* Generate IR to compute the field address */
11576 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11578 * Fast access to TLS data
11579 * Inline version of get_thread_static_data () in
11583 int idx, static_data_reg, array_reg, dreg;
11585 GSHAREDVT_FAILURE (op);
11587 MONO_ADD_INS (cfg->cbb, thread_ins);
11588 static_data_reg = alloc_ireg (cfg);
11589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11591 if (cfg->compile_aot) {
11592 int offset_reg, offset2_reg, idx_reg;
11594 /* For TLS variables, this will return the TLS offset */
11595 EMIT_NEW_SFLDACONST (cfg, ins, field);
11596 offset_reg = ins->dreg;
11597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11598 idx_reg = alloc_ireg (cfg);
11599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11601 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11602 array_reg = alloc_ireg (cfg);
11603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11604 offset2_reg = alloc_ireg (cfg);
11605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11607 dreg = alloc_ireg (cfg);
11608 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11610 offset = (gsize)addr & 0x7fffffff;
11611 idx = offset & 0x3f;
11613 array_reg = alloc_ireg (cfg);
11614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11615 dreg = alloc_ireg (cfg);
11616 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11618 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11619 (cfg->compile_aot && is_special_static) ||
11620 (context_used && is_special_static)) {
11621 MonoInst *iargs [2];
11623 g_assert (field->parent);
11624 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11625 if (context_used) {
11626 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11627 field, MONO_RGCTX_INFO_CLASS_FIELD);
11629 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11631 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11632 } else if (context_used) {
11633 MonoInst *static_data;
11636 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11637 method->klass->name_space, method->klass->name, method->name,
11638 depth, field->offset);
11641 if (mono_class_needs_cctor_run (klass, method))
11642 emit_class_init (cfg, klass);
11645 * The pointer we're computing here is
11647 * super_info.static_data + field->offset
11649 static_data = emit_get_rgctx_klass (cfg, context_used,
11650 klass, MONO_RGCTX_INFO_STATIC_DATA);
11652 if (mini_is_gsharedvt_klass (klass)) {
11653 MonoInst *offset_ins;
11655 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11656 /* The value is offset by 1 */
11657 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11658 dreg = alloc_ireg_mp (cfg);
11659 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11660 } else if (field->offset == 0) {
11663 int addr_reg = mono_alloc_preg (cfg);
11664 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11666 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11667 MonoInst *iargs [2];
11669 g_assert (field->parent);
11670 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11671 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11672 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11674 MonoVTable *vtable = NULL;
11676 if (!cfg->compile_aot)
11677 vtable = mono_class_vtable (cfg->domain, klass);
11678 CHECK_TYPELOAD (klass);
11681 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11682 if (!(g_slist_find (class_inits, klass))) {
11683 emit_class_init (cfg, klass);
11684 if (cfg->verbose_level > 2)
11685 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11686 class_inits = g_slist_prepend (class_inits, klass);
11689 if (cfg->run_cctors) {
11690 /* This makes it so that inlining cannot trigger */
11691 /* .cctors: too many apps depend on them */
11692 /* running with a specific order... */
11694 if (! vtable->initialized)
11695 INLINE_FAILURE ("class init");
11696 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11697 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11698 goto exception_exit;
11702 if (cfg->compile_aot)
11703 EMIT_NEW_SFLDACONST (cfg, ins, field);
11706 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11708 EMIT_NEW_PCONST (cfg, ins, addr);
11711 MonoInst *iargs [1];
11712 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11713 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11717 /* Generate IR to do the actual load/store operation */
11719 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11720 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11721 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11724 if (op == CEE_LDSFLDA) {
11725 ins->klass = mono_class_from_mono_type (ftype);
11726 ins->type = STACK_PTR;
11728 } else if (op == CEE_STSFLD) {
11731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11732 store->flags |= ins_flag;
11734 gboolean is_const = FALSE;
11735 MonoVTable *vtable = NULL;
11736 gpointer addr = NULL;
11738 if (!context_used) {
11739 vtable = mono_class_vtable (cfg->domain, klass);
11740 CHECK_TYPELOAD (klass);
11742 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11743 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11744 int ro_type = ftype->type;
11746 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11747 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11748 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11751 GSHAREDVT_FAILURE (op);
11753 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11756 case MONO_TYPE_BOOLEAN:
11758 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11762 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11765 case MONO_TYPE_CHAR:
11767 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11771 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11776 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11780 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11785 case MONO_TYPE_PTR:
11786 case MONO_TYPE_FNPTR:
11787 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11788 type_to_eval_stack_type ((cfg), field->type, *sp);
11791 case MONO_TYPE_STRING:
11792 case MONO_TYPE_OBJECT:
11793 case MONO_TYPE_CLASS:
11794 case MONO_TYPE_SZARRAY:
11795 case MONO_TYPE_ARRAY:
11796 if (!mono_gc_is_moving ()) {
11797 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11798 type_to_eval_stack_type ((cfg), field->type, *sp);
11806 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11811 case MONO_TYPE_VALUETYPE:
11821 CHECK_STACK_OVF (1);
11823 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11824 load->flags |= ins_flag;
11830 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11831 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11832 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11843 token = read32 (ip + 1);
11844 klass = mini_get_class (method, token, generic_context);
11845 CHECK_TYPELOAD (klass);
11846 if (ins_flag & MONO_INST_VOLATILE) {
11847 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11848 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11850 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11851 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11852 ins->flags |= ins_flag;
11853 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11854 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11855 /* insert call to write barrier */
11856 emit_write_barrier (cfg, sp [0], sp [1]);
11868 const char *data_ptr;
11870 guint32 field_token;
11876 token = read32 (ip + 1);
11878 klass = mini_get_class (method, token, generic_context);
11879 CHECK_TYPELOAD (klass);
11881 context_used = mini_class_check_context_used (cfg, klass);
11883 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11884 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11885 ins->sreg1 = sp [0]->dreg;
11886 ins->type = STACK_I4;
11887 ins->dreg = alloc_ireg (cfg);
11888 MONO_ADD_INS (cfg->cbb, ins);
11889 *sp = mono_decompose_opcode (cfg, ins);
11892 if (context_used) {
11893 MonoInst *args [3];
11894 MonoClass *array_class = mono_array_class_get (klass, 1);
11895 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11897 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11900 args [0] = emit_get_rgctx_klass (cfg, context_used,
11901 array_class, MONO_RGCTX_INFO_VTABLE);
11906 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11908 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11910 if (cfg->opt & MONO_OPT_SHARED) {
11911 /* Decompose now to avoid problems with references to the domainvar */
11912 MonoInst *iargs [3];
11914 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11915 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11916 iargs [2] = sp [0];
11918 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11920 /* Decompose later since it is needed by abcrem */
11921 MonoClass *array_type = mono_array_class_get (klass, 1);
11922 mono_class_vtable (cfg->domain, array_type);
11923 CHECK_TYPELOAD (array_type);
11925 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11926 ins->dreg = alloc_ireg_ref (cfg);
11927 ins->sreg1 = sp [0]->dreg;
11928 ins->inst_newa_class = klass;
11929 ins->type = STACK_OBJ;
11930 ins->klass = array_type;
11931 MONO_ADD_INS (cfg->cbb, ins);
11932 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11933 cfg->cbb->has_array_access = TRUE;
11935 /* Needed so mono_emit_load_get_addr () gets called */
11936 mono_get_got_var (cfg);
11946 * we inline/optimize the initialization sequence if possible.
11947 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11948 * for small sizes open code the memcpy
11949 * ensure the rva field is big enough
11951 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11952 MonoMethod *memcpy_method = get_memcpy_method ();
11953 MonoInst *iargs [3];
11954 int add_reg = alloc_ireg_mp (cfg);
11956 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11957 if (cfg->compile_aot) {
11958 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11960 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11962 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11963 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11972 if (sp [0]->type != STACK_OBJ)
11975 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11976 ins->dreg = alloc_preg (cfg);
11977 ins->sreg1 = sp [0]->dreg;
11978 ins->type = STACK_I4;
11979 /* This flag will be inherited by the decomposition */
11980 ins->flags |= MONO_INST_FAULT;
11981 MONO_ADD_INS (cfg->cbb, ins);
11982 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11983 cfg->cbb->has_array_access = TRUE;
11991 if (sp [0]->type != STACK_OBJ)
11994 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11996 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11997 CHECK_TYPELOAD (klass);
11998 /* we need to make sure that this array is exactly the type it needs
11999 * to be for correctness. the wrappers are lax with their usage
12000 * so we need to ignore them here
12002 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
12003 MonoClass *array_class = mono_array_class_get (klass, 1);
12004 mini_emit_check_array_type (cfg, sp [0], array_class);
12005 CHECK_TYPELOAD (array_class);
12009 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12014 case CEE_LDELEM_I1:
12015 case CEE_LDELEM_U1:
12016 case CEE_LDELEM_I2:
12017 case CEE_LDELEM_U2:
12018 case CEE_LDELEM_I4:
12019 case CEE_LDELEM_U4:
12020 case CEE_LDELEM_I8:
12022 case CEE_LDELEM_R4:
12023 case CEE_LDELEM_R8:
12024 case CEE_LDELEM_REF: {
12030 if (*ip == CEE_LDELEM) {
12032 token = read32 (ip + 1);
12033 klass = mini_get_class (method, token, generic_context);
12034 CHECK_TYPELOAD (klass);
12035 mono_class_init (klass);
12038 klass = array_access_to_klass (*ip);
12040 if (sp [0]->type != STACK_OBJ)
12043 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12045 if (mini_is_gsharedvt_variable_klass (klass)) {
12046 // FIXME-VT: OP_ICONST optimization
12047 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12048 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12049 ins->opcode = OP_LOADV_MEMBASE;
12050 } else if (sp [1]->opcode == OP_ICONST) {
12051 int array_reg = sp [0]->dreg;
12052 int index_reg = sp [1]->dreg;
12053 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
12055 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
12056 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12058 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12059 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12061 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12065 if (*ip == CEE_LDELEM)
12072 case CEE_STELEM_I1:
12073 case CEE_STELEM_I2:
12074 case CEE_STELEM_I4:
12075 case CEE_STELEM_I8:
12076 case CEE_STELEM_R4:
12077 case CEE_STELEM_R8:
12078 case CEE_STELEM_REF:
12083 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12085 if (*ip == CEE_STELEM) {
12087 token = read32 (ip + 1);
12088 klass = mini_get_class (method, token, generic_context);
12089 CHECK_TYPELOAD (klass);
12090 mono_class_init (klass);
12093 klass = array_access_to_klass (*ip);
12095 if (sp [0]->type != STACK_OBJ)
12098 emit_array_store (cfg, klass, sp, TRUE);
12100 if (*ip == CEE_STELEM)
12107 case CEE_CKFINITE: {
12111 if (cfg->llvm_only) {
12112 MonoInst *iargs [1];
12114 iargs [0] = sp [0];
12115 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12117 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12118 ins->sreg1 = sp [0]->dreg;
12119 ins->dreg = alloc_freg (cfg);
12120 ins->type = STACK_R8;
12121 MONO_ADD_INS (cfg->cbb, ins);
12123 *sp++ = mono_decompose_opcode (cfg, ins);
12129 case CEE_REFANYVAL: {
12130 MonoInst *src_var, *src;
12132 int klass_reg = alloc_preg (cfg);
12133 int dreg = alloc_preg (cfg);
12135 GSHAREDVT_FAILURE (*ip);
12138 MONO_INST_NEW (cfg, ins, *ip);
12141 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12142 CHECK_TYPELOAD (klass);
12144 context_used = mini_class_check_context_used (cfg, klass);
12147 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12149 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12150 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12153 if (context_used) {
12154 MonoInst *klass_ins;
12156 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12157 klass, MONO_RGCTX_INFO_KLASS);
12160 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12161 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12163 mini_emit_class_check (cfg, klass_reg, klass);
12165 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12166 ins->type = STACK_MP;
12167 ins->klass = klass;
12172 case CEE_MKREFANY: {
12173 MonoInst *loc, *addr;
12175 GSHAREDVT_FAILURE (*ip);
12178 MONO_INST_NEW (cfg, ins, *ip);
12181 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12182 CHECK_TYPELOAD (klass);
12184 context_used = mini_class_check_context_used (cfg, klass);
12186 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12187 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12189 if (context_used) {
12190 MonoInst *const_ins;
12191 int type_reg = alloc_preg (cfg);
12193 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12194 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12196 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12198 int const_reg = alloc_preg (cfg);
12199 int type_reg = alloc_preg (cfg);
12201 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12202 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12203 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12204 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12206 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12208 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12209 ins->type = STACK_VTYPE;
12210 ins->klass = mono_defaults.typed_reference_class;
12215 case CEE_LDTOKEN: {
12217 MonoClass *handle_class;
12219 CHECK_STACK_OVF (1);
12222 n = read32 (ip + 1);
12224 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12225 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12226 handle = mono_method_get_wrapper_data (method, n);
12227 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12228 if (handle_class == mono_defaults.typehandle_class)
12229 handle = &((MonoClass*)handle)->byval_arg;
12232 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12237 mono_class_init (handle_class);
12238 if (cfg->gshared) {
12239 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12240 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12241 /* This case handles ldtoken
12242 of an open type, like for
12245 } else if (handle_class == mono_defaults.typehandle_class) {
12246 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12247 } else if (handle_class == mono_defaults.fieldhandle_class)
12248 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12249 else if (handle_class == mono_defaults.methodhandle_class)
12250 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12252 g_assert_not_reached ();
12255 if ((cfg->opt & MONO_OPT_SHARED) &&
12256 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12257 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12258 MonoInst *addr, *vtvar, *iargs [3];
12259 int method_context_used;
12261 method_context_used = mini_method_check_context_used (cfg, method);
12263 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12265 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12266 EMIT_NEW_ICONST (cfg, iargs [1], n);
12267 if (method_context_used) {
12268 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12269 method, MONO_RGCTX_INFO_METHOD);
12270 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12272 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12273 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12275 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12279 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12281 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12282 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12283 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12284 (cmethod->klass == mono_defaults.systemtype_class) &&
12285 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12286 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12288 mono_class_init (tclass);
12289 if (context_used) {
12290 ins = emit_get_rgctx_klass (cfg, context_used,
12291 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12292 } else if (cfg->compile_aot) {
12293 if (method->wrapper_type) {
12294 mono_error_init (&error); //got to do it since there are multiple conditionals below
12295 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12296 /* Special case for static synchronized wrappers */
12297 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12299 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12300 /* FIXME: n is not a normal token */
12302 EMIT_NEW_PCONST (cfg, ins, NULL);
12305 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12308 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12310 EMIT_NEW_PCONST (cfg, ins, rt);
12312 ins->type = STACK_OBJ;
12313 ins->klass = cmethod->klass;
12316 MonoInst *addr, *vtvar;
12318 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12320 if (context_used) {
12321 if (handle_class == mono_defaults.typehandle_class) {
12322 ins = emit_get_rgctx_klass (cfg, context_used,
12323 mono_class_from_mono_type ((MonoType *)handle),
12324 MONO_RGCTX_INFO_TYPE);
12325 } else if (handle_class == mono_defaults.methodhandle_class) {
12326 ins = emit_get_rgctx_method (cfg, context_used,
12327 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12328 } else if (handle_class == mono_defaults.fieldhandle_class) {
12329 ins = emit_get_rgctx_field (cfg, context_used,
12330 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12332 g_assert_not_reached ();
12334 } else if (cfg->compile_aot) {
12335 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12337 EMIT_NEW_PCONST (cfg, ins, handle);
12339 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12341 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12351 if (sp [-1]->type != STACK_OBJ)
12354 MONO_INST_NEW (cfg, ins, OP_THROW);
12356 ins->sreg1 = sp [0]->dreg;
12358 cfg->cbb->out_of_line = TRUE;
12359 MONO_ADD_INS (cfg->cbb, ins);
12360 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12361 MONO_ADD_INS (cfg->cbb, ins);
12364 link_bblock (cfg, cfg->cbb, end_bblock);
12365 start_new_bblock = 1;
12366 /* This can complicate code generation for llvm since the return value might not be defined */
12367 if (COMPILE_LLVM (cfg))
12368 INLINE_FAILURE ("throw");
12370 case CEE_ENDFINALLY:
12371 if (!ip_in_finally_clause (cfg, ip - header->code))
12373 /* mono_save_seq_point_info () depends on this */
12374 if (sp != stack_start)
12375 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12376 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12377 MONO_ADD_INS (cfg->cbb, ins);
12379 start_new_bblock = 1;
12382 * Control will leave the method so empty the stack, otherwise
12383 * the next basic block will start with a nonempty stack.
12385 while (sp != stack_start) {
12390 case CEE_LEAVE_S: {
12393 if (*ip == CEE_LEAVE) {
12395 target = ip + 5 + (gint32)read32(ip + 1);
12398 target = ip + 2 + (signed char)(ip [1]);
12401 /* empty the stack */
12402 while (sp != stack_start) {
12407 * If this leave statement is in a catch block, check for a
12408 * pending exception, and rethrow it if necessary.
12409 * We avoid doing this in runtime invoke wrappers, since those are called
12410 * by native code which expects the wrapper to catch all exceptions.
12412 for (i = 0; i < header->num_clauses; ++i) {
12413 MonoExceptionClause *clause = &header->clauses [i];
12416 * Use <= in the final comparison to handle clauses with multiple
12417 * leave statements, like in bug #78024.
12418 * The ordering of the exception clauses guarantees that we find the
12419 * innermost clause.
12421 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12423 MonoBasicBlock *dont_throw;
12428 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12431 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12433 NEW_BBLOCK (cfg, dont_throw);
12436 * Currently, we always rethrow the abort exception, despite the
12437 * fact that this is not correct. See thread6.cs for an example.
12438 * But propagating the abort exception is more important than
12439 * getting the semantics right.
12441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12442 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12443 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12445 MONO_START_BB (cfg, dont_throw);
12450 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12453 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12455 MonoExceptionClause *clause;
12457 for (tmp = handlers; tmp; tmp = tmp->next) {
12458 clause = (MonoExceptionClause *)tmp->data;
12459 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12461 link_bblock (cfg, cfg->cbb, tblock);
12462 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12463 ins->inst_target_bb = tblock;
12464 ins->inst_eh_block = clause;
12465 MONO_ADD_INS (cfg->cbb, ins);
12466 cfg->cbb->has_call_handler = 1;
12467 if (COMPILE_LLVM (cfg)) {
12468 MonoBasicBlock *target_bb;
12471 * Link the finally bblock with the target, since it will
12472 * conceptually branch there.
12474 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12475 GET_BBLOCK (cfg, target_bb, target);
12476 link_bblock (cfg, tblock, target_bb);
12479 g_list_free (handlers);
12482 MONO_INST_NEW (cfg, ins, OP_BR);
12483 MONO_ADD_INS (cfg->cbb, ins);
12484 GET_BBLOCK (cfg, tblock, target);
12485 link_bblock (cfg, cfg->cbb, tblock);
12486 ins->inst_target_bb = tblock;
12488 start_new_bblock = 1;
12490 if (*ip == CEE_LEAVE)
12499 * Mono specific opcodes
12501 case MONO_CUSTOM_PREFIX: {
12503 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12507 case CEE_MONO_ICALL: {
12509 MonoJitICallInfo *info;
12511 token = read32 (ip + 2);
12512 func = mono_method_get_wrapper_data (method, token);
12513 info = mono_find_jit_icall_by_addr (func);
12515 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12518 CHECK_STACK (info->sig->param_count);
12519 sp -= info->sig->param_count;
12521 ins = mono_emit_jit_icall (cfg, info->func, sp);
12522 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12526 inline_costs += 10 * num_calls++;
12530 case CEE_MONO_LDPTR_CARD_TABLE:
12531 case CEE_MONO_LDPTR_NURSERY_START:
12532 case CEE_MONO_LDPTR_NURSERY_BITS:
12533 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12534 CHECK_STACK_OVF (1);
12537 case CEE_MONO_LDPTR_CARD_TABLE:
12538 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12540 case CEE_MONO_LDPTR_NURSERY_START:
12541 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12543 case CEE_MONO_LDPTR_NURSERY_BITS:
12544 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12546 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12547 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12553 inline_costs += 10 * num_calls++;
12556 case CEE_MONO_LDPTR: {
12559 CHECK_STACK_OVF (1);
12561 token = read32 (ip + 2);
12563 ptr = mono_method_get_wrapper_data (method, token);
12564 EMIT_NEW_PCONST (cfg, ins, ptr);
12567 inline_costs += 10 * num_calls++;
12568 /* Can't embed random pointers into AOT code */
12572 case CEE_MONO_JIT_ICALL_ADDR: {
12573 MonoJitICallInfo *callinfo;
12576 CHECK_STACK_OVF (1);
12578 token = read32 (ip + 2);
12580 ptr = mono_method_get_wrapper_data (method, token);
12581 callinfo = mono_find_jit_icall_by_addr (ptr);
12582 g_assert (callinfo);
12583 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12586 inline_costs += 10 * num_calls++;
12589 case CEE_MONO_ICALL_ADDR: {
12590 MonoMethod *cmethod;
12593 CHECK_STACK_OVF (1);
12595 token = read32 (ip + 2);
12597 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12599 if (cfg->compile_aot) {
12600 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
12602 * This is generated by emit_native_wrapper () to resolve the pinvoke address
12603 * before the call, it's not needed when using direct pinvoke.
12604 * This is not an optimization, but its used to avoid looking up pinvokes
12605 * on platforms which don't support dlopen ().
12607 EMIT_NEW_PCONST (cfg, ins, NULL);
12609 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12612 ptr = mono_lookup_internal_call (cmethod);
12614 EMIT_NEW_PCONST (cfg, ins, ptr);
12620 case CEE_MONO_VTADDR: {
12621 MonoInst *src_var, *src;
12627 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12628 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12633 case CEE_MONO_NEWOBJ: {
12634 MonoInst *iargs [2];
12636 CHECK_STACK_OVF (1);
12638 token = read32 (ip + 2);
12639 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12640 mono_class_init (klass);
12641 NEW_DOMAINCONST (cfg, iargs [0]);
12642 MONO_ADD_INS (cfg->cbb, iargs [0]);
12643 NEW_CLASSCONST (cfg, iargs [1], klass);
12644 MONO_ADD_INS (cfg->cbb, iargs [1]);
12645 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12647 inline_costs += 10 * num_calls++;
12650 case CEE_MONO_OBJADDR:
12653 MONO_INST_NEW (cfg, ins, OP_MOVE);
12654 ins->dreg = alloc_ireg_mp (cfg);
12655 ins->sreg1 = sp [0]->dreg;
12656 ins->type = STACK_MP;
12657 MONO_ADD_INS (cfg->cbb, ins);
12661 case CEE_MONO_LDNATIVEOBJ:
12663 * Similar to LDOBJ, but instead load the unmanaged
12664 * representation of the vtype to the stack.
12669 token = read32 (ip + 2);
12670 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12671 g_assert (klass->valuetype);
12672 mono_class_init (klass);
12675 MonoInst *src, *dest, *temp;
12678 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12679 temp->backend.is_pinvoke = 1;
12680 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12681 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12683 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12684 dest->type = STACK_VTYPE;
12685 dest->klass = klass;
12691 case CEE_MONO_RETOBJ: {
12693 * Same as RET, but return the native representation of a vtype
12696 g_assert (cfg->ret);
12697 g_assert (mono_method_signature (method)->pinvoke);
12702 token = read32 (ip + 2);
12703 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12705 if (!cfg->vret_addr) {
12706 g_assert (cfg->ret_var_is_local);
12708 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12710 EMIT_NEW_RETLOADA (cfg, ins);
12712 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12714 if (sp != stack_start)
12717 MONO_INST_NEW (cfg, ins, OP_BR);
12718 ins->inst_target_bb = end_bblock;
12719 MONO_ADD_INS (cfg->cbb, ins);
12720 link_bblock (cfg, cfg->cbb, end_bblock);
12721 start_new_bblock = 1;
12725 case CEE_MONO_CISINST:
12726 case CEE_MONO_CCASTCLASS: {
12731 token = read32 (ip + 2);
12732 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12733 if (ip [1] == CEE_MONO_CISINST)
12734 ins = handle_cisinst (cfg, klass, sp [0]);
12736 ins = handle_ccastclass (cfg, klass, sp [0]);
12741 case CEE_MONO_SAVE_LMF:
12742 case CEE_MONO_RESTORE_LMF:
12745 case CEE_MONO_CLASSCONST:
12746 CHECK_STACK_OVF (1);
12748 token = read32 (ip + 2);
12749 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12752 inline_costs += 10 * num_calls++;
12754 case CEE_MONO_NOT_TAKEN:
12755 cfg->cbb->out_of_line = TRUE;
12758 case CEE_MONO_TLS: {
12761 CHECK_STACK_OVF (1);
12763 key = (MonoTlsKey)read32 (ip + 2);
12764 g_assert (key < TLS_KEY_NUM);
12766 ins = mono_create_tls_get (cfg, key);
12768 if (cfg->compile_aot) {
12770 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12771 ins->dreg = alloc_preg (cfg);
12772 ins->type = STACK_PTR;
12774 g_assert_not_reached ();
12777 ins->type = STACK_PTR;
12778 MONO_ADD_INS (cfg->cbb, ins);
12783 case CEE_MONO_DYN_CALL: {
12784 MonoCallInst *call;
12786 /* It would be easier to call a trampoline, but that would put an
12787 * extra frame on the stack, confusing exception handling. So
12788 * implement it inline using an opcode for now.
12791 if (!cfg->dyn_call_var) {
12792 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12793 /* prevent it from being register allocated */
12794 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12797 /* Has to use a call inst since the local regalloc expects it */
12798 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12799 ins = (MonoInst*)call;
12801 ins->sreg1 = sp [0]->dreg;
12802 ins->sreg2 = sp [1]->dreg;
12803 MONO_ADD_INS (cfg->cbb, ins);
12805 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12808 inline_costs += 10 * num_calls++;
12812 case CEE_MONO_MEMORY_BARRIER: {
12814 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12818 case CEE_MONO_ATOMIC_STORE_I4: {
12819 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12825 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12826 ins->dreg = sp [0]->dreg;
12827 ins->sreg1 = sp [1]->dreg;
12828 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12829 MONO_ADD_INS (cfg->cbb, ins);
12834 case CEE_MONO_JIT_ATTACH: {
12835 MonoInst *args [16], *domain_ins;
12836 MonoInst *ad_ins, *jit_tls_ins;
12837 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12839 g_assert (!mono_threads_is_coop_enabled ());
12841 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12843 EMIT_NEW_PCONST (cfg, ins, NULL);
12844 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12846 ad_ins = mono_get_domain_intrinsic (cfg);
12847 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12849 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12850 NEW_BBLOCK (cfg, next_bb);
12851 NEW_BBLOCK (cfg, call_bb);
12853 if (cfg->compile_aot) {
12854 /* AOT code is only used in the root domain */
12855 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12857 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12859 MONO_ADD_INS (cfg->cbb, ad_ins);
12860 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12861 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12863 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12865 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12868 MONO_START_BB (cfg, call_bb);
12871 /* AOT code is only used in the root domain */
12872 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12873 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12874 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12877 MONO_START_BB (cfg, next_bb);
12883 case CEE_MONO_JIT_DETACH: {
12884 MonoInst *args [16];
12886 /* Restore the original domain */
12887 dreg = alloc_ireg (cfg);
12888 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12889 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12893 case CEE_MONO_CALLI_EXTRA_ARG: {
12895 MonoMethodSignature *fsig;
12899 * This is the same as CEE_CALLI, but passes an additional argument
12900 * to the called method in llvmonly mode.
12901 * This is only used by delegate invoke wrappers to call the
12902 * actual delegate method.
12904 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12907 token = read32 (ip + 2);
12915 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12918 if (cfg->llvm_only)
12919 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12921 n = fsig->param_count + fsig->hasthis + 1;
12928 if (cfg->llvm_only) {
12930 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12931 * cconv. This is set by mono_init_delegate ().
12933 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12934 MonoInst *callee = addr;
12935 MonoInst *call, *localloc_ins;
12936 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12937 int low_bit_reg = alloc_preg (cfg);
12939 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12940 NEW_BBLOCK (cfg, end_bb);
12942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12946 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12947 addr = emit_get_rgctx_sig (cfg, context_used,
12948 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12950 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12952 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12953 ins->dreg = alloc_preg (cfg);
12954 ins->inst_imm = 2 * SIZEOF_VOID_P;
12955 MONO_ADD_INS (cfg->cbb, ins);
12956 localloc_ins = ins;
12957 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12958 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12961 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12962 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12964 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12965 MONO_START_BB (cfg, is_gsharedvt_bb);
12966 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12967 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12968 ins->dreg = call->dreg;
12970 MONO_START_BB (cfg, end_bb);
12972 /* Caller uses a normal calling conv */
12974 MonoInst *callee = addr;
12975 MonoInst *call, *localloc_ins;
12976 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12977 int low_bit_reg = alloc_preg (cfg);
12979 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12980 NEW_BBLOCK (cfg, end_bb);
12982 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12984 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12986 /* Normal case: callee uses a normal cconv, no conversion is needed */
12987 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12988 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12989 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12990 MONO_START_BB (cfg, is_gsharedvt_bb);
12991 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12992 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12993 MONO_ADD_INS (cfg->cbb, addr);
12995 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12997 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12998 ins->dreg = alloc_preg (cfg);
12999 ins->inst_imm = 2 * SIZEOF_VOID_P;
13000 MONO_ADD_INS (cfg->cbb, ins);
13001 localloc_ins = ins;
13002 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13003 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
13004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
13006 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
13007 ins->dreg = call->dreg;
13008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
13010 MONO_START_BB (cfg, end_bb);
13013 /* Same as CEE_CALLI */
13014 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
13016 * We pass the address to the gsharedvt trampoline in the rgctx reg
13018 MonoInst *callee = addr;
13020 addr = emit_get_rgctx_sig (cfg, context_used,
13021 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
13022 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
13024 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
13028 if (!MONO_TYPE_IS_VOID (fsig->ret))
13029 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
13031 CHECK_CFG_EXCEPTION;
13035 constrained_class = NULL;
13038 case CEE_MONO_LDDOMAIN:
13039 CHECK_STACK_OVF (1);
13040 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
13044 case CEE_MONO_GET_LAST_ERROR:
13046 CHECK_STACK_OVF (1);
13048 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
13049 ins->dreg = alloc_dreg (cfg, STACK_I4);
13050 ins->type = STACK_I4;
13051 MONO_ADD_INS (cfg->cbb, ins);
13057 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
13063 case CEE_PREFIX1: {
13066 case CEE_ARGLIST: {
13067 /* somewhat similar to LDTOKEN */
13068 MonoInst *addr, *vtvar;
13069 CHECK_STACK_OVF (1);
13070 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
13072 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
13073 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
13075 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
13076 ins->type = STACK_VTYPE;
13077 ins->klass = mono_defaults.argumenthandle_class;
13087 MonoInst *cmp, *arg1, *arg2;
13095 * The following transforms:
13096 * CEE_CEQ into OP_CEQ
13097 * CEE_CGT into OP_CGT
13098 * CEE_CGT_UN into OP_CGT_UN
13099 * CEE_CLT into OP_CLT
13100 * CEE_CLT_UN into OP_CLT_UN
13102 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13104 MONO_INST_NEW (cfg, ins, cmp->opcode);
13105 cmp->sreg1 = arg1->dreg;
13106 cmp->sreg2 = arg2->dreg;
13107 type_from_op (cfg, cmp, arg1, arg2);
13109 add_widen_op (cfg, cmp, &arg1, &arg2);
13110 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13111 cmp->opcode = OP_LCOMPARE;
13112 else if (arg1->type == STACK_R4)
13113 cmp->opcode = OP_RCOMPARE;
13114 else if (arg1->type == STACK_R8)
13115 cmp->opcode = OP_FCOMPARE;
13117 cmp->opcode = OP_ICOMPARE;
13118 MONO_ADD_INS (cfg->cbb, cmp);
13119 ins->type = STACK_I4;
13120 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13121 type_from_op (cfg, ins, arg1, arg2);
13123 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13125 * The backends expect the fceq opcodes to do the
13128 ins->sreg1 = cmp->sreg1;
13129 ins->sreg2 = cmp->sreg2;
13132 MONO_ADD_INS (cfg->cbb, ins);
13138 MonoInst *argconst;
13139 MonoMethod *cil_method;
13141 CHECK_STACK_OVF (1);
13143 n = read32 (ip + 2);
13144 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13147 mono_class_init (cmethod->klass);
13149 mono_save_token_info (cfg, image, n, cmethod);
13151 context_used = mini_method_check_context_used (cfg, cmethod);
13153 cil_method = cmethod;
13154 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13155 emit_method_access_failure (cfg, method, cil_method);
13157 if (mono_security_core_clr_enabled ())
13158 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13161 * Optimize the common case of ldftn+delegate creation
13163 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13164 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13165 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13166 MonoInst *target_ins, *handle_ins;
13167 MonoMethod *invoke;
13168 int invoke_context_used;
13170 invoke = mono_get_delegate_invoke (ctor_method->klass);
13171 if (!invoke || !mono_method_signature (invoke))
13174 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13176 target_ins = sp [-1];
13178 if (mono_security_core_clr_enabled ())
13179 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13181 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13182 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13183 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13185 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13189 /* FIXME: SGEN support */
13190 if (invoke_context_used == 0 || cfg->llvm_only) {
13192 if (cfg->verbose_level > 3)
13193 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13194 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13197 CHECK_CFG_EXCEPTION;
13207 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13208 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13212 inline_costs += 10 * num_calls++;
13215 case CEE_LDVIRTFTN: {
13216 MonoInst *args [2];
13220 n = read32 (ip + 2);
13221 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13224 mono_class_init (cmethod->klass);
13226 context_used = mini_method_check_context_used (cfg, cmethod);
13228 if (mono_security_core_clr_enabled ())
13229 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13232 * Optimize the common case of ldvirtftn+delegate creation
13234 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13235 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13236 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13237 MonoInst *target_ins, *handle_ins;
13238 MonoMethod *invoke;
13239 int invoke_context_used;
13240 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13242 invoke = mono_get_delegate_invoke (ctor_method->klass);
13243 if (!invoke || !mono_method_signature (invoke))
13246 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13248 target_ins = sp [-1];
13250 if (mono_security_core_clr_enabled ())
13251 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13253 /* FIXME: SGEN support */
13254 if (invoke_context_used == 0 || cfg->llvm_only) {
13256 if (cfg->verbose_level > 3)
13257 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13258 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13261 CHECK_CFG_EXCEPTION;
13274 args [1] = emit_get_rgctx_method (cfg, context_used,
13275 cmethod, MONO_RGCTX_INFO_METHOD);
13278 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13280 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13283 inline_costs += 10 * num_calls++;
13287 CHECK_STACK_OVF (1);
13289 n = read16 (ip + 2);
13291 EMIT_NEW_ARGLOAD (cfg, ins, n);
13296 CHECK_STACK_OVF (1);
13298 n = read16 (ip + 2);
13300 NEW_ARGLOADA (cfg, ins, n);
13301 MONO_ADD_INS (cfg->cbb, ins);
13309 n = read16 (ip + 2);
13311 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13313 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13317 CHECK_STACK_OVF (1);
13319 n = read16 (ip + 2);
13321 EMIT_NEW_LOCLOAD (cfg, ins, n);
13326 unsigned char *tmp_ip;
13327 CHECK_STACK_OVF (1);
13329 n = read16 (ip + 2);
13332 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13338 EMIT_NEW_LOCLOADA (cfg, ins, n);
13347 n = read16 (ip + 2);
13349 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13351 emit_stloc_ir (cfg, sp, header, n);
13355 case CEE_LOCALLOC: {
13357 MonoBasicBlock *non_zero_bb, *end_bb;
13358 int alloc_ptr = alloc_preg (cfg);
13360 if (sp != stack_start)
13362 if (cfg->method != method)
13364 * Inlining this into a loop in a parent could lead to
13365 * stack overflows which is different behavior than the
13366 * non-inlined case, thus disable inlining in this case.
13368 INLINE_FAILURE("localloc");
13370 NEW_BBLOCK (cfg, non_zero_bb);
13371 NEW_BBLOCK (cfg, end_bb);
13373 /* if size != zero */
13374 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
13375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
13377 //size is zero, so result is NULL
13378 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
13379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
13381 MONO_START_BB (cfg, non_zero_bb);
13382 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13383 ins->dreg = alloc_ptr;
13384 ins->sreg1 = sp [0]->dreg;
13385 ins->type = STACK_PTR;
13386 MONO_ADD_INS (cfg->cbb, ins);
13388 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13390 ins->flags |= MONO_INST_INIT;
13392 MONO_START_BB (cfg, end_bb);
13393 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
13394 ins->type = STACK_PTR;
13400 case CEE_ENDFILTER: {
13401 MonoExceptionClause *clause, *nearest;
13406 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13408 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13409 ins->sreg1 = (*sp)->dreg;
13410 MONO_ADD_INS (cfg->cbb, ins);
13411 start_new_bblock = 1;
13415 for (cc = 0; cc < header->num_clauses; ++cc) {
13416 clause = &header->clauses [cc];
13417 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13418 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13419 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13422 g_assert (nearest);
13423 if ((ip - header->code) != nearest->handler_offset)
13428 case CEE_UNALIGNED_:
13429 ins_flag |= MONO_INST_UNALIGNED;
13430 /* FIXME: record alignment? we can assume 1 for now */
13434 case CEE_VOLATILE_:
13435 ins_flag |= MONO_INST_VOLATILE;
13439 ins_flag |= MONO_INST_TAILCALL;
13440 cfg->flags |= MONO_CFG_HAS_TAIL;
13441 /* Can't inline tail calls at this time */
13442 inline_costs += 100000;
13449 token = read32 (ip + 2);
13450 klass = mini_get_class (method, token, generic_context);
13451 CHECK_TYPELOAD (klass);
13452 if (generic_class_is_reference_type (cfg, klass))
13453 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13455 mini_emit_initobj (cfg, *sp, NULL, klass);
13459 case CEE_CONSTRAINED_:
13461 token = read32 (ip + 2);
13462 constrained_class = mini_get_class (method, token, generic_context);
13463 CHECK_TYPELOAD (constrained_class);
13467 case CEE_INITBLK: {
13468 MonoInst *iargs [3];
13472 /* Skip optimized paths for volatile operations. */
13473 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13474 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13475 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13476 /* emit_memset only works when val == 0 */
13477 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13480 iargs [0] = sp [0];
13481 iargs [1] = sp [1];
13482 iargs [2] = sp [2];
13483 if (ip [1] == CEE_CPBLK) {
13485 * FIXME: It's unclear whether we should be emitting both the acquire
13486 * and release barriers for cpblk. It is technically both a load and
13487 * store operation, so it seems like that's the sensible thing to do.
13489 * FIXME: We emit full barriers on both sides of the operation for
13490 * simplicity. We should have a separate atomic memcpy method instead.
13492 MonoMethod *memcpy_method = get_memcpy_method ();
13494 if (ins_flag & MONO_INST_VOLATILE)
13495 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13497 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13498 call->flags |= ins_flag;
13500 if (ins_flag & MONO_INST_VOLATILE)
13501 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13503 MonoMethod *memset_method = get_memset_method ();
13504 if (ins_flag & MONO_INST_VOLATILE) {
13505 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13506 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13508 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13509 call->flags |= ins_flag;
13520 ins_flag |= MONO_INST_NOTYPECHECK;
13522 ins_flag |= MONO_INST_NORANGECHECK;
13523 /* we ignore the no-nullcheck for now since we
13524 * really do it explicitly only when doing callvirt->call
13528 case CEE_RETHROW: {
13530 int handler_offset = -1;
13532 for (i = 0; i < header->num_clauses; ++i) {
13533 MonoExceptionClause *clause = &header->clauses [i];
13534 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13535 handler_offset = clause->handler_offset;
13540 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13542 if (handler_offset == -1)
13545 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13546 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13547 ins->sreg1 = load->dreg;
13548 MONO_ADD_INS (cfg->cbb, ins);
13550 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13551 MONO_ADD_INS (cfg->cbb, ins);
13554 link_bblock (cfg, cfg->cbb, end_bblock);
13555 start_new_bblock = 1;
13563 CHECK_STACK_OVF (1);
13565 token = read32 (ip + 2);
13566 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13567 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13570 val = mono_type_size (type, &ialign);
13572 MonoClass *klass = mini_get_class (method, token, generic_context);
13573 CHECK_TYPELOAD (klass);
13575 val = mono_type_size (&klass->byval_arg, &ialign);
13577 if (mini_is_gsharedvt_klass (klass))
13578 GSHAREDVT_FAILURE (*ip);
13580 EMIT_NEW_ICONST (cfg, ins, val);
13585 case CEE_REFANYTYPE: {
13586 MonoInst *src_var, *src;
13588 GSHAREDVT_FAILURE (*ip);
13594 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13596 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13597 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13598 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13603 case CEE_READONLY_:
13616 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13626 g_warning ("opcode 0x%02x not handled", *ip);
13630 if (start_new_bblock != 1)
13633 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13634 if (cfg->cbb->next_bb) {
13635 /* This could already be set because of inlining, #693905 */
13636 MonoBasicBlock *bb = cfg->cbb;
13638 while (bb->next_bb)
13640 bb->next_bb = end_bblock;
13642 cfg->cbb->next_bb = end_bblock;
13645 if (cfg->method == method && cfg->domainvar) {
13647 MonoInst *get_domain;
13649 cfg->cbb = init_localsbb;
13651 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13652 MONO_ADD_INS (cfg->cbb, get_domain);
13654 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13656 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13657 MONO_ADD_INS (cfg->cbb, store);
13660 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13661 if (cfg->compile_aot)
13662 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13663 mono_get_got_var (cfg);
13666 if (cfg->method == method && cfg->got_var)
13667 mono_emit_load_got_addr (cfg);
13669 if (init_localsbb) {
13670 cfg->cbb = init_localsbb;
13672 for (i = 0; i < header->num_locals; ++i) {
13673 emit_init_local (cfg, i, header->locals [i], init_locals);
13677 if (cfg->init_ref_vars && cfg->method == method) {
13678 /* Emit initialization for ref vars */
13679 // FIXME: Avoid duplication initialization for IL locals.
13680 for (i = 0; i < cfg->num_varinfo; ++i) {
13681 MonoInst *ins = cfg->varinfo [i];
13683 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13684 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13688 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13689 cfg->cbb = init_localsbb;
13690 emit_push_lmf (cfg);
13693 cfg->cbb = init_localsbb;
13694 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13697 MonoBasicBlock *bb;
13700 * Make seq points at backward branch targets interruptable.
13702 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13703 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13704 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13707 /* Add a sequence point for method entry/exit events */
13708 if (seq_points && cfg->gen_sdb_seq_points) {
13709 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13710 MONO_ADD_INS (init_localsbb, ins);
13711 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13712 MONO_ADD_INS (cfg->bb_exit, ins);
13716 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13717 * the code they refer to was dead (#11880).
13719 if (sym_seq_points) {
13720 for (i = 0; i < header->code_size; ++i) {
13721 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13724 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13725 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13732 if (cfg->method == method) {
13733 MonoBasicBlock *bb;
13734 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13735 if (bb == cfg->bb_init)
13738 bb->region = mono_find_block_region (cfg, bb->real_offset);
13740 mono_create_spvar_for_region (cfg, bb->region);
13741 if (cfg->verbose_level > 2)
13742 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13745 MonoBasicBlock *bb;
13746 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13747 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13748 bb->real_offset = inline_offset;
13752 if (inline_costs < 0) {
13755 /* Method is too large */
13756 mname = mono_method_full_name (method, TRUE);
13757 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13761 if ((cfg->verbose_level > 2) && (cfg->method == method))
13762 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13767 g_assert (!mono_error_ok (&cfg->error));
13771 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13775 set_exception_type_from_invalid_il (cfg, method, ip);
13779 g_slist_free (class_inits);
13780 mono_basic_block_free (original_bb);
13781 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13782 if (cfg->exception_type)
13785 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart so a
 * store of a known constant can encode the value as an immediate instead of
 * going through a register. Aborts on opcodes with no immediate form.
 * NOTE(review): the return type, switch header and default label are elided
 * in this excerpt.
 */
13789 store_membase_reg_to_store_membase_imm (int opcode)
13792 case OP_STORE_MEMBASE_REG:
13793 return OP_STORE_MEMBASE_IMM;
13794 case OP_STOREI1_MEMBASE_REG:
13795 return OP_STOREI1_MEMBASE_IMM;
13796 case OP_STOREI2_MEMBASE_REG:
13797 return OP_STOREI2_MEMBASE_IMM;
13798 case OP_STOREI4_MEMBASE_REG:
13799 return OP_STOREI4_MEMBASE_IMM;
13800 case OP_STOREI8_MEMBASE_REG:
13801 return OP_STOREI8_MEMBASE_IMM;
13803 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an IR opcode taking a register operand to the variant taking an
 * immediate operand (integer/long ALU ops, compares, membase stores,
 * x86/amd64-specific ops, localloc). Used to fold known constants directly
 * into instructions.
 * NOTE(review): the case labels for most returns are elided in this excerpt;
 * the pairings are implied by the mirrored *_IMM opcode names.
 */
13810 mono_op_to_op_imm (int opcode)
13814 return OP_IADD_IMM;
13816 return OP_ISUB_IMM;
13818 return OP_IDIV_IMM;
13820 return OP_IDIV_UN_IMM;
13822 return OP_IREM_IMM;
13824 return OP_IREM_UN_IMM;
13826 return OP_IMUL_IMM;
13828 return OP_IAND_IMM;
13832 return OP_IXOR_IMM;
13834 return OP_ISHL_IMM;
13836 return OP_ISHR_IMM;
13838 return OP_ISHR_UN_IMM;
13841 return OP_LADD_IMM;
13843 return OP_LSUB_IMM;
13845 return OP_LAND_IMM;
13849 return OP_LXOR_IMM;
13851 return OP_LSHL_IMM;
13853 return OP_LSHR_IMM;
13855 return OP_LSHR_UN_IMM;
/* Long rem has an immediate form only when longs fit in a native register */
13856 #if SIZEOF_REGISTER == 8
13858 return OP_LREM_IMM;
13862 return OP_COMPARE_IMM;
13864 return OP_ICOMPARE_IMM;
13866 return OP_LCOMPARE_IMM;
13868 case OP_STORE_MEMBASE_REG:
13869 return OP_STORE_MEMBASE_IMM;
13870 case OP_STOREI1_MEMBASE_REG:
13871 return OP_STOREI1_MEMBASE_IMM;
13872 case OP_STOREI2_MEMBASE_REG:
13873 return OP_STOREI2_MEMBASE_IMM;
13874 case OP_STOREI4_MEMBASE_REG:
13875 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific opcodes with immediate encodings */
13877 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13879 return OP_X86_PUSH_IMM;
13880 case OP_X86_COMPARE_MEMBASE_REG:
13881 return OP_X86_COMPARE_MEMBASE_IMM;
13883 #if defined(TARGET_AMD64)
13884 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13885 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13887 case OP_VOIDCALL_REG:
13888 return OP_VOIDCALL;
13896 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode. LDIND_REF (and, per the adjacent returns,
 * the native-int variant) use the pointer-sized OP_LOAD_MEMBASE. Aborts
 * on any other opcode.
 * NOTE(review): most case labels are elided in this excerpt.
 */
13903 ldind_to_load_membase (int opcode)
13907 return OP_LOADI1_MEMBASE;
13909 return OP_LOADU1_MEMBASE;
13911 return OP_LOADI2_MEMBASE;
13913 return OP_LOADU2_MEMBASE;
13915 return OP_LOADI4_MEMBASE;
13917 return OP_LOADU4_MEMBASE;
13919 return OP_LOAD_MEMBASE;
13920 case CEE_LDIND_REF:
13921 return OP_LOAD_MEMBASE;
13923 return OP_LOADI8_MEMBASE;
13925 return OP_LOADR4_MEMBASE;
13927 return OP_LOADR8_MEMBASE;
13929 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode; STIND_REF maps to the pointer-sized
 * OP_STORE_MEMBASE_REG. Aborts on any other opcode.
 * NOTE(review): most case labels are elided in this excerpt.
 */
13936 stind_to_store_membase (int opcode)
13940 return OP_STOREI1_MEMBASE_REG;
13942 return OP_STOREI2_MEMBASE_REG;
13944 return OP_STOREI4_MEMBASE_REG;
13946 case CEE_STIND_REF:
13947 return OP_STORE_MEMBASE_REG;
13949 return OP_STOREI8_MEMBASE_REG;
13951 return OP_STORER4_MEMBASE_REG;
13953 return OP_STORER8_MEMBASE_REG;
13955 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) opcode to the
 * corresponding OP_LOAD*_MEM (absolute address) opcode. Only x86/amd64
 * provide these absolute-address forms; the 8-byte variant additionally
 * requires 64-bit registers.
 */
13962 mono_load_membase_to_load_mem (int opcode)
13964 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13965 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13967 case OP_LOAD_MEMBASE:
13968 return OP_LOAD_MEM;
13969 case OP_LOADU1_MEMBASE:
13970 return OP_LOADU1_MEM;
13971 case OP_LOADU2_MEMBASE:
13972 return OP_LOADU2_MEM;
13973 case OP_LOADI4_MEMBASE:
13974 return OP_LOADI4_MEM;
13975 case OP_LOADU4_MEMBASE:
13976 return OP_LOADU4_MEM;
13977 #if SIZEOF_REGISTER == 8
13978 case OP_LOADI8_MEMBASE:
13979 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately written back by
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode that operates
 * directly on the memory destination (e.g. add reg -> [base+off]), folding
 * the ALU op and the store into one instruction. Only pointer-sized and
 * 4-byte (plus 8-byte on amd64) stores qualify.
 * NOTE(review): case labels and the fallthrough return are elided in this
 * excerpt; the qualifying-store guards and the mapped opcodes are visible.
 */
13988 op_to_op_dest_membase (int store_opcode, int opcode)
13990 #if defined(TARGET_X86)
/* Only fold into pointer-sized or 32-bit stores on x86 */
13991 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13996 return OP_X86_ADD_MEMBASE_REG;
13998 return OP_X86_SUB_MEMBASE_REG;
14000 return OP_X86_AND_MEMBASE_REG;
14002 return OP_X86_OR_MEMBASE_REG;
14004 return OP_X86_XOR_MEMBASE_REG;
14007 return OP_X86_ADD_MEMBASE_IMM;
14010 return OP_X86_SUB_MEMBASE_IMM;
14013 return OP_X86_AND_MEMBASE_IMM;
14016 return OP_X86_OR_MEMBASE_IMM;
14019 return OP_X86_XOR_MEMBASE_IMM;
14025 #if defined(TARGET_AMD64)
/* amd64 can additionally fold into 64-bit stores */
14026 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
14031 return OP_X86_ADD_MEMBASE_REG;
14033 return OP_X86_SUB_MEMBASE_REG;
14035 return OP_X86_AND_MEMBASE_REG;
14037 return OP_X86_OR_MEMBASE_REG;
14039 return OP_X86_XOR_MEMBASE_REG;
14041 return OP_X86_ADD_MEMBASE_IMM;
14043 return OP_X86_SUB_MEMBASE_IMM;
14045 return OP_X86_AND_MEMBASE_IMM;
14047 return OP_X86_OR_MEMBASE_IMM;
14049 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
14051 return OP_AMD64_ADD_MEMBASE_REG;
14053 return OP_AMD64_SUB_MEMBASE_REG;
14055 return OP_AMD64_AND_MEMBASE_REG;
14057 return OP_AMD64_OR_MEMBASE_REG;
14059 return OP_AMD64_XOR_MEMBASE_REG;
14062 return OP_AMD64_ADD_MEMBASE_IMM;
14065 return OP_AMD64_SUB_MEMBASE_IMM;
14068 return OP_AMD64_AND_MEMBASE_IMM;
14071 return OP_AMD64_OR_MEMBASE_IMM;
14074 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   On x86/amd64, fold a compare-result opcode followed by a 1-byte store
 * into a single SETcc-to-memory instruction (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE). Only byte stores qualify, since SETcc writes a
 * single byte.
 * NOTE(review): the case labels selecting between the EQ and NE mappings
 * are elided in this excerpt — presumably the ceq/cne-style compare
 * opcodes; confirm against the full source.
 */
14084 op_to_op_store_membase (int store_opcode, int opcode)
14086 #if defined(TARGET_X86) || defined(TARGET_AMD64)
14089 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14090 return OP_X86_SETEQ_MEMBASE;
14092 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14093 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given LOAD_OPCODE feeding the first source register of OPCODE, return
 * an x86/amd64 opcode that reads that operand directly from memory, folding
 * the load away. Returns the original/sentinel path (elided here) when no
 * folding is possible. On amd64 the choice between the 32-bit ICOMPARE and
 * 64-bit COMPARE memory forms depends on the load width and ILP32 mode.
 */
14101 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
14104 /* FIXME: This has sign extension issues */
14106 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14107 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer-sized and 32-bit loads can be folded on x86 */
14110 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14115 return OP_X86_PUSH_MEMBASE;
14116 case OP_COMPARE_IMM:
14117 case OP_ICOMPARE_IMM:
14118 return OP_X86_COMPARE_MEMBASE_IMM;
14121 return OP_X86_COMPARE_MEMBASE_REG;
14125 #ifdef TARGET_AMD64
14126 /* FIXME: This has sign extension issues */
14128 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14129 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Full-width push only for 64-bit loads (pointer loads are 64-bit unless ILP32) */
14134 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14135 return OP_X86_PUSH_MEMBASE;
14137 /* FIXME: This only works for 32 bit immediates
14138 case OP_COMPARE_IMM:
14139 case OP_LCOMPARE_IMM:
14140 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14141 return OP_AMD64_COMPARE_MEMBASE_IMM;
14143 case OP_ICOMPARE_IMM:
14144 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14145 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Pointer compares are 32-bit under ILP32, 64-bit otherwise */
14149 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14150 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14151 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14152 return OP_AMD64_COMPARE_MEMBASE_REG;
14155 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14156 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase, but folds a load feeding the SECOND source
 * register of OPCODE into an x86/amd64 reg,[mem] instruction form. On amd64
 * the 32-bit branch reuses the OP_X86_* reg/membase opcodes while 64-bit
 * operands use the OP_AMD64_* variants.
 */
14165 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer-sized and 32-bit loads can be folded */
14168 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14174 return OP_X86_COMPARE_REG_MEMBASE;
14176 return OP_X86_ADD_REG_MEMBASE;
14178 return OP_X86_SUB_REG_MEMBASE;
14180 return OP_X86_AND_REG_MEMBASE;
14182 return OP_X86_OR_REG_MEMBASE;
14184 return OP_X86_XOR_REG_MEMBASE;
14188 #ifdef TARGET_AMD64
/* 32-bit operands (including pointer loads under ILP32) */
14189 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14192 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14194 return OP_X86_ADD_REG_MEMBASE;
14196 return OP_X86_SUB_REG_MEMBASE;
14198 return OP_X86_AND_REG_MEMBASE;
14200 return OP_X86_OR_REG_MEMBASE;
14202 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit operands */
14204 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14208 return OP_AMD64_COMPARE_REG_MEMBASE;
14210 return OP_AMD64_ADD_REG_MEMBASE;
14212 return OP_AMD64_SUB_REG_MEMBASE;
14214 return OP_AMD64_AND_REG_MEMBASE;
14216 return OP_AMD64_OR_REG_MEMBASE;
14218 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but first filters out opcodes that are emulated
 * in software on this target (the #if guards cover 32-bit long shifts and
 * the MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV configurations),
 * since those have no usable native immediate form. All remaining opcodes
 * are delegated to mono_op_to_op_imm.
 * NOTE(review): the case labels and early returns inside the #if blocks are
 * elided in this excerpt.
 */
14227 mono_op_to_op_imm_noemul (int opcode)
14230 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14236 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14243 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14248 return mono_op_to_op_imm (opcode);
14253 * mono_handle_global_vregs:
14255 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14259 mono_handle_global_vregs (MonoCompile *cfg)
14261 gint32 *vreg_to_bb;
14262 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] tracks usage: 0 = not seen yet, block_num + 1 = seen in
 * exactly one bblock, -1 = seen in several bblocks.
 * NOTE(review): sizeof (gint32*) over-allocates (array holds gint32, not
 * pointers) — harmless, but sizeof (gint32) would be exact.
 */
14265 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14267 #ifdef MONO_ARCH_SIMD_INTRINSICS
14268 if (cfg->uses_simd_intrinsics)
14269 mono_simd_simplify_indirection (cfg);
14272 /* Find local vregs used in more than one bb */
14273 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14274 MonoInst *ins = bb->code;
14275 int block_num = bb->block_num;
14277 if (cfg->verbose_level > 2)
14278 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14281 for (; ins; ins = ins->next) {
14282 const char *spec = INS_INFO (ins->opcode);
14283 int regtype = 0, regindex;
14286 if (G_UNLIKELY (cfg->verbose_level > 2))
14287 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
14289 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
14291 for (regindex = 0; regindex < 4; regindex ++) {
14294 if (regindex == 0) {
14295 regtype = spec [MONO_INST_DEST];
14296 if (regtype == ' ')
14299 } else if (regindex == 1) {
14300 regtype = spec [MONO_INST_SRC1];
14301 if (regtype == ' ')
14304 } else if (regindex == 2) {
14305 regtype = spec [MONO_INST_SRC2];
14306 if (regtype == ' ')
14309 } else if (regindex == 3) {
14310 regtype = spec [MONO_INST_SRC3];
14311 if (regtype == ' ')
14316 #if SIZEOF_REGISTER == 4
14317 /* In the LLVM case, the long opcodes are not decomposed */
14318 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14320 * Since some instructions reference the original long vreg,
14321 * and some reference the two component vregs, it is quite hard
14322 * to determine when it needs to be global. So be conservative.
14324 if (!get_vreg_to_inst (cfg, vreg)) {
14325 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14327 if (cfg->verbose_level > 2)
14328 printf ("LONG VREG R%d made global.\n", vreg);
14332 * Make the component vregs volatile since the optimizations can
14333 * get confused otherwise.
14335 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14336 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14340 g_assert (vreg != -1);
14342 prev_bb = vreg_to_bb [vreg];
14343 if (prev_bb == 0) {
14344 /* 0 is a valid block num */
14345 vreg_to_bb [vreg] = block_num + 1;
14346 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers never need to be promoted to variables */
14347 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14350 if (!get_vreg_to_inst (cfg, vreg)) {
14351 if (G_UNLIKELY (cfg->verbose_level > 2))
14352 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the matching stack type for the vreg */
14356 if (vreg_is_ref (cfg, vreg))
14357 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14359 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14362 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14365 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14368 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14371 g_assert_not_reached ();
14375 /* Flag as having been used in more than one bb */
14376 vreg_to_bb [vreg] = -1;
14382 /* If a variable is used in only one bblock, convert it into a local vreg */
14383 for (i = 0; i < cfg->num_varinfo; i++) {
14384 MonoInst *var = cfg->varinfo [i];
14385 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14387 switch (var->type) {
14393 #if SIZEOF_REGISTER == 8
14396 #if !defined(TARGET_X86)
14397 /* Enabling this screws up the fp stack on x86 */
14400 if (mono_arch_is_soft_float ())
14404 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14408 /* Arguments are implicitly global */
14409 /* Putting R4 vars into registers doesn't work currently */
14410 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14411 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14413 * Make that the variable's liveness interval doesn't contain a call, since
14414 * that would cause the lvreg to be spilled, making the whole optimization
14417 /* This is too slow for JIT compilation */
/* NOTE(review): this scan dereferences vreg_to_bb (a gint32 array) as a
 * bblock pointer — it appears to be disabled/dead code in the full source;
 * confirm against the unelided file. */
14419 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14421 int def_index, call_index, ins_index;
14422 gboolean spilled = FALSE;
14427 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14428 const char *spec = INS_INFO (ins->opcode);
14430 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14431 def_index = ins_index;
/* A use after a call means the lvreg would be spilled: abort the conversion.
 * Fixed: the second disjunct duplicated the SRC1/sreg1 test; it must check
 * SRC2/sreg2, otherwise uses through the second source register are missed. */
14433 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14434 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
14435 if (call_index > def_index) {
14441 if (MONO_IS_CALL (ins))
14442 call_index = ins_index;
14452 if (G_UNLIKELY (cfg->verbose_level > 2))
14453 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; its vreg becomes a plain local vreg again */
14454 var->flags |= MONO_INST_IS_DEAD;
14455 cfg->vreg_to_inst [var->dreg] = NULL;
14462 * Compress the varinfo and vars tables so the liveness computation is faster and
14463 * takes up less space.
14466 for (i = 0; i < cfg->num_varinfo; ++i) {
14467 MonoInst *var = cfg->varinfo [i];
14468 if (pos < i && cfg->locals_start == i)
14469 cfg->locals_start = pos;
14470 if (!(var->flags & MONO_INST_IS_DEAD)) {
14472 cfg->varinfo [pos] = cfg->varinfo [i];
14473 cfg->varinfo [pos]->inst_c0 = pos;
14474 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14475 cfg->vars [pos].idx = pos;
14476 #if SIZEOF_REGISTER == 4
14477 if (cfg->varinfo [pos]->type == STACK_I8) {
14478 /* Modify the two component vars too */
14481 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14482 var1->inst_c0 = pos;
14483 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14484 var1->inst_c0 = pos;
14491 cfg->num_varinfo = pos;
14492 if (cfg->locals_start > cfg->num_varinfo)
14493 cfg->locals_start = cfg->num_varinfo;
14497 * mono_allocate_gsharedvt_vars:
14499 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14500 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14503 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* One mapping slot per vreg; 0 means "not a gsharedvt var" */
14507 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14509 for (i = 0; i < cfg->num_varinfo; ++i) {
14510 MonoInst *ins = cfg->varinfo [i];
14513 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get a runtime-info slot; idx is stored biased by +1 so 0 stays "unset" */
14514 if (i >= cfg->locals_start) {
14516 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14517 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14518 ins->opcode = OP_GSHAREDVT_LOCAL;
14519 ins->inst_imm = idx;
/* Arguments are marked with -1 and addressed relative to their reg offset */
14522 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14523 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14530 * mono_spill_global_vars:
14532 * Generate spill code for variables which are not allocated to registers,
14533 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14534 * code is generated which could be optimized by the local optimization passes.
14537 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14539 MonoBasicBlock *bb;
14541 int orig_next_vreg;
14542 guint32 *vreg_to_lvreg;
14544 guint32 i, lvregs_len;
14545 gboolean dest_has_lvreg = FALSE;
14546 MonoStackType stacktypes [128];
14547 MonoInst **live_range_start, **live_range_end;
14548 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14550 *need_local_opts = FALSE;
14552 memset (spec2, 0, sizeof (spec2));
14554 /* FIXME: Move this function to mini.c */
14555 stacktypes ['i'] = STACK_PTR;
14556 stacktypes ['l'] = STACK_I8;
14557 stacktypes ['f'] = STACK_R8;
14558 #ifdef MONO_ARCH_SIMD_INTRINSICS
14559 stacktypes ['x'] = STACK_VTYPE;
14562 #if SIZEOF_REGISTER == 4
14563 /* Create MonoInsts for longs */
14564 for (i = 0; i < cfg->num_varinfo; i++) {
14565 MonoInst *ins = cfg->varinfo [i];
14567 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14568 switch (ins->type) {
14573 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14576 g_assert (ins->opcode == OP_REGOFFSET);
14578 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14580 tree->opcode = OP_REGOFFSET;
14581 tree->inst_basereg = ins->inst_basereg;
14582 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14584 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14586 tree->opcode = OP_REGOFFSET;
14587 tree->inst_basereg = ins->inst_basereg;
14588 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14598 if (cfg->compute_gc_maps) {
14599 /* registers need liveness info even for !non refs */
14600 for (i = 0; i < cfg->num_varinfo; i++) {
14601 MonoInst *ins = cfg->varinfo [i];
14603 if (ins->opcode == OP_REGVAR)
14604 ins->flags |= MONO_INST_GC_TRACK;
14608 /* FIXME: widening and truncation */
14611 * As an optimization, when a variable allocated to the stack is first loaded into
14612 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14613 * the variable again.
14615 orig_next_vreg = cfg->next_vreg;
14616 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14617 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14621 * These arrays contain the first and last instructions accessing a given
14623 * Since we emit bblocks in the same order we process them here, and we
14624 * don't split live ranges, these will precisely describe the live range of
14625 * the variable, i.e. the instruction range where a valid value can be found
14626 * in the variables location.
14627 * The live range is computed using the liveness info computed by the liveness pass.
14628 * We can't use vmv->range, since that is an abstract live range, and we need
14629 * one which is instruction precise.
14630 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14632 /* FIXME: Only do this if debugging info is requested */
14633 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14634 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14635 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14636 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14638 /* Add spill loads/stores */
14639 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14642 if (cfg->verbose_level > 2)
14643 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14645 /* Clear vreg_to_lvreg array */
14646 for (i = 0; i < lvregs_len; i++)
14647 vreg_to_lvreg [lvregs [i]] = 0;
14651 MONO_BB_FOR_EACH_INS (bb, ins) {
14652 const char *spec = INS_INFO (ins->opcode);
14653 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14654 gboolean store, no_lvreg;
14655 int sregs [MONO_MAX_SRC_REGS];
14657 if (G_UNLIKELY (cfg->verbose_level > 2))
14658 mono_print_ins (ins);
14660 if (ins->opcode == OP_NOP)
14664 * We handle LDADDR here as well, since it can only be decomposed
14665 * when variable addresses are known.
14667 if (ins->opcode == OP_LDADDR) {
14668 MonoInst *var = (MonoInst *)ins->inst_p0;
14670 if (var->opcode == OP_VTARG_ADDR) {
14671 /* Happens on SPARC/S390 where vtypes are passed by reference */
14672 MonoInst *vtaddr = var->inst_left;
14673 if (vtaddr->opcode == OP_REGVAR) {
14674 ins->opcode = OP_MOVE;
14675 ins->sreg1 = vtaddr->dreg;
14677 else if (var->inst_left->opcode == OP_REGOFFSET) {
14678 ins->opcode = OP_LOAD_MEMBASE;
14679 ins->inst_basereg = vtaddr->inst_basereg;
14680 ins->inst_offset = vtaddr->inst_offset;
14683 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14684 /* gsharedvt arg passed by ref */
14685 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14687 ins->opcode = OP_LOAD_MEMBASE;
14688 ins->inst_basereg = var->inst_basereg;
14689 ins->inst_offset = var->inst_offset;
14690 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14691 MonoInst *load, *load2, *load3;
14692 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14693 int reg1, reg2, reg3;
14694 MonoInst *info_var = cfg->gsharedvt_info_var;
14695 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14699 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14702 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14704 g_assert (info_var);
14705 g_assert (locals_var);
14707 /* Mark the instruction used to compute the locals var as used */
14708 cfg->gsharedvt_locals_var_ins = NULL;
14710 /* Load the offset */
14711 if (info_var->opcode == OP_REGOFFSET) {
14712 reg1 = alloc_ireg (cfg);
14713 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14714 } else if (info_var->opcode == OP_REGVAR) {
14716 reg1 = info_var->dreg;
14718 g_assert_not_reached ();
14720 reg2 = alloc_ireg (cfg);
14721 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14722 /* Load the locals area address */
14723 reg3 = alloc_ireg (cfg);
14724 if (locals_var->opcode == OP_REGOFFSET) {
14725 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14726 } else if (locals_var->opcode == OP_REGVAR) {
14727 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14729 g_assert_not_reached ();
14731 /* Compute the address */
14732 ins->opcode = OP_PADD;
14736 mono_bblock_insert_before_ins (bb, ins, load3);
14737 mono_bblock_insert_before_ins (bb, load3, load2);
14739 mono_bblock_insert_before_ins (bb, load2, load);
14741 g_assert (var->opcode == OP_REGOFFSET);
14743 ins->opcode = OP_ADD_IMM;
14744 ins->sreg1 = var->inst_basereg;
14745 ins->inst_imm = var->inst_offset;
14748 *need_local_opts = TRUE;
14749 spec = INS_INFO (ins->opcode);
14752 if (ins->opcode < MONO_CEE_LAST) {
14753 mono_print_ins (ins);
14754 g_assert_not_reached ();
14758 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14762 if (MONO_IS_STORE_MEMBASE (ins)) {
14763 tmp_reg = ins->dreg;
14764 ins->dreg = ins->sreg2;
14765 ins->sreg2 = tmp_reg;
14768 spec2 [MONO_INST_DEST] = ' ';
14769 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14770 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14771 spec2 [MONO_INST_SRC3] = ' ';
14773 } else if (MONO_IS_STORE_MEMINDEX (ins))
14774 g_assert_not_reached ();
14779 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14780 printf ("\t %.3s %d", spec, ins->dreg);
14781 num_sregs = mono_inst_get_src_registers (ins, sregs);
14782 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14783 printf (" %d", sregs [srcindex]);
14790 regtype = spec [MONO_INST_DEST];
14791 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14794 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14795 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14796 MonoInst *store_ins;
14798 MonoInst *def_ins = ins;
14799 int dreg = ins->dreg; /* The original vreg */
14801 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14803 if (var->opcode == OP_REGVAR) {
14804 ins->dreg = var->dreg;
14805 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14807 * Instead of emitting a load+store, use a _membase opcode.
14809 g_assert (var->opcode == OP_REGOFFSET);
14810 if (ins->opcode == OP_MOVE) {
14814 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14815 ins->inst_basereg = var->inst_basereg;
14816 ins->inst_offset = var->inst_offset;
14819 spec = INS_INFO (ins->opcode);
14823 g_assert (var->opcode == OP_REGOFFSET);
14825 prev_dreg = ins->dreg;
14827 /* Invalidate any previous lvreg for this vreg */
14828 vreg_to_lvreg [ins->dreg] = 0;
14832 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14834 store_opcode = OP_STOREI8_MEMBASE_REG;
14837 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14839 #if SIZEOF_REGISTER != 8
14840 if (regtype == 'l') {
14841 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14842 mono_bblock_insert_after_ins (bb, ins, store_ins);
14843 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14844 mono_bblock_insert_after_ins (bb, ins, store_ins);
14845 def_ins = store_ins;
14850 g_assert (store_opcode != OP_STOREV_MEMBASE);
14852 /* Try to fuse the store into the instruction itself */
14853 /* FIXME: Add more instructions */
14854 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14855 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14856 ins->inst_imm = ins->inst_c0;
14857 ins->inst_destbasereg = var->inst_basereg;
14858 ins->inst_offset = var->inst_offset;
14859 spec = INS_INFO (ins->opcode);
14860 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14861 ins->opcode = store_opcode;
14862 ins->inst_destbasereg = var->inst_basereg;
14863 ins->inst_offset = var->inst_offset;
14867 tmp_reg = ins->dreg;
14868 ins->dreg = ins->sreg2;
14869 ins->sreg2 = tmp_reg;
14872 spec2 [MONO_INST_DEST] = ' ';
14873 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14874 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14875 spec2 [MONO_INST_SRC3] = ' ';
14877 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14878 // FIXME: The backends expect the base reg to be in inst_basereg
14879 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14881 ins->inst_basereg = var->inst_basereg;
14882 ins->inst_offset = var->inst_offset;
14883 spec = INS_INFO (ins->opcode);
14885 /* printf ("INS: "); mono_print_ins (ins); */
14886 /* Create a store instruction */
14887 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14889 /* Insert it after the instruction */
14890 mono_bblock_insert_after_ins (bb, ins, store_ins);
14892 def_ins = store_ins;
14895 * We can't assign ins->dreg to var->dreg here, since the
14896 * sregs could use it. So set a flag, and do it after
14899 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14900 dest_has_lvreg = TRUE;
14905 if (def_ins && !live_range_start [dreg]) {
14906 live_range_start [dreg] = def_ins;
14907 live_range_start_bb [dreg] = bb;
14910 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14913 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14914 tmp->inst_c1 = dreg;
14915 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14922 num_sregs = mono_inst_get_src_registers (ins, sregs);
14923 for (srcindex = 0; srcindex < 3; ++srcindex) {
14924 regtype = spec [MONO_INST_SRC1 + srcindex];
14925 sreg = sregs [srcindex];
14927 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14928 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14929 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14930 MonoInst *use_ins = ins;
14931 MonoInst *load_ins;
14932 guint32 load_opcode;
14934 if (var->opcode == OP_REGVAR) {
14935 sregs [srcindex] = var->dreg;
14936 //mono_inst_set_src_registers (ins, sregs);
14937 live_range_end [sreg] = use_ins;
14938 live_range_end_bb [sreg] = bb;
14940 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14943 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14944 /* var->dreg is a hreg */
14945 tmp->inst_c1 = sreg;
14946 mono_bblock_insert_after_ins (bb, ins, tmp);
14952 g_assert (var->opcode == OP_REGOFFSET);
14954 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14956 g_assert (load_opcode != OP_LOADV_MEMBASE);
14958 if (vreg_to_lvreg [sreg]) {
14959 g_assert (vreg_to_lvreg [sreg] != -1);
14961 /* The variable is already loaded to an lvreg */
14962 if (G_UNLIKELY (cfg->verbose_level > 2))
14963 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14964 sregs [srcindex] = vreg_to_lvreg [sreg];
14965 //mono_inst_set_src_registers (ins, sregs);
14969 /* Try to fuse the load into the instruction */
14970 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14971 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14972 sregs [0] = var->inst_basereg;
14973 //mono_inst_set_src_registers (ins, sregs);
14974 ins->inst_offset = var->inst_offset;
14975 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14976 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14977 sregs [1] = var->inst_basereg;
14978 //mono_inst_set_src_registers (ins, sregs);
14979 ins->inst_offset = var->inst_offset;
14981 if (MONO_IS_REAL_MOVE (ins)) {
14982 ins->opcode = OP_NOP;
14985 //printf ("%d ", srcindex); mono_print_ins (ins);
14987 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14989 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14990 if (var->dreg == prev_dreg) {
14992 * sreg refers to the value loaded by the load
14993 * emitted below, but we need to use ins->dreg
14994 * since it refers to the store emitted earlier.
14998 g_assert (sreg != -1);
14999 vreg_to_lvreg [var->dreg] = sreg;
15000 g_assert (lvregs_len < 1024);
15001 lvregs [lvregs_len ++] = var->dreg;
15005 sregs [srcindex] = sreg;
15006 //mono_inst_set_src_registers (ins, sregs);
15008 #if SIZEOF_REGISTER != 8
15009 if (regtype == 'l') {
15010 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
15011 mono_bblock_insert_before_ins (bb, ins, load_ins);
15012 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
15013 mono_bblock_insert_before_ins (bb, ins, load_ins);
15014 use_ins = load_ins;
15019 #if SIZEOF_REGISTER == 4
15020 g_assert (load_opcode != OP_LOADI8_MEMBASE);
15022 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
15023 mono_bblock_insert_before_ins (bb, ins, load_ins);
15024 use_ins = load_ins;
15028 if (var->dreg < orig_next_vreg) {
15029 live_range_end [var->dreg] = use_ins;
15030 live_range_end_bb [var->dreg] = bb;
15033 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
15036 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
15037 tmp->inst_c1 = var->dreg;
15038 mono_bblock_insert_after_ins (bb, ins, tmp);
15042 mono_inst_set_src_registers (ins, sregs);
15044 if (dest_has_lvreg) {
15045 g_assert (ins->dreg != -1);
15046 vreg_to_lvreg [prev_dreg] = ins->dreg;
15047 g_assert (lvregs_len < 1024);
15048 lvregs [lvregs_len ++] = prev_dreg;
15049 dest_has_lvreg = FALSE;
15053 tmp_reg = ins->dreg;
15054 ins->dreg = ins->sreg2;
15055 ins->sreg2 = tmp_reg;
15058 if (MONO_IS_CALL (ins)) {
15059 /* Clear vreg_to_lvreg array */
15060 for (i = 0; i < lvregs_len; i++)
15061 vreg_to_lvreg [lvregs [i]] = 0;
15063 } else if (ins->opcode == OP_NOP) {
15065 MONO_INST_NULLIFY_SREGS (ins);
15068 if (cfg->verbose_level > 2)
15069 mono_print_ins_index (1, ins);
15072 /* Extend the live range based on the liveness info */
15073 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
15074 for (i = 0; i < cfg->num_varinfo; i ++) {
15075 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
15077 if (vreg_is_volatile (cfg, vi->vreg))
15078 /* The liveness info is incomplete */
15081 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
15082 /* Live from at least the first ins of this bb */
15083 live_range_start [vi->vreg] = bb->code;
15084 live_range_start_bb [vi->vreg] = bb;
15087 if (mono_bitset_test_fast (bb->live_out_set, i)) {
15088 /* Live at least until the last ins of this bb */
15089 live_range_end [vi->vreg] = bb->last_ins;
15090 live_range_end_bb [vi->vreg] = bb;
15097 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
15098 * by storing the current native offset into MonoMethodVar->live_range_start/end.
15100 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
15101 for (i = 0; i < cfg->num_varinfo; ++i) {
15102 int vreg = MONO_VARINFO (cfg, i)->vreg;
15105 if (live_range_start [vreg]) {
15106 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
15108 ins->inst_c1 = vreg;
15109 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
15111 if (live_range_end [vreg]) {
15112 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
15114 ins->inst_c1 = vreg;
15115 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
15116 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
15118 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15123 if (cfg->gsharedvt_locals_var_ins) {
15124 /* Nullify if unused */
15125 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15126 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15129 g_free (live_range_start);
15130 g_free (live_range_end);
15131 g_free (live_range_start_bb);
15132 g_free (live_range_end_bb);
/*
 * mono_decompose_typecheck:
 *
 *   Lower a single OP_ISINST/OP_CASTCLASS instruction INS in BB into explicit
 * IR (cache-based helpers, inlined marshal wrappers, or the generic
 * handle_isinst ()/handle_castclass () expansion) and replace INS with the
 * emitted basic-block chain.
 * NOTE(review): this listing appears to have elided lines (e.g. the
 * is_isinst branch points); comments below describe only the visible code.
 */
15136 mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
15138 MonoInst *ret, *move, *source;
15139 MonoClass *klass = ins->klass;
15140 int context_used = mini_class_check_context_used (cfg, klass);
15141 int is_isinst = ins->opcode == OP_ISINST;
/* Only the two typecheck opcodes are expected here */
15142 g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
/* ins->sreg1 holds the object; make sure it is backed by a real variable */
15143 source = get_vreg_to_inst (cfg, ins->sreg1);
15144 if (!source || source == (MonoInst *) -1)
15145 source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
15146 g_assert (source && source != (MonoInst *) -1);
/* Emit the expansion into a fresh bblock chain rooted at first_bb */
15148 MonoBasicBlock *first_bb;
15149 NEW_BBLOCK (cfg, first_bb);
15150 cfg->cbb = first_bb;
/* Variant generic arguments: use the cache-based non-shared helpers */
15152 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
15154 ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
15156 ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
/* MarshalByRef/interface targets: inline the marshal isinst/castclass wrapper */
15157 } else if (!context_used && (mono_class_is_marshalbyref (klass) || mono_class_is_interface (klass))) {
15158 MonoInst *iargs [1];
15161 iargs [0] = source;
15163 MonoMethod *wrapper = mono_marshal_get_isinst (klass);
15164 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15166 MonoMethod *wrapper = mono_marshal_get_castclass (klass);
/* Record cast details so a failed cast can produce a descriptive exception */
15167 save_cast_details (cfg, klass, source->dreg, TRUE);
15168 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15169 reset_cast_details (cfg);
/* inline_method () reports the inlining cost; it must have succeeded */
15171 g_assert (costs > 0);
/* Generic fallback expansion (handles the context_used/shared cases too) */
15175 ret = handle_isinst (cfg, klass, source, context_used);
15177 ret = handle_castclass (cfg, klass, source, context_used);
/* Move the helper's result into the dreg the rest of the IR expects */
15179 EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
15181 g_assert (cfg->cbb->code || first_bb->code);
/* Splice the generated bblocks [first_bb, cfg->cbb] in place of INS */
15182 MonoInst *prev = ins->prev;
15183 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/*
 * mono_decompose_typechecks:
 *
 *   Walk every instruction of every bblock in CFG and lower each typecheck
 * opcode via mono_decompose_typecheck ().
 * NOTE(review): the case labels of the switch are elided in this listing;
 * presumably OP_ISINST/OP_CASTCLASS — confirm against the full source.
 */
15187 mono_decompose_typechecks (MonoCompile *cfg)
15189 for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
15191 MONO_BB_FOR_EACH_INS (bb, ins) {
15192 switch (ins->opcode) {
15195 mono_decompose_typecheck (cfg, bb, ins);
15205 * - use 'iadd' instead of 'int_add'
15206 * - handling ovf opcodes: decompose in method_to_ir.
15207 * - unify iregs/fregs
15208 * -> partly done, the missing parts are:
15209 * - a more complete unification would involve unifying the hregs as well, so
15210 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15211 * would no longer map to the machine hregs, so the code generators would need to
15212 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15213 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15214 * fp/non-fp branches speeds it up by about 15%.
15215 * - use sext/zext opcodes instead of shifts
15217 * - get rid of TEMPLOADs if possible and use vregs instead
15218 * - clean up usage of OP_P/OP_ opcodes
15219 * - cleanup usage of DUMMY_USE
15220 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15222 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15223 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15224 * - make sure handle_stack_args () is called before the branch is emitted
15225 * - when the new IR is done, get rid of all unused stuff
15226 * - COMPARE/BEQ as separate instructions or unify them ?
15227 * - keeping them separate allows specialized compare instructions like
15228 * compare_imm, compare_membase
15229 * - most back ends unify fp compare+branch, fp compare+ceq
15230 * - integrate mono_save_args into inline_method
15231 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15232 * - handle long shift opts on 32 bit platforms somehow: they require
15233 * 3 sregs (2 for arg1 and 1 for arg2)
15234 * - make byref a 'normal' type.
15235 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15236 * variable if needed.
15237 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15238 * like inline_method.
15239 * - remove inlining restrictions
15240 * - fix LNEG and enable cfold of INEG
15241 * - generalize x86 optimizations like ldelema as a peephole optimization
15242 * - add store_mem_imm for amd64
15243 * - optimize the loading of the interruption flag in the managed->native wrappers
15244 * - avoid special handling of OP_NOP in passes
15245 * - move code inserting instructions into one function/macro.
15246 * - try a coalescing phase after liveness analysis
15247 * - add float -> vreg conversion + local optimizations on !x86
15248 * - figure out how to handle decomposed branches during optimizations, ie.
15249 * compare+branch, op_jump_table+op_br etc.
15250 * - promote RuntimeXHandles to vregs
15251 * - vtype cleanups:
15252 * - add a NEW_VARLOADA_VREG macro
15253 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15254 * accessing vtype fields.
15255 * - get rid of I8CONST on 64 bit platforms
15256 * - dealing with the increase in code size due to branches created during opcode
15258 * - use extended basic blocks
15259 * - all parts of the JIT
15260 * - handle_global_vregs () && local regalloc
15261 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15262 * - sources of increase in code size:
15265 * - isinst and castclass
15266 * - lvregs not allocated to global registers even if used multiple times
15267 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15269 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15270 * - add all micro optimizations from the old JIT
15271 * - put tree optimizations into the deadce pass
15272 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15273 * specific function.
15274 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15275 * fcompare + branchCC.
15276 * - create a helper function for allocating a stack slot, taking into account
15277 * MONO_CFG_HAS_SPILLUP.
15279 * - merge the ia64 switch changes.
15280 * - optimize mono_regstate2_alloc_int/float.
15281 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15282 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15283 * parts of the tree could be separated by other instructions, killing the tree
15284 * arguments, or stores killing loads etc. Also, should we fold loads into other
15285 * instructions if the result of the load is used multiple times ?
15286 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15287 * - LAST MERGE: 108395.
15288 * - when returning vtypes in registers, generate IR and append it to the end of the
15289 * last bb instead of doing it in the epilog.
15290 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15298 - When to decompose opcodes:
15299 - earlier: this makes some optimizations hard to implement, since the low level IR
15300 no longer contains the necessary information. But it is easier to do.
15301 - later: harder to implement, enables more optimizations.
15302 - Branches inside bblocks:
15303 - created when decomposing complex opcodes.
15304 - branches to another bblock: harmless, but not tracked by the branch
15305 optimizations, so need to branch to a label at the start of the bblock.
15306 - branches to inside the same bblock: very problematic, trips up the local
15307 reg allocator. Can be fixed by splitting the current bblock, but that is a
15308 complex operation, since some local vregs can become global vregs etc.
15309 - Local/global vregs:
15310 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15311 local register allocator.
15312 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15313 structure, created by mono_create_var (). Assigned to hregs or the stack by
15314 the global register allocator.
15315 - When to do optimizations like alu->alu_imm:
15316 - earlier -> saves work later on since the IR will be smaller/simpler
15317 - later -> can work on more instructions
15318 - Handling of valuetypes:
15319 - When a vtype is pushed on the stack, a new temporary is created, an
15320 instruction computing its address (LDADDR) is emitted and pushed on
15321 the stack. Need to optimize cases when the vtype is used immediately as in
15322 argument passing, stloc etc.
15323 - Instead of the to_end stuff in the old JIT, simply call the function handling
15324 the values on the stack before emitting the last instruction of the bb.
15327 #else /* !DISABLE_JIT */
15329 MONO_EMPTY_SOURCE_FILE (method_to_ir);
15331 #endif /* !DISABLE_JIT */