2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <mono/utils/mono-compiler.h>
29 #ifdef HAVE_SYS_TIME_H
37 #include <mono/utils/memcheck.h>
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/mono-debug.h>
54 #include <mono/metadata/mono-debug-debugger.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/metadata/debug-mono-symfile.h>
63 #include <mono/utils/mono-compiler.h>
64 #include <mono/utils/mono-memory-model.h>
65 #include <mono/utils/mono-error-internals.h>
66 #include <mono/metadata/mono-basic-block.h>
67 #include <mono/metadata/reflection-internals.h>
68 #include <mono/utils/mono-threads-coop.h>
74 #include "jit-icalls.h"
76 #include "debugger-agent.h"
77 #include "seq-points.h"
78 #include "aot-compiler.h"
79 #include "mini-llvm.h"
81 #define BRANCH_COST 10
82 #define INLINE_LENGTH_LIMIT 20
84 /* These have 'cfg' as an implicit argument */
85 #define INLINE_FAILURE(msg) do { \
86 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
87 inline_failure (cfg, msg); \
88 goto exception_exit; \
91 #define CHECK_CFG_EXCEPTION do {\
92 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
93 goto exception_exit; \
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
139 /* Determine whether 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 inline static MonoInst*
156 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
158 /* helper methods signatures */
159 static MonoMethodSignature *helper_sig_domain_get;
160 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
161 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
162 static MonoMethodSignature *helper_sig_jit_thread_attach;
163 static MonoMethodSignature *helper_sig_get_tls_tramp;
164 static MonoMethodSignature *helper_sig_set_tls_tramp;
166 /* type loading helpers */
167 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, "RuntimeHelpers")
168 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, "DebuggableAttribute")
171 * Instruction metadata
179 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
180 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
186 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
191 /* keep in sync with the enum in mini.h */
194 #include "mini-ops.h"
199 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
200 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
202 * This should contain the index of the last sreg + 1. This is not the same
203 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
205 const gint8 ins_sreg_counts[] = {
206 #include "mini-ops.h"
211 #define MONO_INIT_VARINFO(vi,id) do { \
212 (vi)->range.first_use.pos.bid = 0xffff; \
/* NOTE(review): this extract is missing lines (return types, braces); comments only added. */

/* Allocate a fresh integer vreg in CFG. Public wrapper over the mini-internal alloc_ireg (). */
mono_alloc_ireg (MonoCompile *cfg)
	return alloc_ireg (cfg);

/* Allocate a fresh 64-bit (long) vreg in CFG. */
mono_alloc_lreg (MonoCompile *cfg)
	return alloc_lreg (cfg);

/* Allocate a fresh floating-point vreg in CFG. */
mono_alloc_freg (MonoCompile *cfg)
	return alloc_freg (cfg);

/* Allocate a fresh pointer-sized vreg in CFG. */
mono_alloc_preg (MonoCompile *cfg)
	return alloc_preg (cfg);

/* Allocate a destination vreg whose register class matches STACK_TYPE. */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
	return alloc_dreg (cfg, stack_type);
/* NOTE(review): extract is missing lines (return types, braces); comments only added. */

 * mono_alloc_ireg_ref:

 * Allocate an IREG, and mark it as holding a GC ref.
mono_alloc_ireg_ref (MonoCompile *cfg)
	return alloc_ireg_ref (cfg);

 * mono_alloc_ireg_mp:

 * Allocate an IREG, and mark it as holding a managed pointer.
mono_alloc_ireg_mp (MonoCompile *cfg)
	return alloc_ireg_mp (cfg);

 * mono_alloc_ireg_copy:

 * Allocate an IREG with the same GC type as VREG.
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
	/* Propagate the GC-tracking class of VREG (ref / managed pointer / plain int)
	 * so the GC maps for the copy stay precise. */
	if (vreg_is_ref (cfg, vreg))
		return alloc_ireg_ref (cfg);
	else if (vreg_is_mp (cfg, vreg))
		return alloc_ireg_mp (cfg);
	/* neither ref nor mp: a plain integer vreg */
	return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 * Map TYPE to the move opcode used when copying a value of that type
 * between vregs (e.g. OP_MOVE / OP_FMOVE / OP_RMOVE).
 * NOTE(review): extract is missing lines (several case labels / returns);
 * comments only added.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
	/* strip enum/generic wrappers down to the underlying representation */
	type = mini_get_underlying_type (type);
	switch (type->type) {
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
		/* r4fp: R4 values kept in their own register class, else widened to R8 */
		return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums move like their underlying integer type */
			type = mono_class_enum_basetype (type->data.klass);
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
		/* recurse on the generic type definition's open type */
		type = &type->data.generic_class->container_class->byval_arg;
		/* type variables only reach here under generic sharing */
		g_assert (cfg->gshared);
		if (mini_type_var_is_vt (type))
		return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: dump BB's in/out edges and every instruction in it to stdout,
 * prefixed with MSG. NOTE(review): extract is missing lines; comments only added.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	/* dump the instruction list */
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * Build the cached MonoMethodSignature objects used when emitting calls to
 * JIT helper/icall routines. Signature strings are "ret [args...]".
 */
mono_create_helper_signatures (void)
	helper_sig_domain_get = mono_create_icall_signature ("ptr");
	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
	helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
	helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
	helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
	helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debugger hook: trap here when unverified IL is hit and the
 * 'break-on-unverified' debug option is enabled. Kept out-of-line so a
 * breakpoint on it is reliable. */
static MONO_NEVER_INLINE void
break_on_unverified (void)
	if (mini_get_debug_options ()->break_on_unverified)
/* Mark the compilation as failed with a FieldAccessException naming FIELD and
 * the METHOD that illegally accessed it. NOTE(review): braces missing from
 * this extract; comments only added. */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
	mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	/* the error formatter copied the strings; free our temporaries */
	g_free (method_fname);
	g_free (field_fname);
/* Record that inlining the current method failed (see INLINE_FAILURE macro),
 * optionally logging MSG, and flag the cfg so the caller restarts without
 * inlining. */
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
407 static MONO_NEVER_INLINE void
408 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 if (cfg->verbose_level > 2) \
411 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record that gsharedvt (generic sharing for valuetypes) failed for OPCODE:
 * stash a formatted message on the cfg (freed elsewhere with the cfg),
 * optionally print it, and flag the cfg so compilation falls back to a
 * concrete instantiation. NOTE(review): braces missing from this extract. */
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
	cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
	if (cfg->verbose_level >= 2)
		printf ("%s\n", cfg->exception_message);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
425 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
426 * foo<T> (int i) { ldarg.0; box T; }
428 #define UNVERIFIED do { \
429 if (cfg->gsharedvt) { \
430 if (cfg->verbose_level > 2) \
431 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
432 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
433 goto exception_exit; \
435 break_on_unverified (); \
439 #define GET_BBLOCK(cfg,tblock,ip) do { \
440 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
442 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
443 NEW_BBLOCK (cfg, (tblock)); \
444 (tblock)->cil_code = (ip); \
445 ADD_BBLOCK (cfg, (tblock)); \
449 #if defined(TARGET_X86) || defined(TARGET_AMD64)
450 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
451 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
452 (dest)->dreg = alloc_ireg_mp ((cfg)); \
453 (dest)->sreg1 = (sr1); \
454 (dest)->sreg2 = (sr2); \
455 (dest)->inst_imm = (imm); \
456 (dest)->backend.shift_amount = (shift); \
457 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* Emit conversions so both operands of a binary opcode are of the same type */
/* NOTE(review): extract is missing lines (condition head, braces, arg1 R4 path
 * updates); comments only added. */
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
	MonoInst *arg1 = *arg1_ref;
	MonoInst *arg2 = *arg2_ref;
	/* one operand is R4 and the other R8: widen the R4 side to R8 */
		((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
		 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
		/* Mixing r4/r8 is allowed by the spec */
		if (arg1->type == STACK_R4) {
			int dreg = alloc_freg (cfg);
			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
			conv->type = STACK_R8;
		if (arg2->type == STACK_R4) {
			int dreg = alloc_freg (cfg);
			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
			conv->type = STACK_R8;
#if SIZEOF_REGISTER == 8
	/* FIXME: Need to add many more cases */
	/* on 64-bit, sign-extend an I4 operand mixed with a native pointer */
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
		int dr = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
		(ins)->sreg2 = widen->dreg;
504 #define ADD_BINOP(op) do { \
505 MONO_INST_NEW (cfg, ins, (op)); \
507 ins->sreg1 = sp [0]->dreg; \
508 ins->sreg2 = sp [1]->dreg; \
509 type_from_op (cfg, ins, sp [0], sp [1]); \
511 /* Have to insert a widening op */ \
512 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
513 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
518 #define ADD_UNOP(op) do { \
519 MONO_INST_NEW (cfg, ins, (op)); \
521 ins->sreg1 = sp [0]->dreg; \
522 type_from_op (cfg, ins, sp [0], NULL); \
524 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
525 MONO_ADD_INS ((cfg)->cbb, (ins)); \
526 *sp++ = mono_decompose_opcode (cfg, ins); \
529 #define ADD_BINCOND(next_block) do { \
532 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
533 cmp->sreg1 = sp [0]->dreg; \
534 cmp->sreg2 = sp [1]->dreg; \
535 type_from_op (cfg, cmp, sp [0], sp [1]); \
537 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
538 type_from_op (cfg, ins, sp [0], sp [1]); \
539 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
540 GET_BBLOCK (cfg, tblock, target); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_true_bb = tblock; \
543 if ((next_block)) { \
544 link_bblock (cfg, cfg->cbb, (next_block)); \
545 ins->inst_false_bb = (next_block); \
546 start_new_bblock = 1; \
548 GET_BBLOCK (cfg, tblock, ip); \
549 link_bblock (cfg, cfg->cbb, tblock); \
550 ins->inst_false_bb = tblock; \
551 start_new_bblock = 2; \
553 if (sp != stack_start) { \
554 handle_stack_args (cfg, stack_start, sp - stack_start); \
555 CHECK_UNVERIFIABLE (cfg); \
557 MONO_ADD_INS (cfg->cbb, cmp); \
558 MONO_ADD_INS (cfg->cbb, ins); \
562 * link_bblock: Links two basic blocks
564 * Links two basic blocks in the control flow graph: the 'from'
565 * argument is the starting block and the 'to' argument is the block
566 * that control flow reaches after 'from'.
/* Add a FROM -> TO edge to both blocks' edge arrays, unless it already exists.
 * NOTE(review): extract is missing lines (dedup 'return's, array store-backs,
 * count increments); comments only added. */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	MonoBasicBlock **newa;
	/* verbose tracing of the edge being added */
	if (from->cil_code) {
			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
			printf ("edge from entry to exit\n");
	/* skip if TO is already a successor of FROM */
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
	/* grow FROM's out-edge array by one (mempool-allocated, never freed individually) */
	newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
	for (i = 0; i < from->out_count; ++i) {
		newa [i] = from->out_bb [i];
	/* mirror the edge on TO's in-edge side, with the same dedup/grow pattern */
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
	newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
	for (i = 0; i < to->in_count; ++i) {
		newa [i] = to->in_bb [i];
/* Public wrapper over the static link_bblock () for use outside this file. */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
	link_bblock (cfg, from, to);
630 * mono_find_block_region:
632 * We mark each basic block with a region ID. We use that to avoid BB
633 * optimizations when blocks are in different regions.
636 * A region token that encodes where this region is, and information
637 * about the clause owner for this block.
639 * The region encodes the try/catch/filter clause that owns this block
640 * as well as the type. -1 is a special value that represents a block
641 * that is in none of try/catch/filter.
/* See the comment above: returns the region token ((clause_index + 1) << 8,
 * OR'ed with a MONO_REGION_* kind and the clause flags) for OFFSET, or the
 * none-of-the-above value when OFFSET is outside every clause.
 * NOTE(review): extract is missing lines (loop braces, final return). */
mono_find_block_region (MonoCompile *cfg, int offset)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	/* first pass: is OFFSET inside a filter or handler block? */
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* filter code lives between filter_offset and handler_offset */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
	/* second pass: is OFFSET inside a try block? */
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
/* Return whether OFFSET falls inside the handler of a finally or fault clause.
 * NOTE(review): extract is missing lines (braces, returns); comments only added. */
ip_in_finally_clause (MonoCompile *cfg, int offset)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* only finally/fault handlers are of interest here */
		if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
		if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect (as a GList of MonoExceptionClause*) the clauses of kind TYPE whose
 * try range contains IP but not TARGET — i.e. the clauses a branch from IP to
 * TARGET leaves, whose handlers must run first.
 * NOTE(review): extract is missing lines (res init, return). */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* the branch exits this clause's protected range */
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type)
				res = g_list_append (res, clause);
/* Get (or lazily create) the stack-pointer spill variable for REGION, cached
 * in cfg->spvars. NOTE(review): extract is missing lines (early return, return). */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
	var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;
	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for the handler at OFFSET, or NULL. */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
	return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get (or lazily create) the exception-object variable for the handler at
 * OFFSET, cached in cfg->exvars. Mirrors mono_create_spvar_for_region ().
 * NOTE(review): extract is missing lines (early return, return). */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
	var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;
	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
753 * Returns the type used in the eval stack when @type is loaded.
754 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Set INST->type (STACK_*) and INST->klass according to TYPE, i.e. the
 * representation TYPE gets on the IL evaluation stack (see the comment
 * above). NOTE(review): extract is missing lines (case labels for the
 * integer/float/byref arms, braces, breaks); comments only added. */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
	type = mini_get_underlying_type (type);
	inst->klass = klass = mono_class_from_mono_type (type);
	/* byref values are managed pointers on the stack */
	inst->type = STACK_MP;
	switch (type->type) {
		inst->type = STACK_INV;
		inst->type = STACK_I4;
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		inst->type = STACK_I8;
		/* R4 may stay R4 or widen to R8 depending on the backend */
		inst->type = cfg->r4_stack_type;
		inst->type = STACK_R8;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums behave as their underlying integer type */
			type = mono_class_enum_basetype (type->data.klass);
			inst->type = STACK_VTYPE;
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		/* recurse on the open generic definition's type */
		type = &type->data.generic_class->container_class->byval_arg;
		/* type variables only occur under generic sharing */
		g_assert (cfg->gshared);
		if (mini_is_gsharedvt_type (type)) {
			g_assert (cfg->gsharedvt);
			inst->type = STACK_VTYPE;
			type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
		g_error ("unknown type 0x%02x in eval stack type", type->type);
836 * The following tables are used to quickly validate the IL code in type_from_op ().
839 bin_num_table [STACK_MAX] [STACK_MAX] = {
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
845 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
853 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
856 /* reduce the size of this table */
858 bin_int_table [STACK_MAX] [STACK_MAX] = {
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
870 bin_comp_table [STACK_MAX] [STACK_MAX] = {
871 /* Inv i L p F & O vt r4 */
873 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
874 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
875 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
876 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
877 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
878 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
879 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
880 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
883 /* reduce the size of this table */
885 shift_table [STACK_MAX] [STACK_MAX] = {
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
889 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
890 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
891 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
892 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
893 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
897 * Tables to map from the non-specific opcode to the matching
898 * type-specific opcode.
900 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
902 binops_op_map [STACK_MAX] = {
903 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
906 /* handles from CEE_NEG to CEE_CONV_U8 */
908 unops_op_map [STACK_MAX] = {
909 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
912 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
914 ovfops_op_map [STACK_MAX] = {
915 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
918 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
920 ovf2ops_op_map [STACK_MAX] = {
921 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
924 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
926 ovf3ops_op_map [STACK_MAX] = {
927 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
930 /* handles from CEE_BEQ to CEE_BLT_UN */
932 beqops_op_map [STACK_MAX] = {
933 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
936 /* handles from CEE_CEQ to CEE_CLT_UN */
938 ceqops_op_map [STACK_MAX] = {
939 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
943 * Sets ins->type (the type on the eval stack) according to the
944 * type of the opcode and the arguments to it.
945 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
947 * FIXME: this function sets ins->type unconditionally in some cases, but
948 * it should set it to invalid for some types (a conv.x on an object)
/* See the comment above: derive INS->type (the eval-stack type of the result)
 * and specialize INS->opcode from the generic CIL opcode plus the stack types
 * of SRC1/SRC2, using the *_table / *_op_map lookup tables defined earlier.
 * STACK_INV marks invalid IL. NOTE(review): this extract is heavily sampled —
 * most case labels, breaks and braces are missing; comments only added. */
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
	switch (ins->opcode) {
		/* arithmetic binops: result type from the operand-pair table */
		/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* integer-only binops (and/or/xor/div/rem) */
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* shifts: result type follows the value being shifted */
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* compares: pick L/R/F/I variant from the first operand's width */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R4)
			ins->opcode = OP_RCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
			ins->opcode = OP_ICOMPARE;
	case OP_ICOMPARE_IMM:
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
		/* conditional branches (beq..blt.un) */
		ins->opcode += beqops_op_map [src1->type];
		/* ceq family: produces an I4 flag value */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		/* cgt/clt: only the '1' entries of bin_comp_table are valid here */
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		/* neg */
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		/* not: valid only on integer stack types */
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
		/* conv to small ints: result is I4 */
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
		/* conv.r.un */
		ins->type = STACK_R8;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_R_UN;
			ins->opcode = OP_LCONV_TO_R_UN;
	case CEE_CONV_OVF_I1:
	case CEE_CONV_OVF_U1:
	case CEE_CONV_OVF_I2:
	case CEE_CONV_OVF_U2:
	case CEE_CONV_OVF_I4:
	case CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_I_UN:
	case CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
	case CEE_CONV_OVF_I1_UN:
	case CEE_CONV_OVF_I2_UN:
	case CEE_CONV_OVF_I4_UN:
	case CEE_CONV_OVF_U1_UN:
	case CEE_CONV_OVF_U2_UN:
	case CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
		/* conv.u: to native unsigned int */
		ins->type = STACK_PTR;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
			/* on 32-bit, PTR-sized sources need no conversion */
			ins->opcode = OP_MOVE;
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_FCONV_TO_U;
		/* conv to 64-bit */
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
	case CEE_CONV_OVF_I8:
	case CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_U8_UN:
	case CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
		/* conv.r4 */
		ins->type = cfg->r4_stack_type;
		ins->opcode += unops_op_map [src1->type];
		/* conv.r8 */
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		ins->type = STACK_R8;
		/* conv.u2/u1 family via the overflow-ops map */
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
	case CEE_CONV_OVF_I:
	case CEE_CONV_OVF_U:
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
	case CEE_ADD_OVF_UN:
	case CEE_MUL_OVF_UN:
	case CEE_SUB_OVF_UN:
		/* checked arithmetic is not defined for floats */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
	case OP_LOADR4_MEMBASE:
		ins->type = cfg->r4_stack_type;
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
	/* managed pointers of unknown pointee default to object */
	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
1174 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1180 param_table [STACK_MAX] [STACK_MAX] = {
1185 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1190 switch (args->type) {
1200 for (i = 0; i < sig->param_count; ++i) {
1201 switch (args [i].type) {
1205 if (!sig->params [i]->byref)
1209 if (sig->params [i]->byref)
1211 switch (sig->params [i]->type) {
1212 case MONO_TYPE_CLASS:
1213 case MONO_TYPE_STRING:
1214 case MONO_TYPE_OBJECT:
1215 case MONO_TYPE_SZARRAY:
1216 case MONO_TYPE_ARRAY:
1223 if (sig->params [i]->byref)
1225 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1234 /*if (!param_table [args [i].type] [sig->params [i]->type])
1242 * When we need a pointer to the current domain many times in a method, we
1243 * call mono_domain_get() once and we store the result in a local variable.
1244 * This function returns the variable that represents the MonoDomain*.
/* Lazily create and return the local caching the MonoDomain* (see comment above). */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;
1255 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create and return the GOT-address variable; NULL when not AOT
 * compiling or the backend does not need a got var.
 * NOTE(review): extract is missing lines (NULL return, brace). */
mono_get_got_var (MonoCompile *cfg)
	if (!cfg->compile_aot || !cfg->backend->need_got_var)
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->got_var;
/* Lazily create and return the rgctx (runtime generic context) variable.
 * Only meaningful under generic sharing. */
mono_get_vtable_var (MonoCompile *cfg)
	g_assert (cfg->gshared);
	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
	return cfg->rgctx_var;
/* Map an evaluation-stack type (ins->type) back to a MonoType* */
1284 type_from_stack_type (MonoInst *ins) {
1285 switch (ins->type) {
1286 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1287 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1288 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1289 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1290 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* case label elided in this view (presumably STACK_MP) -- see full file */
1292 return &ins->klass->this_arg;
1293 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1294 case STACK_VTYPE: return &ins->klass->byval_arg;
1296 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse of type_from_stack_type (): map a MonoType to the STACK_* type
 * the JIT uses on its evaluation stack.  Many case labels are elided in
 * this view. */
1301 static G_GNUC_UNUSED int
1302 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. before classifying */
1304 t = mono_type_get_underlying_type (t);
1316 case MONO_TYPE_FNPTR:
1318 case MONO_TYPE_CLASS:
1319 case MONO_TYPE_STRING:
1320 case MONO_TYPE_OBJECT:
1321 case MONO_TYPE_SZARRAY:
1322 case MONO_TYPE_ARRAY:
/* R4 maps to STACK_R4 or STACK_R8 depending on backend support */
1328 return cfg->r4_stack_type;
1331 case MONO_TYPE_VALUETYPE:
1332 case MONO_TYPE_TYPEDBYREF:
1334 case MONO_TYPE_GENERICINST:
1335 if (mono_type_generic_inst_is_valuetype (t))
1341 g_assert_not_reached ();
/* Return the element class accessed by a CEE_LDELEM_*/CEE_STELEM_* opcode.
 * The case labels are elided in this view; the returned classes suggest the
 * usual unsigned/signed/int/float/ref ladder -- confirm against the full file. */
1348 array_access_to_klass (int opcode)
1352 return mono_defaults.byte_class;
1354 return mono_defaults.uint16_class;
1357 return mono_defaults.int_class;
1360 return mono_defaults.sbyte_class;
1363 return mono_defaults.int16_class;
1366 return mono_defaults.int32_class;
1368 return mono_defaults.uint32_class;
1371 return mono_defaults.int64_class;
1374 return mono_defaults.single_class;
1377 return mono_defaults.double_class;
1378 case CEE_LDELEM_REF:
1379 case CEE_STELEM_REF:
1380 return mono_defaults.object_class;
1382 g_assert_not_reached ();
1388 * We try to share variables when possible
/* Return a local variable to hold stack slot SLOT for a value of INS's type,
 * reusing a previously allocated one (cfg->intvars cache) when possible. */
1391 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1396 /* inlining can result in deeper stacks */
1397 if (slot >= cfg->header->max_stack)
1398 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache is indexed by (slot, stack type); ins->type starts at 1 */
1400 pos = ins->type - 1 + slot * STACK_MAX;
1402 switch (ins->type) {
/* Cache hit: return the existing variable */
1409 if ((vnum = cfg->intvars [pos]))
1410 return cfg->varinfo [vnum];
1411 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1412 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to a fresh variable */
1415 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for KEY so the AOT compiler can later emit a
 * reference to the item identified by TOKEN. */
1421 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1424 * Don't use this if a generic_context is set, since that means AOT can't
1425 * look up the method using just the image+token.
1426 * table == 0 means this is a reference made from a wrapper.
1428 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1429 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1430 jump_info_token->image = image;
1431 jump_info_token->token = token;
1432 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1437 * This function is called to handle items that are left on the evaluation stack
1438 * at basic block boundaries. What happens is that we save the values to local variables
1439 * and we reload them later when first entering the target basic block (with the
1440 * handle_loaded_temps () function).
1441 * A single joint point will use the same variables (stored in the array bb->out_stack or
1442 * bb->in_stack, if the basic block is before or after the joint point).
1444 * This function needs to be called _before_ emitting the last instruction of
1445 * the bb (i.e. before emitting a branch).
1446 * If the stack merge fails at a join point, cfg->unverifiable is set.
1449 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1452 MonoBasicBlock *bb = cfg->cbb;
1453 MonoBasicBlock *outb;
1454 MonoInst *inst, **locals;
1459 if (cfg->verbose_level > 3)
1460 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick the variables that carry the stack */
1461 if (!bb->out_scount) {
1462 bb->out_scount = count;
1463 //printf ("bblock %d has out:", bb->block_num);
/* Reuse a successor's in_stack if one already exists */
1465 for (i = 0; i < bb->out_count; ++i) {
1466 outb = bb->out_bb [i];
1467 /* exception handlers are linked, but they should not be considered for stack args */
1468 if (outb->flags & BB_EXCEPTION_HANDLER)
1470 //printf (" %d", outb->block_num);
1471 if (outb->in_stack) {
1473 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh temporaries */
1479 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1480 for (i = 0; i < count; ++i) {
1482 * try to reuse temps already allocated for this purpouse, if they occupy the same
1483 * stack slot and if they are of the same type.
1484 * This won't cause conflicts since if 'local' is used to
1485 * store one of the values in the in_stack of a bblock, then
1486 * the same variable will be used for the same outgoing stack
1488 * This doesn't work when inlining methods, since the bblocks
1489 * in the inlined methods do not inherit their in_stack from
1490 * the bblock they are inlined to. See bug #58863 for an
1493 if (cfg->inlined_method)
1494 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1496 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to the in_stack of every successor */
1501 for (i = 0; i < bb->out_count; ++i) {
1502 outb = bb->out_bb [i];
1503 /* exception handlers are linked, but they should not be considered for stack args */
1504 if (outb->flags & BB_EXCEPTION_HANDLER)
/* A successor already has an in_stack: depths must match or the IL is bad */
1506 if (outb->in_scount) {
1507 if (outb->in_scount != bb->out_scount) {
1508 cfg->unverifiable = TRUE;
1511 continue; /* check they are the same locals */
1513 outb->in_scount = count;
1514 outb->in_stack = bb->out_stack;
1517 locals = bb->out_stack;
/* Spill each stack slot into its variable and leave the variable on sp */
1519 for (i = 0; i < count; ++i) {
1520 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1521 inst->cil_code = sp [i]->cil_code;
1522 sp [i] = locals [i];
1523 if (cfg->verbose_level > 3)
1524 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1528 * It is possible that the out bblocks already have in_stack assigned, and
1529 * the in_stacks differ. In this case, we will store to all the different
1536 /* Find a bblock which has a different in_stack */
1538 while (bindex < bb->out_count) {
1539 outb = bb->out_bb [bindex];
1540 /* exception handlers are linked, but they should not be considered for stack args */
1541 if (outb->flags & BB_EXCEPTION_HANDLER) {
/* Store to each distinct in_stack variable set as well */
1545 if (outb->in_stack != locals) {
1546 for (i = 0; i < count; ++i) {
1547 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1548 inst->cil_code = sp [i]->cil_code;
1549 sp [i] = locals [i];
1550 if (cfg->verbose_level > 3)
1551 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1553 locals = outb->in_stack;
/* Emit IR that loads a runtime constant identified by (PATCH_TYPE, DATA).
 * Under AOT this becomes a patchable AOT constant; in the JIT the value is
 * resolved immediately and emitted as a plain pointer constant. */
1563 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1567 if (cfg->compile_aot) {
1568 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
/* JIT case: resolve the patch now and embed the resulting pointer */
1574 ji.type = patch_type;
1575 ji.data.target = data;
1576 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1577 mono_error_assert_ok (&error);
1579 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper around emit_runtime_constant () */
1585 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1587 return emit_runtime_constant (cfg, patch_type, data);
/* Emit inline IR that sets SIZE bytes at [destreg + offset] to VAL (only
 * VAL == 0 is supported).  Small aligned sizes use immediate stores; larger
 * regions are filled with a register loop of decreasing store widths. */
1591 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1595 g_assert (val == 0);
/* Single store fast path when the size fits a register and the alignment */
1600 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1603 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1611 #if SIZEOF_REGISTER == 8
1613 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize VAL in a register and store it repeatedly */
1619 val_reg = alloc_preg (cfg);
1621 if (SIZEOF_REGISTER == 8)
1622 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1624 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores */
1627 /* This could be optimized further if neccesary */
1629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* On 64 bit, 8-byte stores need either alignment or unaligned-access support */
1636 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1638 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remainder: progressively smaller stores for the trailing bytes */
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1660 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline IR that copies SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset], using the widest loads/stores the alignment (and the
 * backend's unaligned-access capability) permits. */
1667 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1674 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1675 g_assert (size < 10000);
/* Unaligned source/destination: copy byte-by-byte */
1678 /* This could be optimized further if neccesary */
1680 cur_reg = alloc_preg (cfg);
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte copies on 64 bit when unaligned access is allowed */
1689 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1691 cur_reg = alloc_preg (cfg);
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Trailing bytes: 4-, 2-, then 1-byte copies */
1701 cur_reg = alloc_preg (cfg);
1702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1717 cur_reg = alloc_preg (cfg);
1718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Create an OP_TLS_GET instruction reading the TLS slot for KEY, or return
 * NULL when no fast path is available (AOT, no fixed offset, or the arch has
 * no fast TLS).  The instruction is not added to any bblock here. */
1727 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1729 int tls_offset = mono_tls_get_tls_offset (key);
/* Offsets are process-specific, so they cannot be baked into AOT code */
1731 if (cfg->compile_aot)
1734 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1736 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1737 ins->dreg = mono_alloc_preg (cfg);
1738 ins->inst_offset = tls_offset;
/* Create an OP_TLS_SET instruction writing VALUE to the TLS slot for KEY, or
 * return NULL when no fast path is available (mirror of the getter above). */
1745 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1747 int tls_offset = mono_tls_get_tls_offset (key);
/* Offsets are process-specific, so they cannot be baked into AOT code */
1749 if (cfg->compile_aot)
1752 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1754 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1755 ins->sreg1 = value->dreg;
1756 ins->inst_offset = tls_offset;
/* Emit IR that reads the TLS slot for KEY.  Prefers the fast inline getter;
 * otherwise falls back to a tls trampoline (AOT) or a JIT icall. */
1764 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1766 MonoInst *fast_tls = NULL;
/* use_fallback_tls is a debug option forcing the slow path */
1768 if (!mini_get_debug_options ()->use_fallback_tls)
1769 fast_tls = mono_create_fast_tls_getter (cfg, key)
1772 MONO_ADD_INS (cfg->cbb, fast_tls);
1776 if (cfg->compile_aot) {
1779 * tls getters are critical pieces of code and we don't want to resolve them
1780 * through the standard plt/tramp mechanism since we might expose ourselves
1781 * to crashes and infinite recursions.
1783 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1784 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
/* JIT fallback: call the tls getter helper directly */
1786 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1787 return mono_emit_jit_icall (cfg, getter, NULL);
/* Emit IR that writes VALUE into the TLS slot for KEY.  Prefers the fast
 * inline setter; otherwise uses a tls trampoline (AOT) or a JIT icall. */
1792 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1794 MonoInst *fast_tls = NULL;
/* use_fallback_tls is a debug option forcing the slow path */
1796 if (!mini_get_debug_options ()->use_fallback_tls)
1797 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1800 MONO_ADD_INS (cfg->cbb, fast_tls);
1804 if (cfg->compile_aot) {
1806 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1807 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
/* JIT fallback: call the tls setter helper directly */
1809 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1810 return mono_emit_jit_icall (cfg, setter, &value);
1817 * Emit IR to push the current LMF onto the LMF stack.
1820 emit_push_lmf (MonoCompile *cfg)
1823 * Emit IR to push the LMF:
1824 * lmf_addr = <lmf_addr from tls>
1825 * lmf->lmf_addr = lmf_addr
1826 * lmf->prev_lmf = *lmf_addr
1829 MonoInst *ins, *lmf_ins;
/* Variant 1: the LMF is stored directly in the TLS_KEY_LMF slot */
1834 if (cfg->lmf_ir_mono_lmf) {
1835 MonoInst *lmf_vara_ins, *lmf_ins;
1836 /* Load current lmf */
1837 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF);
1839 EMIT_NEW_VARLOADA (cfg, lmf_vara_ins, cfg->lmf_var, NULL);
1840 /* Save previous_lmf */
1841 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_vara_ins->dreg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Install our LMF as the new TLS head */
1843 mono_create_tls_set (cfg, lmf_vara_ins, TLS_KEY_LMF);
/* Variant 2: the TLS slot holds a pointer to the LMF list head (lmf_addr) */
1845 int lmf_reg, prev_lmf_reg;
1847 * Store lmf_addr in a variable, so it can be allocated to a global register.
1849 if (!cfg->lmf_addr_var)
1850 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from the jit_tls structure... */
1853 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1855 int jit_tls_dreg = ins->dreg;
1857 lmf_reg = alloc_preg (cfg);
1858 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...the other reads it straight from the TLS_KEY_LMF_ADDR slot */
1860 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1863 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1865 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1866 lmf_reg = ins->dreg;
1868 prev_lmf_reg = alloc_preg (cfg);
1869 /* Save previous_lmf */
1870 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1871 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link our LMF as the new list head: *lmf_addr = lmf */
1873 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1880 * Emit IR to pop the current LMF from the LMF stack.
1883 emit_pop_lmf (MonoCompile *cfg)
1885 int lmf_reg, lmf_addr_reg;
1891 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1892 lmf_reg = ins->dreg;
/* Variant 1 (see emit_push_lmf): restore previous_lmf into the TLS slot */
1894 if (cfg->lmf_ir_mono_lmf) {
1895 /* Load previous_lmf */
1896 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, alloc_preg (cfg), lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1898 mono_create_tls_set (cfg, ins, TLS_KEY_LMF);
1902 * Emit IR to pop the LMF:
1903 * *(lmf->lmf_addr) = lmf->prev_lmf
1905 /* This could be called before emit_push_lmf () */
1906 if (!cfg->lmf_addr_var)
1907 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1908 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Unlink: store previous_lmf back through lmf_addr */
1910 prev_lmf_reg = alloc_preg (cfg);
1911 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1912 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave instrumentation call (FUNC) passing the
 * compiled method as the single argument. */
1917 emit_instrumentation_call (MonoCompile *cfg, void *func)
1919 MonoInst *iargs [1];
1922 * Avoid instrumenting inlined methods since it can
1923 * distort profiling results.
1925 if (cfg->method != cfg->current_method)
/* Only emit when enter/leave profiling is enabled */
1928 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1929 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1930 mono_emit_jit_icall (cfg, func, iargs);
/* Select the call opcode variant based on the call's return TYPE and on
 * whether it is an indirect call (CALLI -> *_REG) or a virtual call
 * (VIRT -> *_MEMBASE); otherwise the plain OP_*CALL form is used. */
1935 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1938 type = mini_get_underlying_type (type);
1939 switch (type->type) {
1940 case MONO_TYPE_VOID:
1941 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1948 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1952 case MONO_TYPE_FNPTR:
1953 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1954 case MONO_TYPE_CLASS:
1955 case MONO_TYPE_STRING:
1956 case MONO_TYPE_OBJECT:
1957 case MONO_TYPE_SZARRAY:
1958 case MONO_TYPE_ARRAY:
1959 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1962 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1965 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1967 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1969 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
/* Enums are handled by retrying with their underlying base type */
1970 case MONO_TYPE_VALUETYPE:
1971 if (type->data.klass->enumtype) {
1972 type = mono_class_enum_basetype (type->data.klass);
1975 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1976 case MONO_TYPE_TYPEDBYREF:
1977 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
/* Generic instances are retried with the container class's type */
1978 case MONO_TYPE_GENERICINST:
1979 type = &type->data.generic_class->container_class->byval_arg;
1982 case MONO_TYPE_MVAR:
1984 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1986 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1991 //XXX this ignores if t is byref
/* True for integral primitive types (MONO_TYPE_BOOLEAN..U8) and native
 * int/uint (MONO_TYPE_I..U); floating point types are excluded */
1992 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1995 * target_type_is_incompatible:
1996 * @cfg: MonoCompile context
1998 * Check that the item @arg on the evaluation stack can be stored
1999 * in the target type (can be a local, or field, etc).
2000 * The cfg arg can be used to check if we need verification or just
2003 * Returns: non-0 value if arg can't be stored on a target.
2006 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2008 MonoType *simple_type;
/* Byref targets: the stack value must be a managed or unmanaged pointer */
2011 if (target->byref) {
2012 /* FIXME: check that the pointed to types match */
2013 if (arg->type == STACK_MP) {
2014 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2015 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2016 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2018 /* if the target is native int& or same type */
2019 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2022 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2023 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2024 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2028 if (arg->type == STACK_PTR)
/* Non-byref targets: compare the lowered type against the stack type */
2033 simple_type = mini_get_underlying_type (target);
2034 switch (simple_type->type) {
2035 case MONO_TYPE_VOID:
2043 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2047 /* STACK_MP is needed when setting pinned locals */
2048 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2053 case MONO_TYPE_FNPTR:
2055 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2056 * in native int. (#688008).
2058 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2061 case MONO_TYPE_CLASS:
2062 case MONO_TYPE_STRING:
2063 case MONO_TYPE_OBJECT:
2064 case MONO_TYPE_SZARRAY:
2065 case MONO_TYPE_ARRAY:
2066 if (arg->type != STACK_OBJ)
2068 /* FIXME: check type compatibility */
2072 if (arg->type != STACK_I8)
/* R4 may be kept as STACK_R4 or widened to STACK_R8 depending on backend */
2076 if (arg->type != cfg->r4_stack_type)
2080 if (arg->type != STACK_R8)
/* Value types additionally require the exact class to match */
2083 case MONO_TYPE_VALUETYPE:
2084 if (arg->type != STACK_VTYPE)
2086 klass = mono_class_from_mono_type (simple_type);
2087 if (klass != arg->klass)
2090 case MONO_TYPE_TYPEDBYREF:
2091 if (arg->type != STACK_VTYPE)
2093 klass = mono_class_from_mono_type (simple_type);
2094 if (klass != arg->klass)
2097 case MONO_TYPE_GENERICINST:
2098 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2099 MonoClass *target_class;
2100 if (arg->type != STACK_VTYPE)
2102 klass = mono_class_from_mono_type (simple_type);
2103 target_class = mono_class_from_mono_type (target);
2104 /* The second cases is needed when doing partial sharing */
2105 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2109 if (arg->type != STACK_OBJ)
2111 /* FIXME: check type compatibility */
/* Generic type parameters only appear here under generic sharing */
2115 case MONO_TYPE_MVAR:
2116 g_assert (cfg->gshared);
2117 if (mini_type_var_is_vt (simple_type)) {
2118 if (arg->type != STACK_VTYPE)
2121 if (arg->type != STACK_OBJ)
2126 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2132 * Prepare arguments for passing to a function call.
2133 * Return a non-zero value if the arguments can't be passed to the given
2135 * The type checks are not yet complete and some conversions may need
2136 * casts on 32 or 64 bit architectures.
2138 * FIXME: implement this using target_type_is_incompatible ()
2141 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2143 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer */
2147 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2151 for (i = 0; i < sig->param_count; ++i) {
/* Byref parameters take a managed or unmanaged pointer */
2152 if (sig->params [i]->byref) {
2153 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2157 simple_type = mini_get_underlying_type (sig->params [i]);
2159 switch (simple_type->type) {
2160 case MONO_TYPE_VOID:
2169 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2175 case MONO_TYPE_FNPTR:
2176 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2179 case MONO_TYPE_CLASS:
2180 case MONO_TYPE_STRING:
2181 case MONO_TYPE_OBJECT:
2182 case MONO_TYPE_SZARRAY:
2183 case MONO_TYPE_ARRAY:
2184 if (args [i]->type != STACK_OBJ)
2189 if (args [i]->type != STACK_I8)
/* R4 arguments match whichever stack type the backend uses for floats */
2193 if (args [i]->type != cfg->r4_stack_type)
2197 if (args [i]->type != STACK_R8)
/* Enums are retried with their underlying base type */
2200 case MONO_TYPE_VALUETYPE:
2201 if (simple_type->data.klass->enumtype) {
2202 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2205 if (args [i]->type != STACK_VTYPE)
2208 case MONO_TYPE_TYPEDBYREF:
2209 if (args [i]->type != STACK_VTYPE)
/* Generic instances are retried with the container class's type */
2212 case MONO_TYPE_GENERICINST:
2213 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2216 case MONO_TYPE_MVAR:
2218 if (args [i]->type != STACK_VTYPE)
2222 g_error ("unknown type 0x%02x in check_call_signature",
/* Convert an OP_*CALL_MEMBASE (virtual call) opcode to the corresponding
 * direct OP_*CALL opcode.  The return statements are elided in this view. */
2230 callvirt_to_call (int opcode)
2233 case OP_CALL_MEMBASE:
2235 case OP_VOIDCALL_MEMBASE:
2237 case OP_FCALL_MEMBASE:
2239 case OP_RCALL_MEMBASE:
2241 case OP_VCALL_MEMBASE:
2243 case OP_LCALL_MEMBASE:
2246 g_assert_not_reached ();
/* Convert an OP_*CALL_MEMBASE (virtual call) opcode to the corresponding
 * indirect OP_*CALL_REG opcode */
2253 callvirt_to_call_reg (int opcode)
2256 case OP_CALL_MEMBASE:
2258 case OP_VOIDCALL_MEMBASE:
2259 return OP_VOIDCALL_REG;
2260 case OP_FCALL_MEMBASE:
2261 return OP_FCALL_REG;
2262 case OP_RCALL_MEMBASE:
2263 return OP_RCALL_REG;
2264 case OP_VCALL_MEMBASE:
2265 return OP_VCALL_REG;
2266 case OP_LCALL_MEMBASE:
2267 return OP_LCALL_REG;
2269 g_assert_not_reached ();
2275 /* Either METHOD or IMT_ARG needs to be set */
/* Emit the hidden IMT argument for an interface call: either copy IMT_ARG
 * into a fresh register or materialize METHOD as a runtime constant, then
 * attach the register to the call (LLVM and non-LLVM paths differ in how
 * the register is recorded). */
2277 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2281 if (COMPILE_LLVM (cfg)) {
2283 method_reg = alloc_preg (cfg);
2284 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2286 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2287 method_reg = ins->dreg;
/* LLVM remembers the vreg; the register allocation happens later */
2291 call->imt_arg_reg = method_reg;
2293 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same logic, but always binds to MONO_ARCH_IMT_REG */
2298 method_reg = alloc_preg (cfg);
2299 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2301 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2302 method_reg = ins->dreg;
2305 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo (patch descriptor) from MP and initialize it;
 * some field assignments are elided in this view. */
2308 static MonoJumpInfo *
2309 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2311 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2315 ji->data.target = target;
/* JIT wrapper around mono_class_check_context_used () (the cfg-dependent
 * guard, if any, is elided in this view) */
2321 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2324 return mono_class_check_context_used (klass);
/* JIT wrapper around mono_method_check_context_used () (the cfg-dependent
 * guard, if any, is elided in this view) */
2330 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2333 return mono_method_check_context_used (method);
2339 * check_method_sharing:
2341 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2344 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2346 gboolean pass_vtable = FALSE;
2347 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable */
2349 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2350 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2351 gboolean sharable = FALSE;
2353 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2357 * Pass vtable iff target method might
2358 * be shared, which means that sharing
2359 * is enabled for its class and its
2360 * context is sharable (and it's not a
2363 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) use an mrgctx instead of a vtable */
2367 if (mini_method_get_context (cmethod) &&
2368 mini_method_get_context (cmethod)->method_inst) {
2369 g_assert (!pass_vtable);
2371 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2374 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Both out parameters are optional */
2379 if (out_pass_vtable)
2380 *out_pass_vtable = pass_vtable;
2381 if (out_pass_mrgctx)
2382 *out_pass_mrgctx = pass_mrgctx;
/* Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * select the right call opcode (tail/calli/virtual), set up valuetype return
 * handling, and lower the arguments via the backend (or LLVM). */
2385 inline static MonoCallInst *
2386 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2387 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2391 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the 'leave' instrumentation first */
2399 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2401 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2403 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2406 call->signature = sig;
2407 call->rgctx_reg = rgctx;
2408 sig_ret = mini_get_underlying_type (sig->ret);
2410 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return, first variant: return through cfg->vret_addr */
2413 if (mini_type_is_vtype (sig_ret)) {
2414 call->vret_var = cfg->vret_addr;
2415 //g_assert_not_reached ();
/* Valuetype return, second variant: return into a fresh temporary */
2417 } else if (mini_type_is_vtype (sig_ret)) {
2418 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2421 temp->backend.is_pinvoke = sig->pinvoke;
2424 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2425 * address of return value to increase optimization opportunities.
2426 * Before vtype decomposition, the dreg of the call ins itself represents the
2427 * fact the call modifies the return value. After decomposition, the call will
2428 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2429 * will be transformed into an LDADDR.
2431 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2432 loada->dreg = alloc_preg (cfg);
2433 loada->inst_p0 = temp;
2434 /* We reference the call too since call->dreg could change during optimization */
2435 loada->inst_p1 = call;
2436 MONO_ADD_INS (cfg->cbb, loada);
2438 call->inst.dreg = temp->dreg;
2440 call->vret_var = loada;
/* Scalar return: just allocate a destination vreg of the right kind */
2441 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2442 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2444 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2445 if (COMPILE_SOFT_FLOAT (cfg)) {
2447 * If the call has a float argument, we would need to do an r8->r4 conversion using
2448 * an icall, but that cannot be done during the call sequence since it would clobber
2449 * the call registers + the stack. So we do it before emitting the call.
2451 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2453 MonoInst *in = call->args [i];
/* Index 0 is 'this' when sig->hasthis; map back to the formal parameter */
2455 if (i >= sig->hasthis)
2456 t = sig->params [i - sig->hasthis];
2458 t = &mono_defaults.int_class->byval_arg;
2459 t = mono_type_get_underlying_type (t);
2461 if (!t->byref && t->type == MONO_TYPE_R4) {
2462 MonoInst *iargs [1];
2466 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2468 /* The result will be in an int vreg */
2469 call->args [i] = conv;
2475 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the outgoing arguments with the appropriate backend */
2478 if (COMPILE_LLVM (cfg))
2479 mono_llvm_emit_call (cfg, call);
2481 mono_arch_emit_call (cfg, call);
2483 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area needed by any call */
2486 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2487 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument (already in RGCTX_REG) to CALL and mark the
 * compile as using the rgctx register */
2493 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2495 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2496 cfg->uses_rgctx_reg = TRUE;
2497 call->rgctx_reg = TRUE;
2499 call->rgctx_arg_reg = rgctx_reg;
/* Emit an indirect call through ADDR with signature SIG.  Optionally passes
 * an imt argument and/or an rgctx argument, and -- for pinvoke wrappers when
 * check_pinvoke_callconv is enabled -- verifies the native callee restored
 * the stack pointer. */
2503 inline static MonoInst*
2504 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2509 gboolean check_sp = FALSE;
2511 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2512 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2514 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value out of the way before argument lowering */
2519 rgctx_reg = mono_alloc_preg (cfg);
2520 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the stack pointer before the call for the callconv check */
2524 if (!cfg->stack_inbalance_var)
2525 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2527 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2528 ins->dreg = cfg->stack_inbalance_var->dreg;
2529 MONO_ADD_INS (cfg->cbb, ins);
2532 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The call target address goes in sreg1 for *_REG call opcodes */
2534 call->inst.sreg1 = addr->dreg;
2537 emit_imt_argument (cfg, call, NULL, imt_arg);
2539 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare the post-call stack pointer against the saved one */
2544 sp_reg = mono_alloc_preg (cfg);
2546 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2548 MONO_ADD_INS (cfg->cbb, ins);
2550 /* Restore the stack so we don't crash when throwing the exception */
2551 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2552 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2553 MONO_ADD_INS (cfg->cbb, ins);
2555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2556 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2560 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2562 return (MonoInst*)call;
2566 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2569 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit IR for a managed call to METHOD. Handles string ctors, remoting
 * wrappers, llvm-only interface/virtual dispatch, delegate Invoke fast
 * paths, devirtualization of sealed/non-virtual methods, and IMT/vtable
 * based virtual dispatch. Returns the call instruction.
 * NOTE(review): extraction dropped some structural lines (braces/else);
 * only visible lines are documented/changed here.
 *
 * FIX: line 2712 used `&= !MONO_INST_HAS_METHOD` (logical NOT). Since
 * MONO_INST_HAS_METHOD is a nonzero flag, `!flag` is 0, so the AND cleared
 * EVERY flag on the instruction, not just the HAS_METHOD bit. The intended
 * operator is bitwise NOT (`~`), which clears only that one bit.
 */
2572 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2573 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2575 #ifndef DISABLE_REMOTING
2576 gboolean might_be_remote = FALSE;
2578 gboolean virtual_ = this_ins != NULL;
2579 gboolean enable_for_aot = TRUE;
2582 MonoInst *call_target = NULL;
2584 gboolean need_unbox_trampoline;
2587 sig = mono_method_signature (method);
2589 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2590 g_assert_not_reached ();
2593 rgctx_reg = mono_alloc_preg (cfg);
2594 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2597 if (method->string_ctor) {
2598 /* Create the real signature */
2599 /* FIXME: Cache these */
2600 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2601 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2606 context_used = mini_method_check_context_used (cfg, method);
2608 #ifndef DISABLE_REMOTING
/* Remoting: a non-virtual call on a MarshalByRef (or object) receiver may hit a transparent proxy. */
2609 might_be_remote = this_ins && sig->hasthis &&
2610 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2611 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2613 if (might_be_remote && context_used) {
2616 g_assert (cfg->gshared);
/* Under generic sharing the remoting wrapper address comes from the rgctx. */
2618 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2620 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2624 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2625 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* Calls through object/interface slots may receive a boxed valuetype receiver. */
2627 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2629 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2631 #ifndef DISABLE_REMOTING
2632 if (might_be_remote)
2633 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2636 call->method = method;
2637 call->inst.flags |= MONO_INST_HAS_METHOD;
2638 call->inst.inst_left = this_ins;
2639 call->tail_call = tail;
2642 int vtable_reg, slot_reg, this_reg;
2645 this_reg = this_ins->dreg;
2647 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2648 MonoInst *dummy_use;
2650 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2652 /* Make a call to delegate->invoke_impl */
2653 call->inst.inst_basereg = this_reg;
2654 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2655 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2657 /* We must emit a dummy use here because the delegate trampoline will
2658 replace the 'this' argument with the delegate target making this activation
2659 no longer a root for the delegate.
2660 This is an issue for delegates that target collectible code such as dynamic
2661 methods of GC'able assemblies.
2663 For a test case look into #667921.
2665 FIXME: a dummy use is not the best way to do it as the local register allocator
2666 will put it on a caller save register and spil it around the call.
2667 Ideally, we would either put it on a callee save register or only do the store part.
2669 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2671 return (MonoInst*)call;
2674 if ((!cfg->compile_aot || enable_for_aot) &&
2675 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2676 (MONO_METHOD_IS_FINAL (method) &&
2677 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2678 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2680 * the method is not virtual, we just need to ensure this is not null
2681 * and then we can call the method directly.
2683 #ifndef DISABLE_REMOTING
2684 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2686 * The check above ensures method is not gshared, this is needed since
2687 * gshared methods can't have wrappers.
2689 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2693 if (!method->string_ctor)
2694 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2696 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2697 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2699 * the method is virtual, but we can statically dispatch since either
2700 * it's class or the method itself are sealed.
2701 * But first we need to ensure it's not a null reference.
2703 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2705 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2706 } else if (call_target) {
2707 vtable_reg = alloc_preg (cfg);
2708 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2710 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2711 call->inst.sreg1 = call_target->dreg;
/* Bitwise-clear only the HAS_METHOD bit (was `!`, which zeroed all flags). */
2712 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2714 vtable_reg = alloc_preg (cfg);
2715 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2716 if (mono_class_is_interface (method->klass)) {
2717 guint32 imt_slot = mono_method_get_imt_slot (method);
2718 emit_imt_argument (cfg, call, call->method, imt_arg);
2719 slot_reg = vtable_reg;
/* IMT table sits at negative offsets before the vtable pointer. */
2720 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2722 slot_reg = vtable_reg;
2723 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2724 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2726 g_assert (mono_method_signature (method)->generic_param_count);
2727 emit_imt_argument (cfg, call, call->method, imt_arg);
2731 call->inst.sreg1 = slot_reg;
2732 call->inst.inst_offset = offset;
2733 call->is_virtual = TRUE;
2737 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2740 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2742 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD using its own signature, no imt/rgctx extra args. */
2746 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2748 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/* Emit a direct call to the native address FUNC with signature SIG (no virtual/tail/rgctx handling). */
2752 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2759 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2762 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2764 return (MonoInst*)call;
/* Emit a call to the registered JIT icall FUNC, going through its managed wrapper. */
2768 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2770 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2774 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2778 * mono_emit_abs_call:
2780 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2782 inline static MonoInst*
2783 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2784 MonoMethodSignature *sig, MonoInst **args)
2786 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2790 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2793 if (cfg->abs_patches == NULL)
/* Keyed by pointer identity: each MonoJumpInfo* maps to itself. */
2794 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2795 g_hash_table_insert (cfg->abs_patches, ji, ji);
2796 ins = mono_emit_native_call (cfg, ji, sig, args);
2797 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Duplicate SIG and append one extra pointer-sized parameter at the end,
 * used to pass an rgctx/extra argument through an indirect call.
 * NOTE(review): g_malloc'd result is never freed (see FIXME below).
 */
2801 static MonoMethodSignature*
2802 sig_to_rgctx_sig (MonoMethodSignature *sig)
2804 // FIXME: memory allocation
2805 MonoMethodSignature *res;
2808 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2809 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2810 res->param_count = sig->param_count + 1;
2811 for (i = 0; i < sig->param_count; ++i)
2812 res->params [i] = sig->params [i];
2813 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2817 /* Make an indirect call to FSIG passing an additional argument */
2819 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2821 MonoMethodSignature *csig;
2822 MonoInst *args_buf [16];
2824 int i, pindex, tmp_reg;
2826 /* Make a call with an rgctx/extra arg */
/* Small arg lists use the stack buffer; larger ones go to the mempool. */
2827 if (fsig->param_count + 2 < 16)
2830 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2833 args [pindex ++] = orig_args [0];
2834 for (i = 0; i < fsig->param_count; ++i)
2835 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument (value currently in arg_reg) as the last parameter. */
2836 tmp_reg = alloc_preg (cfg);
2837 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2838 csig = sig_to_rgctx_sig (fsig);
2839 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2842 /* Emit an indirect call to the function descriptor ADDR */
2844 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2846 int addr_reg, arg_reg;
2847 MonoInst *call_target;
2849 g_assert (cfg->llvm_only);
2852 * addr points to a <addr, arg> pair, load both of them, and
2853 * make a call to addr, passing arg as an extra arg.
2855 addr_reg = alloc_preg (cfg);
2856 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2857 arg_reg = alloc_preg (cfg);
2858 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2860 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/* Whether icalls may be called directly, bypassing their managed wrapper. */
2864 direct_icalls_enabled (MonoCompile *cfg)
2866 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2868 if (cfg->compile_llvm && !cfg->llvm_only)
/* Wrappers are also required for sdb sequence points and when explicitly disabled. */
2871 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/* Emit a call to the icall described by INFO, inlining its wrapper when possible. */
2877 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2880 * Call the jit icall without a wrapper if possible.
2881 * The wrapper is needed for the following reasons:
2882 * - to handle exceptions thrown using mono_raise_exceptions () from the
2883 * icall function. The EH code needs the lmf frame pushed by the
2884 * wrapper to be able to unwind back to managed code.
2885 * - to be able to do stack walks for asynchronously suspended
2886 * threads when debugging.
2888 if (info->no_raise && direct_icalls_enabled (cfg)) {
2892 if (!info->wrapper_method) {
/* Lazily create and cache the wrapper on the icall info. */
2893 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2894 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
2896 mono_memory_barrier ();
2900 * Inline the wrapper method, which is basically a call to the C icall, and
2901 * an exception check.
2903 costs = inline_method (cfg, info->wrapper_method, NULL,
2904 args, NULL, il_offset, TRUE);
2905 g_assert (costs > 0);
2906 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2910 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * Widen a small-integer call result to register size. Needed because native
 * (pinvoke) or LLVM-compiled code might leave the upper bits uninitialized.
 */
2915 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2917 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2918 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2922 * Native code might return non register sized integers
2923 * without initializing the upper bits.
2925 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2926 case OP_LOADI1_MEMBASE:
2927 widen_op = OP_ICONV_TO_I1;
2929 case OP_LOADU1_MEMBASE:
2930 widen_op = OP_ICONV_TO_U1;
2932 case OP_LOADI2_MEMBASE:
2933 widen_op = OP_ICONV_TO_I2;
2935 case OP_LOADU2_MEMBASE:
2936 widen_op = OP_ICONV_TO_U2;
2942 if (widen_op != -1) {
2943 int dreg = alloc_preg (cfg);
2946 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original result. */
2947 widen->type = ins->type;
/* Emit IR that throws a MethodAccessException for METHOD accessing CIL_METHOD at runtime. */
2958 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
2960 MonoInst *args [16];
2962 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
2963 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
2965 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/* Return (and cache) the corlib-internal String.memcpy helper method. */
2969 get_memcpy_method (void)
2971 static MonoMethod *memcpy_method = NULL;
2972 if (!memcpy_method) {
2973 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2975 g_error ("Old corlib found. Install a new one");
2977 return memcpy_method;
/*
 * Recursively build a bitmap with one bit per pointer-sized slot of KLASS,
 * set for slots holding managed references (used to drive write barriers).
 */
2981 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2983 MonoClassField *field;
2984 gpointer iter = NULL;
2986 while ((field = mono_class_get_fields (klass, &iter))) {
2989 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; compensate. */
2991 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2992 if (mini_type_is_reference (mono_field_get_type (field))) {
2993 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2994 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2996 MonoClass *field_class = mono_class_from_mono_type (field->type);
2997 if (field_class->has_references)
/* Embedded struct with references: recurse at its offset. */
2998 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * Emit a GC write barrier for storing VALUE through PTR.
 * Picks, in order: a dedicated backend opcode, an inline card-table mark,
 * or a call to the generic write-barrier method.
 */
3004 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3006 int card_table_shift_bits;
3007 gpointer card_table_mask;
3009 MonoInst *dummy_use;
3010 int nursery_shift_bits;
3011 size_t nursery_size;
3013 if (!cfg->gen_write_barriers)
3016 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3018 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3020 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3023 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3024 wbarrier->sreg1 = ptr->dreg;
3025 wbarrier->sreg2 = value->dreg;
3026 MONO_ADD_INS (cfg->cbb, wbarrier);
3027 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3028 int offset_reg = alloc_preg (cfg);
/* Inline card marking: card_table [ (ptr >> shift) & mask ] = 1 */
3032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3033 if (card_table_mask)
3034 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3036 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3037 * IMM's larger than 32bits.
3039 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3040 card_reg = ins->dreg;
3042 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3043 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3045 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3046 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3049 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * Emit an unrolled, write-barrier-aware copy of SIZE bytes of KLASS from
 * iargs[1] to iargs[0]. Falls back to the bitmap-driven icall for large
 * copies. Returns on the paths where the copy could not be emitted inline
 * (alignment/size constraints) — presumably FALSE/TRUE results; the
 * extraction dropped the return statements, so confirm against the full file.
 */
3053 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3055 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3056 unsigned need_wb = 0;
3061 /*types with references can't have alignment smaller than sizeof(void*) */
3062 if (align < SIZEOF_VOID_P)
3065 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3066 if (size > 32 * SIZEOF_VOID_P)
3069 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3071 /* We don't unroll more than 5 stores to avoid code bloat. */
3072 if (size > 5 * SIZEOF_VOID_P) {
3073 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3074 size += (SIZEOF_VOID_P - 1);
3075 size &= ~(SIZEOF_VOID_P - 1);
3077 EMIT_NEW_ICONST (cfg, iargs [2], size);
3078 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3079 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3083 destreg = iargs [0]->dreg;
3084 srcreg = iargs [1]->dreg;
3087 dest_ptr_reg = alloc_preg (cfg);
3088 tmp_reg = alloc_preg (cfg);
3091 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy pointer-sized words, emitting a barrier per reference slot. */
3093 while (size >= SIZEOF_VOID_P) {
3094 MonoInst *load_inst;
3095 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3096 load_inst->dreg = tmp_reg;
3097 load_inst->inst_basereg = srcreg;
3098 load_inst->inst_offset = offset;
3099 MONO_ADD_INS (cfg->cbb, load_inst);
3101 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3104 emit_write_barrier (cfg, iargs [0], load_inst);
3106 offset += SIZEOF_VOID_P;
3107 size -= SIZEOF_VOID_P;
3110 /*tmp += sizeof (void*)*/
3111 if (size >= SIZEOF_VOID_P) {
3112 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3113 MONO_ADD_INS (cfg->cbb, iargs [0]);
3117 /* Those cannot be references since size < sizeof (void*) */
3119 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3120 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3126 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3127 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3133 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3143 * Emit code to copy a valuetype of type @klass whose address is stored in
3144 * @src->dreg to memory whose address is stored at @dest->dreg.
3147 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3149 MonoInst *iargs [4];
3152 MonoMethod *memcpy_method;
3153 MonoInst *size_ins = NULL;
3154 MonoInst *memcpy_ins = NULL;
3158 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3161 * This check breaks with spilled vars... need to handle it during verification anyway.
3162 * g_assert (klass && klass == src->klass && klass == dest->klass);
3165 if (mini_is_gsharedvt_klass (klass)) {
/* Gsharedvt: size and memcpy routine are only known at runtime, fetch from rgctx. */
3167 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3168 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3172 n = mono_class_native_size (klass, &align);
3174 n = mono_class_value_size (klass, &align);
3176 /* if native is true there should be no references in the struct */
3177 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3178 /* Avoid barriers when storing to the stack */
3179 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3180 (dest->opcode == OP_LDADDR))) {
3186 context_used = mini_class_check_context_used (cfg, klass);
3188 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3189 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3191 } else if (context_used) {
3192 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3194 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3195 if (!cfg->compile_aot)
3196 mono_class_compute_gc_descriptor (klass);
3200 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3202 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3207 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3208 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3209 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3214 iargs [2] = size_ins;
3216 EMIT_NEW_ICONST (cfg, iargs [2], n);
3218 memcpy_method = get_memcpy_method ();
3220 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3222 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return (and cache) the corlib-internal String.memset helper method. */
3227 get_memset_method (void)
3229 static MonoMethod *memset_method = NULL;
3230 if (!memset_method) {
3231 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3233 g_error ("Old corlib found. Install a new one");
3235 return memset_method;
/* Emit IR to zero-initialize a valuetype of type KLASS at address DEST->dreg. */
3239 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3241 MonoInst *iargs [3];
3244 MonoMethod *memset_method;
3245 MonoInst *size_ins = NULL;
3246 MonoInst *bzero_ins = NULL;
3247 static MonoMethod *bzero_method;
3249 /* FIXME: Optimize this for the case when dest is an LDADDR */
3250 mono_class_init (klass);
3251 if (mini_is_gsharedvt_klass (klass)) {
/* Gsharedvt: runtime size and bzero routine come from the rgctx. */
3252 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3253 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3255 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3256 g_assert (bzero_method);
3258 iargs [1] = size_ins;
3259 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3263 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3265 n = mono_class_value_size (klass, &align);
/* Small objects get an inline memset; larger ones call the helper method. */
3267 if (n <= sizeof (gpointer) * 8) {
3268 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3271 memset_method = get_memset_method ();
3273 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3274 EMIT_NEW_ICONST (cfg, iargs [2], n);
3275 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3282 * Emit IR to return either the this pointer for instance method,
3283 * or the mrgctx for static methods.
3286 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3288 MonoInst *this_ins = NULL;
3290 g_assert (cfg->gshared);
3292 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3293 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3294 !method->klass->valuetype)
3295 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
3297 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3298 MonoInst *mrgctx_loc, *mrgctx_var;
3300 g_assert (!this_ins);
3301 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: load the method rgctx from its dedicated local. */
3303 mrgctx_loc = mono_get_vtable_var (cfg);
3304 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3307 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3308 MonoInst *vtable_loc, *vtable_var;
3310 g_assert (!this_ins);
/* Static or valuetype method: the vtable (or mrgctx) lives in a local var. */
3312 vtable_loc = mono_get_vtable_var (cfg);
3313 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3315 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3316 MonoInst *mrgctx_var = vtable_var;
/* The local holds an mrgctx; dereference it to get the class vtable. */
3319 vtable_reg = alloc_preg (cfg);
3320 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3321 vtable_var->type = STACK_PTR;
/* Instance method: the vtable is reachable through 'this'. */
3329 vtable_reg = alloc_preg (cfg);
3330 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Allocate (from MP) an rgctx-entry patch descriptor wrapping PATCH_TYPE/PATCH_DATA. */
3335 static MonoJumpInfoRgctxEntry *
3336 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3338 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3339 res->method = method;
3340 res->in_mrgctx = in_mrgctx;
3341 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3342 res->data->type = patch_type;
3343 res->data->data.target = patch_data;
3344 res->info_type = info_type;
/*
 * Emit an inline fetch of rgctx entry ENTRY from RGCTX: walk the rgctx
 * array chain looking for an initialized slot, falling back to the
 * mono_fill_{method,class}_rgctx icalls when any link is NULL.
 * The first path (AOT-const slot index) is taken when the slot is not a
 * compile-time constant; the second emits the full inline fast path.
 */
3349 static inline MonoInst*
3350 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3352 MonoInst *args [16];
3355 // FIXME: No fastpath since the slot is not a compile time constant
3357 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3358 if (entry->in_mrgctx)
3359 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3361 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3365 * FIXME: This can be called during decompose, which is a problem since it creates
3367 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3369 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3371 MonoBasicBlock *is_null_bb, *end_bb;
3372 MonoInst *res, *ins, *call;
3375 slot = mini_get_rgctx_entry_slot (entry);
3377 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3378 index = MONO_RGCTX_SLOT_INDEX (slot);
3380 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Determine how many array hops are needed to reach INDEX. */
3381 for (depth = 0; ; ++depth) {
3382 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3384 if (index < size - 1)
3389 NEW_BBLOCK (cfg, end_bb);
3390 NEW_BBLOCK (cfg, is_null_bb);
3393 rgctx_reg = rgctx->dreg;
3395 rgctx_reg = alloc_preg (cfg);
3397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3398 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3399 NEW_BBLOCK (cfg, is_null_bb);
3401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of the rgctx array chain, bailing to slow path on NULL. */
3405 for (i = 0; i < depth; ++i) {
3406 int array_reg = alloc_preg (cfg);
3408 /* load ptr to next array */
3409 if (mrgctx && i == 0)
3410 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3412 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3413 rgctx_reg = array_reg;
3414 /* is the ptr null? */
3415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3416 /* if yes, jump to actual trampoline */
3417 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* +1 skips the leading "next array" slot of each rgctx array. */
3421 val_reg = alloc_preg (cfg);
3422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3423 /* is the slot null? */
3424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3425 /* if yes, jump to actual trampoline */
3426 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path hit: move the cached value into the result register. */
3429 res_reg = alloc_preg (cfg);
3430 MONO_INST_NEW (cfg, ins, OP_MOVE);
3431 ins->dreg = res_reg;
3432 ins->sreg1 = val_reg;
3433 MONO_ADD_INS (cfg->cbb, ins);
3435 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: call the runtime to compute and cache the slot value. */
3438 MONO_START_BB (cfg, is_null_bb);
3440 EMIT_NEW_ICONST (cfg, args [1], index);
3442 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3444 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3445 MONO_INST_NEW (cfg, ins, OP_MOVE);
3446 ins->dreg = res_reg;
3447 ins->sreg1 = call->dreg;
3448 MONO_ADD_INS (cfg->cbb, ins);
3449 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3451 MONO_START_BB (cfg, end_bb);
3460 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3463 static inline MonoInst*
3464 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3467 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* Otherwise go through the lazy-fetch trampoline via an ABS patch. */
3469 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to load the RGCTX_TYPE info for KLASS from the rgctx. */
3473 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3474 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3476 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3477 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3479 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the RGCTX_TYPE info for signature SIG from the rgctx. */
3483 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3484 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3486 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3487 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3489 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load gsharedvt call info (SIG + CMETHOD pair) from the rgctx. */
3493 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3494 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3496 MonoJumpInfoGSharedVtCall *call_info;
3497 MonoJumpInfoRgctxEntry *entry;
3500 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3501 call_info->sig = sig;
3502 call_info->method = cmethod;
3504 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3505 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3507 return emit_rgctx_fetch (cfg, rgctx, entry);
3511 * emit_get_rgctx_virt_method:
3513 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3516 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3517 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3519 MonoJumpInfoVirtMethod *info;
3520 MonoJumpInfoRgctxEntry *entry;
3523 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3524 info->klass = klass;
3525 info->method = virt_method;
3527 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3528 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3530 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the gsharedvt runtime info blob for CMETHOD from the rgctx. */
3534 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3535 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3537 MonoJumpInfoRgctxEntry *entry;
3540 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3541 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3543 return emit_rgctx_fetch (cfg, rgctx, entry);
3547 * emit_get_rgctx_method:
3549 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3550 * normal constants, else emit a load from the rgctx.
3553 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3554 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3556 if (!context_used) {
3559 switch (rgctx_type) {
3560 case MONO_RGCTX_INFO_METHOD:
3561 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3563 case MONO_RGCTX_INFO_METHOD_RGCTX:
3564 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types are only valid with a generic context. */
3567 g_assert_not_reached ();
3570 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3571 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3573 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the RGCTX_TYPE info for FIELD from the rgctx. */
3578 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3579 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3581 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3582 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3584 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * Return the index of the gsharedvt-info template entry matching
 * (DATA, RGCTX_TYPE), reusing an existing entry when possible and
 * appending a new one (growing the array geometrically) otherwise.
 */
3588 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3590 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3591 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing entry; LOCAL_OFFSET entries are never shared. */
3596 for (i = 0; i < info->num_entries; ++i) {
3597 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3599 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3603 if (info->num_entries == info->count_entries) {
3604 MonoRuntimeGenericContextInfoTemplate *new_entries;
/* Grow: double the capacity, starting from 16. */
3605 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3607 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3609 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3610 info->entries = new_entries;
3611 info->count_entries = new_count_entries;
3614 idx = info->num_entries;
3615 template_ = &info->entries [idx];
3616 template_->info_type = rgctx_type;
3617 template_->data = data;
3619 info->num_entries ++;
3625 * emit_get_gsharedvt_info:
3627 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3630 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3635 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3636 /* Load info->entries [idx] */
3637 dreg = alloc_preg (cfg);
3638 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Class-keyed variant of emit_get_gsharedvt_info, keyed on KLASS's byval type. */
3644 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3646 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3650 * On return the caller must check @klass for load errors.
3653 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3655 MonoInst *vtable_arg;
3658 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code loads the vtable from the rgctx; otherwise embed it as a constant. */
3661 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3662 klass, MONO_RGCTX_INFO_VTABLE);
3664 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3668 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3671 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3675 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3676 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3678 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3679 ins->sreg1 = vtable_arg->dreg;
3680 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline "if (!vtable->initialized) call mono_generic_class_init". */
3683 MonoBasicBlock *inited_bb;
3684 MonoInst *args [16];
3686 inited_reg = alloc_ireg (cfg);
3688 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3690 NEW_BBLOCK (cfg, inited_bb);
3692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3695 args [0] = vtable_arg;
3696 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3698 MONO_START_BB (cfg, inited_bb);
/* Emit a sequence point (for debugger single-stepping) at IP, but only when
 * sequence-point generation is enabled and we are compiling the outermost
 * method (not an inlinee: cfg->method == method). */
3703 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3707 if (cfg->gen_seq_points && cfg->method == method) {
3708 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Presumably guarded by the nonempty_stack parameter — guard line not visible here. */
3710 ins->flags |= MONO_INST_NONEMPTY_STACK;
3711 MONO_ADD_INS (cfg->cbb, ins);
/* When running with --debug=casts, record the source and destination classes of a
 * cast into the JIT TLS data (class_cast_from / class_cast_to) so a failing cast
 * can produce a detailed error message. No-op otherwise. */
3716 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3718 if (mini_get_debug_options ()->better_cast_details) {
3719 int vtable_reg = alloc_preg (cfg);
3720 int klass_reg = alloc_preg (cfg);
3721 MonoBasicBlock *is_null_bb = NULL;
3723 int to_klass_reg, context_used;
/* Skip the detail recording for null objects (presumably only when null_check is
 * set — the guard line is not visible here). */
3726 NEW_BBLOCK (cfg, is_null_bb);
3728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3732 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3734 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Load obj->vtable->klass: the runtime class the object actually has. */
3738 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3743 context_used = mini_class_check_context_used (cfg, klass);
3745 MonoInst *class_ins;
/* Target class: rgctx lookup under generic sharing, constant otherwise. */
3747 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3748 to_klass_reg = class_ins->dreg;
3750 to_klass_reg = alloc_preg (cfg);
3751 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3756 MONO_START_BB (cfg, is_null_bb);
/* Counterpart of mini_save_cast_details (): clear the recorded cast details once
 * the cast has succeeded, so stale data never leaks into a later failure report. */
3761 mini_reset_cast_details (MonoCompile *cfg)
3763 /* Reset the variables holding the cast details */
3764 if (mini_get_debug_options ()->better_cast_details) {
3765 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3766 /* It is enough to reset the from field */
3767 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3772 * On return the caller must check @array_class for load errors
/* Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for array covariance checks on stores). */
3775 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3777 int vtable_reg = alloc_preg (cfg);
3780 context_used = mini_class_check_context_used (cfg, array_class);
3782 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3784 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3786 if (cfg->opt & MONO_OPT_SHARED) {
3787 int class_reg = alloc_preg (cfg);
/* Shared code: compare obj->vtable->klass against a runtime class constant. */
3790 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3791 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3792 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3793 } else if (context_used) {
3794 MonoInst *vtable_ins;
/* Generic sharing: fetch the expected vtable through the rgctx and compare. */
3796 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3797 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3799 if (cfg->compile_aot) {
/* AOT: the vtable address is not known, materialize it as a patchable constant. */
3803 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3805 vt_reg = alloc_preg (cfg);
3806 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3807 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: the vtable pointer can be compared as an immediate. */
3810 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3816 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3818 mini_reset_cast_details (cfg);
3822 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3823 * generic code is generated.
/* Unboxing a Nullable<T> is implemented by calling the managed Nullable<T>.Unbox ()
 * method; returns the call instruction representing the result. */
3826 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3828 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3831 MonoInst *rgctx, *addr;
3833 /* FIXME: What if the class is shared? We might not
3834 have to get the address of the method from the
/* Shared generic code: load the (possibly not-yet-compiled) method address
 * through the rgctx and do an indirect call. */
3836 addr = emit_get_rgctx_method (cfg, context_used, method,
3837 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3838 if (cfg->llvm_only) {
/* llvm-only: record the signature so a gsharedvt in/out wrapper can be generated. */
3839 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3840 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3842 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3844 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, passing the vtable as an extra argument when
 * the target method requires it. */
3847 gboolean pass_vtable, pass_mrgctx;
3848 MonoInst *rgctx_arg = NULL;
3850 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3851 g_assert (!pass_mrgctx);
3854 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3857 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3860 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Emit the IR for an unbox of sp[0] to value type KLASS: a type check (throwing
 * InvalidCastException on mismatch) followed by computing the address of the
 * unboxed data, which is the object pointer plus the MonoObject header size. */
3865 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3869 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3870 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3871 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3872 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3874 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3875 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3876 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3878 /* FIXME: generics */
3879 g_assert (klass->rank == 0);
/* An unbox target is never an array, so any nonzero rank is a cast failure. */
3882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3883 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3885 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare element classes through an rgctx lookup. */
3889 MonoInst *element_class;
3891 /* This assertion is from the unboxcast insn */
3892 g_assert (klass->rank == 0);
3894 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3895 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3897 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3898 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared case: inline class check against the known element class. */
3900 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3901 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3902 mini_reset_cast_details (cfg);
/* Result: pointer to the boxed payload, right after the object header. */
3905 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3906 MONO_ADD_INS (cfg->cbb, add);
3907 add->type = STACK_MP;
/* Unbox OBJ to the gsharedvt type KLASS. Since at JIT time we don't know whether
 * KLASS is instantiated as a reference type, a vtype, or a Nullable, emit a
 * three-way branch on the class's BOX_TYPE (fetched from gsharedvt info) and
 * compute an address (addr_reg) from which the value can be loaded uniformly. */
3914 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3916 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3917 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3921 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime castclass check on the object (icall also handles the unbox semantics). */
3927 args [1] = klass_inst;
3930 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3932 NEW_BBLOCK (cfg, is_ref_bb);
3933 NEW_BBLOCK (cfg, is_nullable_bb);
3934 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on how the type is boxed at this instantiation. */
3935 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3937 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3940 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3942 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3943 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype (fallthrough) case: the value lives right after the object header. */
3947 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3948 MONO_ADD_INS (cfg->cbb, addr);
3950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3953 MONO_START_BB (cfg, is_ref_bb);
3955 /* Save the ref to a temporary */
3956 dreg = alloc_ireg (cfg);
3957 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3958 addr->dreg = addr_reg;
3959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3960 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3963 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built gsharedvt
 * signature, since the concrete method can't be constructed at JIT time. */
3966 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3967 MonoInst *unbox_call;
3968 MonoMethodSignature *unbox_sig;
3970 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3971 unbox_sig->ret = &klass->byval_arg;
3972 unbox_sig->param_count = 1;
3973 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3976 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3978 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
/* Spill the call result to a variable so its address can be taken. */
3980 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3981 addr->dreg = addr_reg;
3984 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3987 MONO_START_BB (cfg, end_bb);
/* All three paths converge: load the value through the unified address. */
3990 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3996 * Returns NULL and set the cfg exception on error.
/* Emit the allocation of a new instance of KLASS (optionally as the target of a
 * box). Picks between managed allocators, specialized icalls and the generic
 * allocation function depending on sharing mode, AOT and GC configuration. */
3999 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4001 MonoInst *iargs [2];
/* Generic-shared path (presumably guarded by context_used — guard line not visible). */
4006 MonoRgctxInfoType rgctx_info;
4007 MonoInst *iargs [2];
4008 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4010 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4012 if (cfg->opt & MONO_OPT_SHARED)
4013 rgctx_info = MONO_RGCTX_INFO_KLASS;
4015 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4016 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4018 if (cfg->opt & MONO_OPT_SHARED) {
/* MONO_OPT_SHARED needs the domain + class pair for ves_icall_object_new. */
4019 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4021 alloc_ftn = ves_icall_object_new;
4024 alloc_ftn = ves_icall_object_new_specific;
4027 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4028 if (known_instance_size) {
4029 int size = mono_class_instance_size (klass);
/* Instances can never be smaller than the object header. */
4030 if (size < sizeof (MonoObject))
4031 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4033 EMIT_NEW_ICONST (cfg, iargs [1], size);
4035 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4038 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths. */
4041 if (cfg->opt & MONO_OPT_SHARED) {
4042 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4043 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4045 alloc_ftn = ves_icall_object_new;
4046 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4047 /* This happens often in argument checking code, eg. throw new FooException... */
4048 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4049 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4050 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4052 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4053 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type-load error through the cfg. */
4057 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4058 cfg->exception_ptr = klass;
4062 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4064 if (managed_alloc) {
4065 int size = mono_class_instance_size (klass);
4066 if (size < sizeof (MonoObject))
4067 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4069 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4070 EMIT_NEW_ICONST (cfg, iargs [1], size);
4071 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4073 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in pointer-sized words. */
4075 guint32 lw = vtable->klass->instance_size;
4076 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4077 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4078 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4081 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4085 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4089 * Returns NULL and set the cfg exception on error.
/* Emit the IR for boxing VAL as an instance of KLASS. Handles three cases:
 * Nullable<T> (managed Box () call), gsharedvt types (runtime dispatch on the
 * instantiation's box type) and plain value types (alloc + store). */
4092 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4094 MonoInst *alloc, *ins;
4096 if (mono_class_is_nullable (klass)) {
4097 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4100 if (cfg->llvm_only && cfg->gsharedvt) {
4101 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4102 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4103 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4105 /* FIXME: What if the class is shared? We might not
4106 have to get the method address from the RGCTX. */
4107 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4108 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4109 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4111 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable case: direct call, passing the vtable if required. */
4114 gboolean pass_vtable, pass_mrgctx;
4115 MonoInst *rgctx_arg = NULL;
4117 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4118 g_assert (!pass_mrgctx);
4121 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4124 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4127 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4131 if (mini_is_gsharedvt_klass (klass)) {
4132 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4133 MonoInst *res, *is_ref, *src_var, *addr;
4136 dreg = alloc_ireg (cfg);
4138 NEW_BBLOCK (cfg, is_ref_bb);
4139 NEW_BBLOCK (cfg, is_nullable_bb);
4140 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on how the type is boxed at this instantiation. */
4141 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype (fallthrough) case: allocate and copy the value after the header. */
4149 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4152 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4153 ins->opcode = OP_STOREV_MEMBASE;
4155 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4156 res->type = STACK_OBJ;
4158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4161 MONO_START_BB (cfg, is_ref_bb);
4163 /* val is a vtype, so has to load the value manually */
4164 src_var = get_vreg_to_inst (cfg, val->dreg);
4166 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4167 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4172 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box through a hand-built gsharedvt signature. */
4175 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4176 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4178 MonoMethodSignature *box_sig;
4181 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4182 * construct that method at JIT time, so have to do things by hand.
4184 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4185 box_sig->ret = &mono_defaults.object_class->byval_arg;
4186 box_sig->param_count = 1;
4187 box_sig->params [0] = &klass->byval_arg;
4190 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4192 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4193 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4194 res->type = STACK_OBJ;
4198 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4200 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate the box and store the value after the header. */
4204 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4208 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-initialized whitelist of corlib class names whose icalls are safe to
 * call directly (they never raise exceptions through mono_raise_exception ()). */
4213 static GHashTable* direct_icall_type_hash;
/* Return whether CMETHOD (an icall) may be called directly instead of going
 * through a wrapper. */
4216 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4218 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4219 if (!direct_icalls_enabled (cfg))
4223 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4224 * Whitelist a few icalls for now.
4226 if (!direct_icall_type_hash) {
4227 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4229 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4230 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4231 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4232 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the pointer becomes visible to other threads. */
4233 mono_memory_barrier ();
4234 direct_icall_type_hash = h;
4237 if (cmethod->klass == mono_defaults.math_class)
4239 /* No locking needed */
4240 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* Return whether CMETHOD inspects its caller via a stack walk (and therefore
 * must not be optimized in ways that hide the caller, e.g. inlining/tail calls).
 * Currently matches System.Type.GetType (). */
4246 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4248 if (cmethod->klass == mono_defaults.systemtype_class) {
4249 if (!strcmp (cmethod->name, "GetType"))
4255 static G_GNUC_UNUSED MonoInst*
/* Intrinsic expansion of Enum.HasFlag (): emit (*enum_this & enum_flag) == enum_flag
 * as inline IR, using 32-bit or 64-bit opcodes depending on the enum's underlying type. */
4256 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4258 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4259 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Decide between the I4 and I8 code paths from the underlying type
 * (64-bit cases only exist when SIZEOF_REGISTER == 8). */
4262 switch (enum_type->type) {
4265 #if SIZEOF_REGISTER == 8
4277 MonoInst *load, *and_, *cmp, *ceq;
4278 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4279 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4280 int dest_reg = alloc_ireg (cfg);
4282 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4283 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4284 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4285 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
/* HasFlag returns a boolean, modelled as STACK_I4. */
4287 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need it. */
4290 load = mono_decompose_opcode (cfg, load);
4291 and_ = mono_decompose_opcode (cfg, and_);
4292 cmp = mono_decompose_opcode (cfg, cmp);
4293 ceq = mono_decompose_opcode (cfg, ceq);
4301 * Returns NULL and set the cfg exception on error.
4303 static G_GNUC_UNUSED MonoInst*
/* Emit IR which allocates a delegate of type KLASS bound to METHOD (with receiver
 * TARGET) and initializes its fields inline, mirroring mono_delegate_ctor ().
 * Returns the instruction producing the delegate object. */
4304 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4308 gpointer trampoline;
4309 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke trampoline; bail out if none exists
 * for this invoke signature. */
4313 if (virtual_ && !cfg->llvm_only) {
4314 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4317 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4321 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4325 /* Inline the contents of mono_delegate_ctor */
4327 /* Set target field */
4328 /* Optimize away setting of NULL target */
4329 if (!MONO_INS_IS_PCONST_NULL (target)) {
4330 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object requires a GC write barrier. */
4331 if (cfg->gen_write_barriers) {
4332 dreg = alloc_preg (cfg);
4333 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4334 emit_write_barrier (cfg, ptr, target);
4338 /* Set method field */
4339 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4343 * To avoid looking up the compiled code belonging to the target method
4344 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4345 * store it, and we fill it after the method has been compiled.
4347 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4348 MonoInst *code_slot_ins;
/* Shared code: the slot address comes from the rgctx. */
4351 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared JIT: allocate/look up the slot in the per-domain hash under the domain lock. */
4353 domain = mono_domain_get ();
4354 mono_domain_lock (domain);
4355 if (!domain_jit_info (domain)->method_code_hash)
4356 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4357 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4359 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4360 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4362 mono_domain_unlock (domain);
/* AOT: the slot address is resolved through a patch-info constant. */
4364 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4366 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines — initialize the delegate via an icall instead. */
4369 if (cfg->llvm_only) {
4370 MonoInst *args [16];
4375 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4376 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4379 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4385 if (cfg->compile_aot) {
/* AOT: reference the delegate trampoline via a patchable klass/method pair. */
4386 MonoDelegateClassMethodPair *del_tramp;
4388 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4389 del_tramp->klass = klass;
4390 del_tramp->method = context_used ? NULL : method;
4391 del_tramp->is_virtual = virtual_;
4392 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4395 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4397 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4398 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4401 /* Set invoke_impl field */
4403 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Otherwise tramp_ins points at a MonoDelegateTrampInfo: copy invoke_impl
 * and method_ptr out of it into the delegate. */
4405 dreg = alloc_preg (cfg);
4406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4409 dreg = alloc_preg (cfg);
4410 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4411 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field). */
4414 dreg = alloc_preg (cfg);
4415 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4416 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4418 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/* Emit the allocation of a multi-dimensional array of rank RANK: a vararg call
 * to the mono_array_new_va icall wrapper with the dimension arguments from SP. */
4424 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4426 MonoJitICallInfo *info;
4428 /* Need to register the icall so it gets an icall wrapper */
4429 info = mono_get_array_new_va_icall (rank);
4431 cfg->flags |= MONO_CFG_HAS_VARARGS;
4433 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM can't compile this vararg call; record the reason and fall back to the JIT. */
4434 cfg->exception_message = g_strdup ("array-new");
4435 cfg->disable_llvm = TRUE;
4437 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4438 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4442 * handle_constrained_gsharedvt_call:
4444 * Handle constrained calls where the receiver is a gsharedvt type.
4445 * Return the instruction representing the call. Set the cfg exception on failure.
4448 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4449 gboolean *ref_emit_widen)
4451 MonoInst *ins = NULL;
4452 gboolean emit_widen = *ref_emit_widen;
4455 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4456 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4457 * pack the arguments into an array, and do the rest of the work in in an icall.
4459 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4460 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
4461 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4462 MonoInst *args [16];
4465 * This case handles calls to
4466 * - object:ToString()/Equals()/GetHashCode(),
4467 * - System.IComparable<T>:CompareTo()
4468 * - System.IEquatable<T>:Equals ()
4469 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1]: the target method (via rgctx when it is generic-context dependent). */
4473 if (mono_method_check_context_used (cmethod))
4474 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4476 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4477 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4479 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4480 if (fsig->hasthis && fsig->param_count) {
4481 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4482 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4483 ins->dreg = alloc_preg (cfg);
4484 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4485 MONO_ADD_INS (cfg->cbb, ins);
4488 if (mini_is_gsharedvt_type (fsig->params [0])) {
4489 int addr_reg, deref_arg_reg;
/* Tell the icall whether the packed argument must be dereferenced:
 * only when the instantiation is not a plain vtype. */
4491 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4492 deref_arg_reg = alloc_preg (cfg);
4493 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4494 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4496 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4497 addr_reg = ins->dreg;
4498 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4500 EMIT_NEW_ICONST (cfg, args [3], 0);
4501 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments to pack. */
4504 EMIT_NEW_ICONST (cfg, args [3], 0);
4505 EMIT_NEW_ICONST (cfg, args [4], 0);
4507 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to match the callee's return type. */
4510 if (mini_is_gsharedvt_type (fsig->ret)) {
4511 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4512 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
4516 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4517 MONO_ADD_INS (cfg->cbb, add);
4519 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4520 MONO_ADD_INS (cfg->cbb, ins);
4521 /* ins represents the call result */
4524 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4527 *ref_emit_widen = emit_widen;
/* Emit the instruction which loads the GOT address into cfg->got_var, placing it
 * at the very start of the entry basic block. Idempotent: does nothing if there
 * is no got_var or it has already been allocated. */
4536 mono_emit_load_got_addr (MonoCompile *cfg)
4538 MonoInst *getaddr, *dummy_use;
4540 if (!cfg->got_var || cfg->got_var_allocated)
4543 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4544 getaddr->cil_code = cfg->header->code;
4545 getaddr->dreg = cfg->got_var->dreg;
4547 /* Add it to the start of the first bblock */
4548 if (cfg->bb_entry->code) {
/* Prepend manually: the GOT load must precede any existing instruction. */
4549 getaddr->next = cfg->bb_entry->code;
4550 cfg->bb_entry->code = getaddr;
4553 MONO_ADD_INS (cfg->bb_entry, getaddr);
4555 cfg->got_var_allocated = TRUE;
4558 * Add a dummy use to keep the got_var alive, since real uses might
4559 * only be generated by the back ends.
4560 * Add it to end_bblock, so the variable's lifetime covers the whole
4562 * It would be better to make the usage of the got var explicit in all
4563 * cases when the backend needs it (i.e. calls, throw etc.), so this
4564 * wouldn't be needed.
4566 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4567 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size for inlining candidates; initialized lazily from the
 * MONO_INLINELIMIT environment variable, defaulting to INLINE_LENGTH_LIMIT. */
4570 static int inline_limit;
4571 static gboolean inline_limit_inited;
/* Decide whether METHOD may be inlined into the method being compiled.
 * Checks impl flags, size limits, class-initialization constraints and
 * soft-float restrictions. */
4574 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4576 MonoMethodHeaderSummary header;
4578 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4579 MonoMethodSignature *sig = mono_method_signature (method);
4583 if (cfg->disable_inline)
/* Cap recursion depth of nested inlining. */
4588 if (cfg->inline_depth > 10)
4591 if (!mono_method_get_header_summary (method, &header))
4594 /*runtime, icall and pinvoke are checked by summary call*/
4595 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4596 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4597 (mono_class_is_marshalbyref (method->klass)) ||
4601 /* also consider num_locals? */
4602 /* Do the size check early to avoid creating vtables */
4603 if (!inline_limit_inited) {
4604 if (g_getenv ("MONO_INLINELIMIT"))
4605 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4607 inline_limit = INLINE_LENGTH_LIMIT;
4608 inline_limit_inited = TRUE;
/* [AggressiveInlining] overrides the size limit. */
4610 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4614 * if we can initialize the class of the method right away, we do,
4615 * otherwise we don't allow inlining if the class needs initialization,
4616 * since it would mean inserting a call to mono_runtime_class_init()
4617 * inside the inlined code
4619 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4622 if (!(cfg->opt & MONO_OPT_SHARED)) {
4623 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4624 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4625 if (method->klass->has_cctor) {
4626 vtable = mono_class_vtable (cfg->domain, method->klass);
4629 if (!cfg->compile_aot) {
/* Best-effort: if the cctor throws here we simply refuse to inline. */
4631 if (!mono_runtime_class_init_full (vtable, &error)) {
4632 mono_error_cleanup (&error);
4637 } else if (mono_class_is_before_field_init (method->klass)) {
4638 if (cfg->run_cctors && method->klass->has_cctor) {
4639 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4640 if (!method->klass->runtime_info)
4641 /* No vtable created yet */
4643 vtable = mono_class_vtable (cfg->domain, method->klass);
4646 /* This makes so that inline cannot trigger */
4647 /* .cctors: too many apps depend on them */
4648 /* running with a specific order... */
4649 if (! vtable->initialized)
4652 if (!mono_runtime_class_init_full (vtable, &error)) {
4653 mono_error_cleanup (&error);
4657 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4658 if (!method->klass->runtime_info)
4659 /* No vtable created yet */
4661 vtable = mono_class_vtable (cfg->domain, method->klass);
4664 if (!vtable->initialized)
4669 * If we're compiling for shared code
4670 * the cctor will need to be run at aot method load time, for example,
4671 * or at the end of the compilation of the inlining method.
4673 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4677 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets can't inline methods taking or returning R4 values. */
4678 if (mono_arch_is_soft_float ()) {
4680 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4682 for (i = 0; i < sig->param_count; ++i)
4683 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4688 if (g_list_find (cfg->dont_inline, method))
/* Return whether a (static) field access in METHOD requires emitting a class
 * initialization of KLASS before the access. */
4695 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4697 if (!cfg->compile_aot) {
/* JIT: an already-initialized vtable means no init code is needed. */
4699 if (vtable->initialized)
4703 if (mono_class_is_before_field_init (klass)) {
4704 if (cfg->method == method)
4708 if (!mono_class_needs_cctor_run (klass, method))
4711 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4712 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS; BCHECK requests a bounds check.
 * The returned instruction produces a managed pointer (STACK_MP) tagged with
 * the element class.
 * NOTE(review): elided excerpt — some else/brace/return lines are not visible.
 */
4719 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4723 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt: the element size is only known at run time (RGCTX path below) */
4726 if (mini_is_gsharedvt_variable_klass (klass)) {
4729 mono_class_init (klass);
4730 size = mono_class_array_element_size (klass);
4733 mult_reg = alloc_preg (cfg);
4734 array_reg = arr->dreg;
4735 index_reg = index->dreg;
4737 #if SIZEOF_REGISTER == 8
4738 /* The array reg is 64 bits but the index reg is only 32 */
4739 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the index widening itself */
4741 index2_reg = index_reg;
/* Otherwise sign-extend the 32 bit index up to pointer width */
4743 index2_reg = alloc_preg (cfg);
4744 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32 bit target: truncate an I8 index down to native int width */
4747 if (index->type == STACK_I8) {
4748 index2_reg = alloc_preg (cfg);
4749 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4751 index2_reg = index_reg;
/* Range check against MonoArray.max_length */
4756 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4758 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can be folded into a single LEA on x86/amd64 */
4759 if (size == 1 || size == 2 || size == 4 || size == 8) {
4760 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4762 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4763 ins->klass = mono_class_get_element_class (klass);
4764 ins->type = STACK_MP;
4770 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: fetch the element size from the runtime generic context */
4773 MonoInst *rgctx_ins;
4776 g_assert (cfg->gshared);
4777 context_used = mini_class_check_context_used (cfg, klass);
4778 g_assert (context_used);
4779 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4780 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
/* Fixed-size path: multiply the index by the compile-time element size */
4782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
4784 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4785 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4786 ins->klass = mono_class_get_element_class (klass);
4787 ins->type = STACK_MP;
4788 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with element class KLASS.  Both indexes are
 * adjusted by the per-dimension lower bound and range checked against the
 * per-dimension length before the flat offset is computed.
 * NOTE(review): elided excerpt — some brace/return lines are not visible.
 */
4794 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4796 int bounds_reg = alloc_preg (cfg);
4797 int add_reg = alloc_ireg_mp (cfg);
4798 int mult_reg = alloc_preg (cfg);
4799 int mult2_reg = alloc_preg (cfg);
4800 int low1_reg = alloc_preg (cfg);
4801 int low2_reg = alloc_preg (cfg);
4802 int high1_reg = alloc_preg (cfg);
4803 int high2_reg = alloc_preg (cfg);
4804 int realidx1_reg = alloc_preg (cfg);
4805 int realidx2_reg = alloc_preg (cfg);
4806 int sum_reg = alloc_preg (cfg);
4807 int index1, index2, tmpreg;
4811 mono_class_init (klass);
4812 size = mono_class_array_element_size (klass);
4814 index1 = index_ins1->dreg;
4815 index2 = index_ins2->dreg;
4817 #if SIZEOF_REGISTER == 8
4818 /* The array reg is 64 bits but the index reg is only 32 */
4819 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32 bit indexes up to pointer width */
4822 tmpreg = alloc_preg (cfg);
4823 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4825 tmpreg = alloc_preg (cfg);
4826 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4830 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4834 /* range checking */
4835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4836 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1; require realidx1 < length1 */
4838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4839 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4840 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4841 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4842 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4843 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4844 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check, reading bounds [1] at sizeof (MonoArrayBounds) */
4846 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4847 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4848 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4849 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4850 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4851 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4852 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
4854 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4855 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4856 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4857 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4858 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4860 ins->type = STACK_MP;
4862 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for CMETHOD, an array accessor
 * (Get/Set/Address); SP holds the array followed by the index arguments.
 * Rank 1 and rank 2 have fast inline paths; everything else calls the
 * generated Address () marshalling wrapper.
 * NOTE(review): elided excerpt — the rank == 1 guard preceding the first
 * return is not visible here.
 */
4868 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4872 MonoMethod *addr_method;
4874 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter, the last parameter is the value, not an index */
4876 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4879 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4881 /* emit_ldelema_2 depends on OP_LMUL */
4882 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4883 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4886 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the rank/element-size specific Address () wrapper */
4889 element_size = mono_class_array_element_size (eclass);
4890 addr_method = mono_marshal_get_array_address (rank, element_size);
4891 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break IL instruction / Debugger.Break () */
4896 static MonoBreakPolicy
4897 always_insert_breakpoint (MonoMethod *method)
4899 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; settable via mono_set_break_policy () */
4902 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4905 * mono_set_break_policy:
4906 * policy_callback: the new callback function
4908 * Allow embedders to decide whether to actually obey breakpoint instructions
4909 * (both break IL instructions and Debugger.Break () method calls), for example
4910 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4911 * untrusted or semi-trusted code.
4913 * @policy_callback will be called every time a break point instruction needs to
4914 * be inserted with the method argument being the method that calls Debugger.Break()
4915 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4916 * if it wants the breakpoint to not be effective in the given method.
4917 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; a NULL argument restores the default */
4920 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4922 if (policy_callback)
4923 break_policy_func = policy_callback;
/* else branch (elided): fall back to the always-insert default */
4925 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — the misspelled name is the established symbol)
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should
 * actually be materialized.
 * NOTE(review): elided excerpt — the per-case return statements are not
 * visible here.
 */
4929 should_insert_brekpoint (MonoMethod *method) {
4930 switch (break_policy_func (method)) {
4931 case MONO_BREAK_POLICY_ALWAYS:
4933 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG used to depend on an attached mdb debugger; no longer supported */
4935 case MONO_BREAK_POLICY_ON_DBG:
4936 g_warning ("mdb no longer supported");
/* default: the user callback returned an out-of-range value */
4939 g_warning ("Incorrect value returned from break policy callback");
4944 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args: [array, index, byref value].  IS_SET selects store-into-array vs
 * load-from-array; the element is copied through the byref whose type is
 * fsig->params [2].
 */
4946 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4948 MonoInst *addr, *store, *load;
4949 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4951 /* the bounds check is already done by the callers */
4952 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *elem_addr = *value_ref, with a write barrier for reference elements */
4954 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4955 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4956 if (mini_type_is_reference (&eklass->byval_arg))
4957 emit_write_barrier (cfg, addr, load);
/* get: *value_ref = *elem_addr */
4959 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4960 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type (via its byval MonoType) */
4967 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4969 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into element SP [1] of array SP [0] with
 * element class KLASS.  SAFETY_CHECKS enables bounds/covariance checking.
 * NOTE(review): elided excerpt — some brace/return/iargs-setup lines are not
 * visible here.
 */
4973 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Reference-element stores of a possibly non-null value need the covariance
 * (stelemref) check: call the virtual stelemref helper */
4975 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4976 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4977 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4978 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4979 MonoInst *iargs [3];
4982 mono_class_setup_vtable (obj_array);
4983 g_assert (helper->slot);
4985 if (sp [0]->type != STACK_OBJ)
4987 if (sp [2]->type != STACK_OBJ)
4994 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element: store through a computed address as an opaque value type */
4998 if (mini_is_gsharedvt_variable_klass (klass)) {
5001 // FIXME-VT: OP_ICONST optimization
5002 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5003 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5004 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time */
5005 } else if (sp [1]->opcode == OP_ICONST) {
5006 int array_reg = sp [0]->dreg;
5007 int index_reg = sp [1]->dreg;
5008 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5010 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5011 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5014 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5015 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, and emit a write barrier
 * for reference elements */
5017 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5018 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5019 if (generic_class_is_reference_type (cfg, klass))
5020 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Implement Array.UnsafeStore/UnsafeLoad: element access without safety
 * checks.  IS_SET selects store vs load.
 * NOTE(review): elided excerpt — the is_set branch structure is not fully
 * visible here.
 */
5027 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Store path: element class comes from the value parameter */
5032 eklass = mono_class_from_mono_type (fsig->params [2]);
/* Load path: element class comes from the return type */
5034 eklass = mono_class_from_mono_type (fsig->ret);
5037 return emit_array_store (cfg, eklass, args, FALSE);
5039 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5040 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5046 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5049 int param_size, return_size;
5051 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5052 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5054 if (cfg->verbose_level > 3)
5055 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5057 //Don't allow mixing reference types with value types
5058 if (param_klass->valuetype != return_klass->valuetype) {
5059 if (cfg->verbose_level > 3)
5060 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5064 if (!param_klass->valuetype) {
5065 if (cfg->verbose_level > 3)
5066 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5071 if (param_klass->has_references || return_klass->has_references)
5074 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5075 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5076 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5077 if (cfg->verbose_level > 3)
5078 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5082 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5083 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5084 if (cfg->verbose_level > 3)
5085 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5089 param_size = mono_class_value_size (param_klass, &align);
5090 return_size = mono_class_value_size (return_klass, &align);
5092 //We can do it if sizes match
5093 if (param_size == return_size) {
5094 if (cfg->verbose_level > 3)
5095 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5099 //No simple way to handle struct if sizes don't match
5100 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5101 if (cfg->verbose_level > 3)
5102 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5107 * Same reg size category.
5108 * A quick note on why we don't require widening here.
5109 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5111 * Since the source value comes from a function argument, the JIT will already have
5112 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5114 if (param_size <= 4 && return_size <= 4) {
5115 if (cfg->verbose_level > 3)
5116 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Implement the Array.UnsafeMov<S,R> intrinsic: reinterpret the argument as
 * the return type when the two representations are compatible.
 * NOTE(review): elided excerpt — the emitted move / fallback return lines are
 * not visible here.
 */
5124 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5126 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5127 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* gsharedvt return: layout is unknown at compile time, no intrinsic */
5129 if (mini_is_gsharedvt_variable_type (fsig->ret))
5132 //Valuetypes that are semantically equivalent or numbers than can be widened to
5133 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5136 //Arrays of valuetypes that are semantically equivalent
5137 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to the constructor CMETHOD with an intrinsic
 * instruction sequence: SIMD intrinsics first (when MONO_OPT_SIMD is on),
 * then native-types intrinsics.
 */
5144 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5146 #ifdef MONO_ARCH_SIMD_INTRINSICS
5147 MonoInst *ins = NULL;
5149 if (cfg->opt & MONO_OPT_SIMD) {
5150 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5156 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER instruction of the given MONO_MEMORY_BARRIER_* KIND */
5160 emit_memory_barrier (MonoCompile *cfg, int kind)
5162 MonoInst *ins = NULL;
5163 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5164 MONO_ADD_INS (cfg->cbb, ins);
5165 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics that only the LLVM backend can lower: System.Math
 * Sin/Cos/Sqrt/Abs become unary FP opcodes, and Min/Max become CMOV-based
 * binary opcodes when MONO_OPT_CMOV is enabled.
 * NOTE(review): elided excerpt — the opcode assignments for the Sin/Cos/...
 * branches and some braces/returns are not visible here.
 */
5171 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5173 MonoInst *ins = NULL;
5176 /* The LLVM backend supports these intrinsics */
5177 if (cmethod->klass == mono_defaults.math_class) {
5178 if (strcmp (cmethod->name, "Sin") == 0) {
5180 } else if (strcmp (cmethod->name, "Cos") == 0) {
5182 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for the double overload */
5184 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one FP argument in, R8 result out */
5188 if (opcode && fsig->param_count == 1) {
5189 MONO_INST_NEW (cfg, ins, opcode);
5190 ins->type = STACK_R8;
5191 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5192 ins->sreg1 = args [0]->dreg;
5193 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map to conditional-move opcodes, picked by operand type */
5197 if (cfg->opt & MONO_OPT_CMOV) {
5198 if (strcmp (cmethod->name, "Min") == 0) {
5199 if (fsig->params [0]->type == MONO_TYPE_I4)
5201 if (fsig->params [0]->type == MONO_TYPE_U4)
5202 opcode = OP_IMIN_UN;
5203 else if (fsig->params [0]->type == MONO_TYPE_I8)
5205 else if (fsig->params [0]->type == MONO_TYPE_U8)
5206 opcode = OP_LMIN_UN;
5207 } else if (strcmp (cmethod->name, "Max") == 0) {
5208 if (fsig->params [0]->type == MONO_TYPE_I4)
5210 if (fsig->params [0]->type == MONO_TYPE_U4)
5211 opcode = OP_IMAX_UN;
5212 else if (fsig->params [0]->type == MONO_TYPE_I8)
5214 else if (fsig->params [0]->type == MONO_TYPE_U8)
5215 opcode = OP_LMAX_UN;
5219 if (opcode && fsig->param_count == 2) {
5220 MONO_INST_NEW (cfg, ins, opcode);
/* NOTE(review): only I4 maps to STACK_I4 here, so U4 lands in the STACK_I8
 * arm — confirm that is intended for the 32 bit unsigned overloads */
5221 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5222 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5223 ins->sreg1 = args [0]->dreg;
5224 ins->sreg2 = args [1]->dreg;
5225 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics whose emitted IR is valid across generic-sharing modes;
 * currently the Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5233 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5235 if (cmethod->klass == mono_defaults.array_class) {
5236 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5237 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5238 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5239 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5240 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5241 return emit_array_unsafe_mov (cfg, fsig, args);
5248 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5250 MonoInst *ins = NULL;
5252 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5254 if (cmethod->klass == mono_defaults.string_class) {
5255 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5256 int dreg = alloc_ireg (cfg);
5257 int index_reg = alloc_preg (cfg);
5258 int add_reg = alloc_preg (cfg);
5260 #if SIZEOF_REGISTER == 8
5261 if (COMPILE_LLVM (cfg)) {
5262 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5264 /* The array reg is 64 bits but the index reg is only 32 */
5265 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5268 index_reg = args [1]->dreg;
5270 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5272 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5273 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5274 add_reg = ins->dreg;
5275 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5278 int mult_reg = alloc_preg (cfg);
5279 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5280 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5281 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5282 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5284 type_from_op (cfg, ins, NULL, NULL);
5286 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5287 int dreg = alloc_ireg (cfg);
5288 /* Decompose later to allow more optimizations */
5289 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5290 ins->type = STACK_I4;
5291 ins->flags |= MONO_INST_FAULT;
5292 cfg->cbb->has_array_access = TRUE;
5293 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5298 } else if (cmethod->klass == mono_defaults.object_class) {
5299 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5300 int dreg = alloc_ireg_ref (cfg);
5301 int vt_reg = alloc_preg (cfg);
5302 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5303 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5304 type_from_op (cfg, ins, NULL, NULL);
5307 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5308 int dreg = alloc_ireg (cfg);
5309 int t1 = alloc_ireg (cfg);
5311 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5312 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5313 ins->type = STACK_I4;
5316 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5317 MONO_INST_NEW (cfg, ins, OP_NOP);
5318 MONO_ADD_INS (cfg->cbb, ins);
5322 } else if (cmethod->klass == mono_defaults.array_class) {
5323 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5324 return emit_array_generic_access (cfg, fsig, args, FALSE);
5325 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5326 return emit_array_generic_access (cfg, fsig, args, TRUE);
5328 #ifndef MONO_BIG_ARRAYS
5330 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5333 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5334 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5335 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5336 int dreg = alloc_ireg (cfg);
5337 int bounds_reg = alloc_ireg_mp (cfg);
5338 MonoBasicBlock *end_bb, *szarray_bb;
5339 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5341 NEW_BBLOCK (cfg, end_bb);
5342 NEW_BBLOCK (cfg, szarray_bb);
5344 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5345 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5348 /* Non-szarray case */
5350 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5351 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5353 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5354 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5355 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5356 MONO_START_BB (cfg, szarray_bb);
5359 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5360 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5362 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5363 MONO_START_BB (cfg, end_bb);
5365 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5366 ins->type = STACK_I4;
5372 if (cmethod->name [0] != 'g')
5375 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5376 int dreg = alloc_ireg (cfg);
5377 int vtable_reg = alloc_preg (cfg);
5378 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5379 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5380 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5381 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5382 type_from_op (cfg, ins, NULL, NULL);
5385 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5386 int dreg = alloc_ireg (cfg);
5388 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5389 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5390 type_from_op (cfg, ins, NULL, NULL);
5395 } else if (cmethod->klass == runtime_helpers_class) {
5396 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5397 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5401 } else if (cmethod->klass == mono_defaults.monitor_class) {
5402 gboolean is_enter = FALSE;
5403 gboolean is_v4 = FALSE;
5405 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5409 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5414 * To make async stack traces work, icalls which can block should have a wrapper.
5415 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5417 MonoBasicBlock *end_bb;
5419 NEW_BBLOCK (cfg, end_bb);
5421 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5424 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5425 MONO_START_BB (cfg, end_bb);
5428 } else if (cmethod->klass == mono_defaults.thread_class) {
5429 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5430 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5431 MONO_ADD_INS (cfg->cbb, ins);
5433 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5434 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5435 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5437 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5439 if (fsig->params [0]->type == MONO_TYPE_I1)
5440 opcode = OP_LOADI1_MEMBASE;
5441 else if (fsig->params [0]->type == MONO_TYPE_U1)
5442 opcode = OP_LOADU1_MEMBASE;
5443 else if (fsig->params [0]->type == MONO_TYPE_I2)
5444 opcode = OP_LOADI2_MEMBASE;
5445 else if (fsig->params [0]->type == MONO_TYPE_U2)
5446 opcode = OP_LOADU2_MEMBASE;
5447 else if (fsig->params [0]->type == MONO_TYPE_I4)
5448 opcode = OP_LOADI4_MEMBASE;
5449 else if (fsig->params [0]->type == MONO_TYPE_U4)
5450 opcode = OP_LOADU4_MEMBASE;
5451 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5452 opcode = OP_LOADI8_MEMBASE;
5453 else if (fsig->params [0]->type == MONO_TYPE_R4)
5454 opcode = OP_LOADR4_MEMBASE;
5455 else if (fsig->params [0]->type == MONO_TYPE_R8)
5456 opcode = OP_LOADR8_MEMBASE;
5457 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5458 opcode = OP_LOAD_MEMBASE;
5461 MONO_INST_NEW (cfg, ins, opcode);
5462 ins->inst_basereg = args [0]->dreg;
5463 ins->inst_offset = 0;
5464 MONO_ADD_INS (cfg->cbb, ins);
5466 switch (fsig->params [0]->type) {
5473 ins->dreg = mono_alloc_ireg (cfg);
5474 ins->type = STACK_I4;
5478 ins->dreg = mono_alloc_lreg (cfg);
5479 ins->type = STACK_I8;
5483 ins->dreg = mono_alloc_ireg (cfg);
5484 #if SIZEOF_REGISTER == 8
5485 ins->type = STACK_I8;
5487 ins->type = STACK_I4;
5492 ins->dreg = mono_alloc_freg (cfg);
5493 ins->type = STACK_R8;
5496 g_assert (mini_type_is_reference (fsig->params [0]));
5497 ins->dreg = mono_alloc_ireg_ref (cfg);
5498 ins->type = STACK_OBJ;
5502 if (opcode == OP_LOADI8_MEMBASE)
5503 ins = mono_decompose_opcode (cfg, ins);
5505 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5509 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5511 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5513 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5514 opcode = OP_STOREI1_MEMBASE_REG;
5515 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5516 opcode = OP_STOREI2_MEMBASE_REG;
5517 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5518 opcode = OP_STOREI4_MEMBASE_REG;
5519 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5520 opcode = OP_STOREI8_MEMBASE_REG;
5521 else if (fsig->params [0]->type == MONO_TYPE_R4)
5522 opcode = OP_STORER4_MEMBASE_REG;
5523 else if (fsig->params [0]->type == MONO_TYPE_R8)
5524 opcode = OP_STORER8_MEMBASE_REG;
5525 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5526 opcode = OP_STORE_MEMBASE_REG;
5529 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5531 MONO_INST_NEW (cfg, ins, opcode);
5532 ins->sreg1 = args [1]->dreg;
5533 ins->inst_destbasereg = args [0]->dreg;
5534 ins->inst_offset = 0;
5535 MONO_ADD_INS (cfg->cbb, ins);
5537 if (opcode == OP_STOREI8_MEMBASE_REG)
5538 ins = mono_decompose_opcode (cfg, ins);
5543 } else if (cmethod->klass->image == mono_defaults.corlib &&
5544 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5545 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5548 #if SIZEOF_REGISTER == 8
5549 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5550 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5551 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5552 ins->dreg = mono_alloc_preg (cfg);
5553 ins->sreg1 = args [0]->dreg;
5554 ins->type = STACK_I8;
5555 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5556 MONO_ADD_INS (cfg->cbb, ins);
5560 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5562 /* 64 bit reads are already atomic */
5563 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5564 load_ins->dreg = mono_alloc_preg (cfg);
5565 load_ins->inst_basereg = args [0]->dreg;
5566 load_ins->inst_offset = 0;
5567 load_ins->type = STACK_I8;
5568 MONO_ADD_INS (cfg->cbb, load_ins);
5570 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5577 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5578 MonoInst *ins_iconst;
5581 if (fsig->params [0]->type == MONO_TYPE_I4) {
5582 opcode = OP_ATOMIC_ADD_I4;
5583 cfg->has_atomic_add_i4 = TRUE;
5585 #if SIZEOF_REGISTER == 8
5586 else if (fsig->params [0]->type == MONO_TYPE_I8)
5587 opcode = OP_ATOMIC_ADD_I8;
5590 if (!mono_arch_opcode_supported (opcode))
5592 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5593 ins_iconst->inst_c0 = 1;
5594 ins_iconst->dreg = mono_alloc_ireg (cfg);
5595 MONO_ADD_INS (cfg->cbb, ins_iconst);
5597 MONO_INST_NEW (cfg, ins, opcode);
5598 ins->dreg = mono_alloc_ireg (cfg);
5599 ins->inst_basereg = args [0]->dreg;
5600 ins->inst_offset = 0;
5601 ins->sreg2 = ins_iconst->dreg;
5602 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5603 MONO_ADD_INS (cfg->cbb, ins);
5605 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5606 MonoInst *ins_iconst;
5609 if (fsig->params [0]->type == MONO_TYPE_I4) {
5610 opcode = OP_ATOMIC_ADD_I4;
5611 cfg->has_atomic_add_i4 = TRUE;
5613 #if SIZEOF_REGISTER == 8
5614 else if (fsig->params [0]->type == MONO_TYPE_I8)
5615 opcode = OP_ATOMIC_ADD_I8;
5618 if (!mono_arch_opcode_supported (opcode))
5620 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5621 ins_iconst->inst_c0 = -1;
5622 ins_iconst->dreg = mono_alloc_ireg (cfg);
5623 MONO_ADD_INS (cfg->cbb, ins_iconst);
5625 MONO_INST_NEW (cfg, ins, opcode);
5626 ins->dreg = mono_alloc_ireg (cfg);
5627 ins->inst_basereg = args [0]->dreg;
5628 ins->inst_offset = 0;
5629 ins->sreg2 = ins_iconst->dreg;
5630 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5631 MONO_ADD_INS (cfg->cbb, ins);
5633 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5636 if (fsig->params [0]->type == MONO_TYPE_I4) {
5637 opcode = OP_ATOMIC_ADD_I4;
5638 cfg->has_atomic_add_i4 = TRUE;
5640 #if SIZEOF_REGISTER == 8
5641 else if (fsig->params [0]->type == MONO_TYPE_I8)
5642 opcode = OP_ATOMIC_ADD_I8;
5645 if (!mono_arch_opcode_supported (opcode))
5647 MONO_INST_NEW (cfg, ins, opcode);
5648 ins->dreg = mono_alloc_ireg (cfg);
5649 ins->inst_basereg = args [0]->dreg;
5650 ins->inst_offset = 0;
5651 ins->sreg2 = args [1]->dreg;
5652 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5653 MONO_ADD_INS (cfg->cbb, ins);
5656 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5657 MonoInst *f2i = NULL, *i2f;
5658 guint32 opcode, f2i_opcode, i2f_opcode;
5659 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5660 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5662 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5663 fsig->params [0]->type == MONO_TYPE_R4) {
5664 opcode = OP_ATOMIC_EXCHANGE_I4;
5665 f2i_opcode = OP_MOVE_F_TO_I4;
5666 i2f_opcode = OP_MOVE_I4_TO_F;
5667 cfg->has_atomic_exchange_i4 = TRUE;
5669 #if SIZEOF_REGISTER == 8
5671 fsig->params [0]->type == MONO_TYPE_I8 ||
5672 fsig->params [0]->type == MONO_TYPE_R8 ||
5673 fsig->params [0]->type == MONO_TYPE_I) {
5674 opcode = OP_ATOMIC_EXCHANGE_I8;
5675 f2i_opcode = OP_MOVE_F_TO_I8;
5676 i2f_opcode = OP_MOVE_I8_TO_F;
5679 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5680 opcode = OP_ATOMIC_EXCHANGE_I4;
5681 cfg->has_atomic_exchange_i4 = TRUE;
5687 if (!mono_arch_opcode_supported (opcode))
5691 /* TODO: Decompose these opcodes instead of bailing here. */
5692 if (COMPILE_SOFT_FLOAT (cfg))
5695 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5696 f2i->dreg = mono_alloc_ireg (cfg);
5697 f2i->sreg1 = args [1]->dreg;
5698 if (f2i_opcode == OP_MOVE_F_TO_I4)
5699 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5700 MONO_ADD_INS (cfg->cbb, f2i);
5703 MONO_INST_NEW (cfg, ins, opcode);
5704 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5705 ins->inst_basereg = args [0]->dreg;
5706 ins->inst_offset = 0;
5707 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5708 MONO_ADD_INS (cfg->cbb, ins);
5710 switch (fsig->params [0]->type) {
5712 ins->type = STACK_I4;
5715 ins->type = STACK_I8;
5718 #if SIZEOF_REGISTER == 8
5719 ins->type = STACK_I8;
5721 ins->type = STACK_I4;
5726 ins->type = STACK_R8;
5729 g_assert (mini_type_is_reference (fsig->params [0]));
5730 ins->type = STACK_OBJ;
5735 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5736 i2f->dreg = mono_alloc_freg (cfg);
5737 i2f->sreg1 = ins->dreg;
5738 i2f->type = STACK_R8;
5739 if (i2f_opcode == OP_MOVE_I4_TO_F)
5740 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5741 MONO_ADD_INS (cfg->cbb, i2f);
5746 if (cfg->gen_write_barriers && is_ref)
5747 emit_write_barrier (cfg, args [0], args [1]);
5749 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5750 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5751 guint32 opcode, f2i_opcode, i2f_opcode;
5752 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5753 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5755 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5756 fsig->params [1]->type == MONO_TYPE_R4) {
5757 opcode = OP_ATOMIC_CAS_I4;
5758 f2i_opcode = OP_MOVE_F_TO_I4;
5759 i2f_opcode = OP_MOVE_I4_TO_F;
5760 cfg->has_atomic_cas_i4 = TRUE;
5762 #if SIZEOF_REGISTER == 8
5764 fsig->params [1]->type == MONO_TYPE_I8 ||
5765 fsig->params [1]->type == MONO_TYPE_R8 ||
5766 fsig->params [1]->type == MONO_TYPE_I) {
5767 opcode = OP_ATOMIC_CAS_I8;
5768 f2i_opcode = OP_MOVE_F_TO_I8;
5769 i2f_opcode = OP_MOVE_I8_TO_F;
5772 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5773 opcode = OP_ATOMIC_CAS_I4;
5774 cfg->has_atomic_cas_i4 = TRUE;
5780 if (!mono_arch_opcode_supported (opcode))
5784 /* TODO: Decompose these opcodes instead of bailing here. */
5785 if (COMPILE_SOFT_FLOAT (cfg))
5788 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5789 f2i_new->dreg = mono_alloc_ireg (cfg);
5790 f2i_new->sreg1 = args [1]->dreg;
5791 if (f2i_opcode == OP_MOVE_F_TO_I4)
5792 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5793 MONO_ADD_INS (cfg->cbb, f2i_new);
5795 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5796 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5797 f2i_cmp->sreg1 = args [2]->dreg;
5798 if (f2i_opcode == OP_MOVE_F_TO_I4)
5799 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5800 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5803 MONO_INST_NEW (cfg, ins, opcode);
5804 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5805 ins->sreg1 = args [0]->dreg;
5806 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5807 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5808 MONO_ADD_INS (cfg->cbb, ins);
5810 switch (fsig->params [1]->type) {
5812 ins->type = STACK_I4;
5815 ins->type = STACK_I8;
5818 #if SIZEOF_REGISTER == 8
5819 ins->type = STACK_I8;
5821 ins->type = STACK_I4;
5825 ins->type = cfg->r4_stack_type;
5828 ins->type = STACK_R8;
5831 g_assert (mini_type_is_reference (fsig->params [1]));
5832 ins->type = STACK_OBJ;
5837 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5838 i2f->dreg = mono_alloc_freg (cfg);
5839 i2f->sreg1 = ins->dreg;
5840 i2f->type = STACK_R8;
5841 if (i2f_opcode == OP_MOVE_I4_TO_F)
5842 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5843 MONO_ADD_INS (cfg->cbb, i2f);
5848 if (cfg->gen_write_barriers && is_ref)
5849 emit_write_barrier (cfg, args [0], args [1]);
5851 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5852 fsig->params [1]->type == MONO_TYPE_I4) {
5853 MonoInst *cmp, *ceq;
5855 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5858 /* int32 r = CAS (location, value, comparand); */
5859 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5860 ins->dreg = alloc_ireg (cfg);
5861 ins->sreg1 = args [0]->dreg;
5862 ins->sreg2 = args [1]->dreg;
5863 ins->sreg3 = args [2]->dreg;
5864 ins->type = STACK_I4;
5865 MONO_ADD_INS (cfg->cbb, ins);
5867 /* bool result = r == comparand; */
5868 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5869 cmp->sreg1 = ins->dreg;
5870 cmp->sreg2 = args [2]->dreg;
5871 cmp->type = STACK_I4;
5872 MONO_ADD_INS (cfg->cbb, cmp);
5874 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5875 ceq->dreg = alloc_ireg (cfg);
5876 ceq->type = STACK_I4;
5877 MONO_ADD_INS (cfg->cbb, ceq);
5879 /* *success = result; */
5880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5882 cfg->has_atomic_cas_i4 = TRUE;
5884 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5885 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5889 } else if (cmethod->klass->image == mono_defaults.corlib &&
5890 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5891 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5894 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5896 MonoType *t = fsig->params [0];
5898 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5900 g_assert (t->byref);
5901 /* t is a byref type, so the reference check is more complicated */
5902 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5903 if (t->type == MONO_TYPE_I1)
5904 opcode = OP_ATOMIC_LOAD_I1;
5905 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5906 opcode = OP_ATOMIC_LOAD_U1;
5907 else if (t->type == MONO_TYPE_I2)
5908 opcode = OP_ATOMIC_LOAD_I2;
5909 else if (t->type == MONO_TYPE_U2)
5910 opcode = OP_ATOMIC_LOAD_U2;
5911 else if (t->type == MONO_TYPE_I4)
5912 opcode = OP_ATOMIC_LOAD_I4;
5913 else if (t->type == MONO_TYPE_U4)
5914 opcode = OP_ATOMIC_LOAD_U4;
5915 else if (t->type == MONO_TYPE_R4)
5916 opcode = OP_ATOMIC_LOAD_R4;
5917 else if (t->type == MONO_TYPE_R8)
5918 opcode = OP_ATOMIC_LOAD_R8;
5919 #if SIZEOF_REGISTER == 8
5920 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5921 opcode = OP_ATOMIC_LOAD_I8;
5922 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5923 opcode = OP_ATOMIC_LOAD_U8;
5925 else if (t->type == MONO_TYPE_I)
5926 opcode = OP_ATOMIC_LOAD_I4;
5927 else if (is_ref || t->type == MONO_TYPE_U)
5928 opcode = OP_ATOMIC_LOAD_U4;
5932 if (!mono_arch_opcode_supported (opcode))
5935 MONO_INST_NEW (cfg, ins, opcode);
5936 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5937 ins->sreg1 = args [0]->dreg;
5938 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5939 MONO_ADD_INS (cfg->cbb, ins);
5942 case MONO_TYPE_BOOLEAN:
5949 ins->type = STACK_I4;
5953 ins->type = STACK_I8;
5957 #if SIZEOF_REGISTER == 8
5958 ins->type = STACK_I8;
5960 ins->type = STACK_I4;
5964 ins->type = cfg->r4_stack_type;
5967 ins->type = STACK_R8;
5971 ins->type = STACK_OBJ;
5977 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5979 MonoType *t = fsig->params [0];
5982 g_assert (t->byref);
5983 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5984 if (t->type == MONO_TYPE_I1)
5985 opcode = OP_ATOMIC_STORE_I1;
5986 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5987 opcode = OP_ATOMIC_STORE_U1;
5988 else if (t->type == MONO_TYPE_I2)
5989 opcode = OP_ATOMIC_STORE_I2;
5990 else if (t->type == MONO_TYPE_U2)
5991 opcode = OP_ATOMIC_STORE_U2;
5992 else if (t->type == MONO_TYPE_I4)
5993 opcode = OP_ATOMIC_STORE_I4;
5994 else if (t->type == MONO_TYPE_U4)
5995 opcode = OP_ATOMIC_STORE_U4;
5996 else if (t->type == MONO_TYPE_R4)
5997 opcode = OP_ATOMIC_STORE_R4;
5998 else if (t->type == MONO_TYPE_R8)
5999 opcode = OP_ATOMIC_STORE_R8;
6000 #if SIZEOF_REGISTER == 8
6001 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6002 opcode = OP_ATOMIC_STORE_I8;
6003 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6004 opcode = OP_ATOMIC_STORE_U8;
6006 else if (t->type == MONO_TYPE_I)
6007 opcode = OP_ATOMIC_STORE_I4;
6008 else if (is_ref || t->type == MONO_TYPE_U)
6009 opcode = OP_ATOMIC_STORE_U4;
6013 if (!mono_arch_opcode_supported (opcode))
6016 MONO_INST_NEW (cfg, ins, opcode);
6017 ins->dreg = args [0]->dreg;
6018 ins->sreg1 = args [1]->dreg;
6019 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6020 MONO_ADD_INS (cfg->cbb, ins);
6022 if (cfg->gen_write_barriers && is_ref)
6023 emit_write_barrier (cfg, args [0], args [1]);
6029 } else if (cmethod->klass->image == mono_defaults.corlib &&
6030 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6031 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6032 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6033 if (should_insert_brekpoint (cfg->method)) {
6034 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6036 MONO_INST_NEW (cfg, ins, OP_NOP);
6037 MONO_ADD_INS (cfg->cbb, ins);
6041 } else if (cmethod->klass->image == mono_defaults.corlib &&
6042 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6043 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6044 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6046 EMIT_NEW_ICONST (cfg, ins, 1);
6048 EMIT_NEW_ICONST (cfg, ins, 0);
6051 } else if (cmethod->klass->image == mono_defaults.corlib &&
6052 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6053 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6054 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6055 /* No stack walks are currently available, so implement this as an intrinsic */
6056 MonoInst *assembly_ins;
6058 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6059 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6062 } else if (cmethod->klass->image == mono_defaults.corlib &&
6063 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6064 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6065 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6066 /* No stack walks are currently available, so implement this as an intrinsic */
6067 MonoInst *method_ins;
6068 MonoMethod *declaring = cfg->method;
6070 /* This returns the declaring generic method */
6071 if (declaring->is_inflated)
6072 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6073 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6074 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6075 cfg->no_inline = TRUE;
6076 if (cfg->method != cfg->current_method)
6077 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6080 } else if (cmethod->klass == mono_defaults.math_class) {
6082 * There is general branchless code for Min/Max, but it does not work for
6084 * http://everything2.com/?node_id=1051618
6086 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6087 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6088 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6089 ins->dreg = alloc_preg (cfg);
6090 ins->type = STACK_I4;
6091 MONO_ADD_INS (cfg->cbb, ins);
6093 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6094 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6095 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6096 !strcmp (cmethod->klass->name, "Selector")) ||
6097 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6098 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6099 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6100 !strcmp (cmethod->klass->name, "Selector"))
6102 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6103 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6104 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6107 MonoJumpInfoToken *ji;
6110 if (args [0]->opcode == OP_GOT_ENTRY) {
6111 pi = (MonoInst *)args [0]->inst_p1;
6112 g_assert (pi->opcode == OP_PATCH_INFO);
6113 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6114 ji = (MonoJumpInfoToken *)pi->inst_p0;
6116 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6117 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6120 NULLIFY_INS (args [0]);
6122 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6123 return_val_if_nok (&cfg->error, NULL);
6125 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6126 ins->dreg = mono_alloc_ireg (cfg);
6129 MONO_ADD_INS (cfg->cbb, ins);
6134 #ifdef MONO_ARCH_SIMD_INTRINSICS
6135 if (cfg->opt & MONO_OPT_SIMD) {
6136 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6142 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6146 if (COMPILE_LLVM (cfg)) {
6147 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6152 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6156 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-specific fast path when one exists.
 * The only redirection visible here is managed string allocation:
 * String.InternalAllocateStr is replaced by a direct call to the GC's
 * managed allocator, bypassing the generic allocation icall.
 * Skipped when allocation profiling is on or MONO_OPT_SHARED is set,
 * since those need the generic slow path.
 */
6159 inline static MonoInst*
6160 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6161 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6163 if (method->klass == mono_defaults.string_class) {
6164 /* managed string allocation support */
6165 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6166 MonoInst *iargs [2];
6167 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6168 MonoMethod *managed_alloc = NULL;
6170 g_assert (vtable); /* Should not fail since it is System.String */
6171 #ifndef MONO_CROSS_COMPILE
6172 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) in place of the original call. */
6176 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6177 iargs [1] = args [0];
6178 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   When inlining, spill the SP stack entries that act as the callee's
 * arguments into freshly created local variables, so the inlined body can
 * refer to them through cfg->args [i] like normal arguments.
 * For the implicit 'this' argument (i == 0 when sig->hasthis) the type is
 * derived from the stack entry itself rather than from the signature.
 */
6185 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6187 MonoInst *store, *temp;
6190 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6191 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6194 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6195 * would be different than the MonoInst's used to represent arguments, and
6196 * the ldelema implementation can't deal with that.
6197 * Solution: When ldelema is used on an inline argument, create a var for
6198 * it, emit ldelema on that var, and emit the saving code below in
6199 * inline_method () if needed.
6201 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6202 cfg->args [i] = temp;
6203 /* This uses cfg->args [i] which is set by the preceding line */
6204 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6205 store->cil_code = sp [0]->cil_code;
/* Debug aids: when enabled, inlining can be restricted by method-name prefix
 * through environment variables (see the two functions below). */
6210 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6211 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6213 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging helper: only allow CALLED_METHOD to be inlined if its full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable. The limit is read once and cached in a static.
 * NOTE(review): the g_getenv result appears to be cached without a copy —
 * assumes the returned string stays valid for the process lifetime; confirm
 * against the eglib g_getenv contract.
 */
6215 check_inline_called_method_name_limit (MonoMethod *called_method)
6218 static const char *limit = NULL;
6220 if (limit == NULL) {
6221 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6223 if (limit_string != NULL)
6224 limit = limit_string;
/* Empty limit means "no restriction" (handled by the elided else path). */
6229 if (limit [0] != '\0') {
6230 char *called_method_name = mono_method_full_name (called_method, TRUE);
6232 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6233 g_free (called_method_name);
6235 //return (strncmp_result <= 0);
6236 return (strncmp_result == 0);
6243 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging helper, mirror image of check_inline_called_method_name_limit:
 * only allow inlining *into* CALLER_METHOD if its full name starts with the
 * prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 */
6245 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6248 static const char *limit = NULL;
6250 if (limit == NULL) {
6251 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6252 if (limit_string != NULL) {
6253 limit = limit_string;
/* Empty limit means "no restriction". */
6259 if (limit [0] != '\0') {
6260 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6262 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6263 g_free (caller_method_name);
6265 //return (strncmp_result <= 0);
6266 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG of type RTYPE to its zero value:
 * NULL for pointers/references, 0 for integers, 0.0 for floats (loaded from
 * static storage via OP_R4CONST/OP_R8CONST), and VZERO for value types.
 * Used e.g. to give the inline return variable a defined value when the
 * inlined body may not set it on every path.
 */
6274 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zero constants: R4/R8 const instructions take a pointer to the value. */
6276 static double r8_0 = 0.0;
6277 static float r4_0 = 0.0;
6281 rtype = mini_get_underlying_type (rtype);
6285 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6286 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6287 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6288 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6289 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6290 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* r4fp mode: keep the value as a true single-precision constant. */
6291 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6292 ins->type = STACK_R4;
6293 ins->inst_p0 = (void*)&r4_0;
6295 MONO_ADD_INS (cfg->cbb, ins);
6296 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6297 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6298 ins->type = STACK_R8;
6299 ins->inst_p0 = (void*)&r8_0;
6301 MONO_ADD_INS (cfg->cbb, ins);
6302 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6303 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6304 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6305 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type variable known to be a value type: zero the whole struct. */
6306 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback (reference or pointer-like): plain NULL. */
6308 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar, but emits OP_DUMMY_* placeholder instructions that
 * keep the IR valid (every vreg has a def) without generating real machine
 * code. Falls back to a real initialization for any type not covered by a
 * dummy opcode.
 */
6313 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6317 rtype = mini_get_underlying_type (rtype);
6321 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6322 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6323 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6324 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6325 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6326 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6327 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6328 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6329 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6330 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6331 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6332 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6333 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6334 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero-initialization instead. */
6336 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of TYPE. If INIT is FALSE, emit dummy
 * initialization statements instead, just to keep the IR valid.
 * Under soft-float, the value is built in a temporary vreg and then stored
 * to the local, because float locals cannot be written directly there.
 */
6342 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6344 MonoInst *var = cfg->locals [local];
6345 if (COMPILE_SOFT_FLOAT (cfg)) {
6347 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6348 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted value (cfg->cbb->last_ins) into the local. */
6349 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6352 emit_init_rvar (cfg, var->dreg, type);
/* init == FALSE: placeholder defs only, no real code. */
6354 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below; same contract. */
6359 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6361 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point in CFG. SP holds the stack
 * entries acting as arguments, IP is the call site (used to detect virtual
 * calls), REAL_OFFSET the IL offset for debug info, and INLINE_ALWAYS forces
 * inlining regardless of the cost limit.
 *
 *   Works by saving the parts of CFG state that mono_method_to_ir () mutates
 * (locals, args, cil offsets, current method, generic context, ...), running
 * mono_method_to_ir () on the callee between freshly allocated start/end
 * bblocks, restoring the state, and then either linking the new bblocks into
 * the graph (success) or abandoning them (failure).
 *
 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
 */
6370 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6371 guchar *ip, guint real_offset, gboolean inline_always)
6374 MonoInst *ins, *rvar = NULL;
6375 MonoMethodHeader *cheader;
6376 MonoBasicBlock *ebblock, *sbblock;
/* prev_* fields: snapshot of the CFG state restored after the recursive
 * mono_method_to_ir () call below. */
6378 MonoMethod *prev_inlined_method;
6379 MonoInst **prev_locals, **prev_args;
6380 MonoType **prev_arg_types;
6381 guint prev_real_offset;
6382 GHashTable *prev_cbb_hash;
6383 MonoBasicBlock **prev_cil_offset_to_bb;
6384 MonoBasicBlock *prev_cbb;
6385 const unsigned char *prev_ip;
6386 unsigned char *prev_cil_start;
6387 guint32 prev_cil_offset_to_bb_len;
6388 MonoMethod *prev_current_method;
6389 MonoGenericContext *prev_generic_context;
6390 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6392 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see the helpers above). */
6394 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6395 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6398 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6399 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6404 fsig = mono_method_signature (cmethod);
6406 if (cfg->verbose_level > 2)
6407 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6409 if (!cmethod->inline_info) {
6410 cfg->stat_inlineable_methods++;
6411 cmethod->inline_info = 1;
6414 /* allocate local variables */
6415 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: only propagate the error when inlining is mandatory. */
6417 if (inline_always) {
6418 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6419 mono_error_move (&cfg->error, &error);
6421 mono_error_cleanup (&error);
6426 /*Must verify before creating locals as it can cause the JIT to assert.*/
6427 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6428 mono_metadata_free_mh (cheader);
6432 /* allocate space to store the return value */
6433 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6434 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6437 prev_locals = cfg->locals;
6438 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6439 for (i = 0; i < cheader->num_locals; ++i)
6440 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6442 /* allocate start and end blocks */
6443 /* This is needed so if the inline is aborted, we can clean up */
6444 NEW_BBLOCK (cfg, sbblock);
6445 sbblock->real_offset = real_offset;
6447 NEW_BBLOCK (cfg, ebblock);
6448 ebblock->block_num = cfg->num_bblocks++;
6449 ebblock->real_offset = real_offset;
/* Save CFG state, then point it at the callee for the recursive IR build. */
6451 prev_args = cfg->args;
6452 prev_arg_types = cfg->arg_types;
6453 prev_inlined_method = cfg->inlined_method;
6454 cfg->inlined_method = cmethod;
6455 cfg->ret_var_set = FALSE;
6456 cfg->inline_depth ++;
6457 prev_real_offset = cfg->real_offset;
6458 prev_cbb_hash = cfg->cbb_hash;
6459 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6460 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6461 prev_cil_start = cfg->cil_start;
6463 prev_cbb = cfg->cbb;
6464 prev_current_method = cfg->current_method;
6465 prev_generic_context = cfg->generic_context;
6466 prev_ret_var_set = cfg->ret_var_set;
6467 prev_disable_inline = cfg->disable_inline;
/* A non-static CALLVIRT target needs a 'this' null check in the inlined body. */
6469 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6472 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6474 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state. */
6476 cfg->inlined_method = prev_inlined_method;
6477 cfg->real_offset = prev_real_offset;
6478 cfg->cbb_hash = prev_cbb_hash;
6479 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6480 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6481 cfg->cil_start = prev_cil_start;
6483 cfg->locals = prev_locals;
6484 cfg->args = prev_args;
6485 cfg->arg_types = prev_arg_types;
6486 cfg->current_method = prev_current_method;
6487 cfg->generic_context = prev_generic_context;
6488 cfg->ret_var_set = prev_ret_var_set;
6489 cfg->disable_inline = prev_disable_inline;
6490 cfg->inline_depth --;
/* Accept the inline if the cost is below the threshold, inlining is forced,
 * or the callee is marked AggressiveInlining (costs < 0 means hard failure). */
6492 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6493 if (cfg->verbose_level > 2)
6494 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6496 cfg->stat_inlined_methods++;
6498 /* always add some code to avoid block split failures */
6499 MONO_INST_NEW (cfg, ins, OP_NOP);
6500 MONO_ADD_INS (prev_cbb, ins);
6502 prev_cbb->next_bb = sbblock;
6503 link_bblock (cfg, prev_cbb, sbblock);
6506 * Get rid of the begin and end bblocks if possible to aid local
6509 if (prev_cbb->out_count == 1)
6510 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6512 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6513 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6515 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6516 MonoBasicBlock *prev = ebblock->in_bb [0];
6518 if (prev->next_bb == ebblock) {
6519 mono_merge_basic_blocks (cfg, prev, ebblock);
6521 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6522 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6523 cfg->cbb = prev_cbb;
6526 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6531 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Give rvar a defined value on predecessor paths that end in OP_NOT_REACHED. */
6537 for (i = 0; i < ebblock->in_count; ++i) {
6538 bb = ebblock->in_bb [i];
6540 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6543 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6553 * If the inlined method contains only a throw, then the ret var is not
6554 * set, so set it to a dummy value.
6557 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value for the caller's evaluation stack. */
6559 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6562 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline failed: discard the new bblocks and clear any pending exception. */
6565 if (cfg->verbose_level > 2)
6566 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6567 cfg->exception_type = MONO_EXCEPTION_NONE;
6569 /* This gets rid of the newly added bblocks */
6570 cfg->cbb = prev_cbb;
6572 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6577 * Some of these comments may well be out-of-date.
6578 * Design decisions: we do a single pass over the IL code (and we do bblock
6579 * splitting/merging in the few cases when it's required: a back jump to an IL
6580 * address that was not already seen as bblock starting point).
6581 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6582 * Complex operations are decomposed in simpler ones right away. We need to let the
6583 * arch-specific code peek and poke inside this process somehow (except when the
6584 * optimizations can take advantage of the full semantic info of coarse opcodes).
6585 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6586 * MonoInst->opcode initially is the IL opcode or some simplification of that
6587 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6588 * opcode with value bigger than OP_LAST.
6589 * At this point the IR can be handed over to an interpreter, a dumb code generator
6590 * or to the optimizing code generator that will translate it to SSA form.
6592 * Profiling directed optimizations.
6593 * We may compile by default with few or no optimizations and instrument the code
6594 * or the user may indicate what methods to optimize the most either in a config file
6595 * or through repeated runs where the compiler applies offline the optimizations to
6596 * each method and then decides if it was worth it.
/* Verification helpers used throughout mono_method_to_ir (): each bails out
 * via UNVERIFIED / TYPE_LOAD_ERROR when the IL being compiled is invalid.
 * They rely on locals of that function (sp, stack_start, header, num_args,
 * ip, end) being in scope at the expansion site. */
6599 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6600 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6601 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6602 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6603 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6604 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6605 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6606 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6608 /* offset from br.s -> br like opcodes */
6609 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at IP (a NULL entry in cil_offset_to_bb means no
 * bblock boundary there).
 */
6612 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6614 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6616 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode's operand
 * size, and create bblocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch. Also marks the bblock containing
 * a CEE_THROW as out-of-line so block layout can move it to cold code.
 */
6620 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6622 unsigned char *ip = start;
6623 unsigned char *target;
6626 MonoBasicBlock *bblock;
6627 const MonoOpcode *opcode;
6630 cli_addr = ip - start;
6631 i = mono_opcode_value ((const guint8 **)&ip, end);
6634 opcode = &mono_opcodes [i];
/* Advance ip past the operand; for branches, register the target bblock. */
6635 switch (opcode->argument) {
6636 case MonoInlineNone:
6639 case MonoInlineString:
6640 case MonoInlineType:
6641 case MonoInlineField:
6642 case MonoInlineMethod:
6645 case MonoShortInlineR:
6652 case MonoShortInlineVar:
6653 case MonoShortInlineI:
6656 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
6657 target = start + cli_addr + 2 + (signed char)ip [1];
6658 GET_BBLOCK (cfg, bblock, target);
6661 GET_BBLOCK (cfg, bblock, ip);
6663 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
6664 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6665 GET_BBLOCK (cfg, bblock, target);
6668 GET_BBLOCK (cfg, bblock, ip);
6670 case MonoInlineSwitch: {
6671 guint32 n = read32 (ip + 1);
/* Switch displacements are relative to the end of the whole jump table. */
6674 cli_addr += 5 + 4 * n;
6675 target = start + cli_addr;
6676 GET_BBLOCK (cfg, bblock, target);
6678 for (j = 0; j < n; ++j) {
6679 target = start + cli_addr + (gint32)read32 (ip);
6680 GET_BBLOCK (cfg, bblock, target);
6690 g_assert_not_reached ();
6693 if (i == CEE_THROW) {
6694 unsigned char *bb_start = ip - 1;
6696 /* Find the start of the bblock containing the throw */
6698 while ((bb_start >= start) && !bblock) {
6699 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: hint the layout pass to move them out of line. */
6703 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types. For wrapper methods the token indexes the wrapper's
 * own data table and the result is then inflated with CONTEXT; otherwise
 * the token is looked up in M's image. Sets ERROR on failure.
 */
6713 static inline MonoMethod *
6714 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6718 mono_error_init (error);
6720 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6721 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6723 method = mono_class_inflate_generic_method_checked (method, context, error);
6726 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open, but when not compiling gshared code,
 * reject methods on open constructed types (they cannot be compiled
 * directly). Errors land in cfg->error when CFG is given; otherwise they
 * are swallowed (see FIXME).
 */
6732 static inline MonoMethod *
6733 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6736 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6738 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6739 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6743 if (!method && !cfg)
6744 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in METHOD's context. Wrapper methods use
 * their wrapper data table (then inflate with CONTEXT); normal methods go
 * through the typespec-aware image lookup. Lookup errors are currently
 * discarded (see FIXMEs); the class is initialized before returning.
 */
6749 static inline MonoClass*
6750 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6755 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6756 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6758 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6759 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6762 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6763 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6766 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in METHOD's context: wrapper
 * data lookup for wrappers, metadata parse otherwise, followed by generic
 * inflation with CONTEXT. Sets ERROR and returns NULL on parse failure.
 */
6770 static inline MonoMethodSignature*
6771 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6773 MonoMethodSignature *fsig;
6775 mono_error_init (error);
6776 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6777 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6779 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6780 return_val_if_nok (error, NULL);
6783 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and cache in a function-local static) the managed
 *   SecurityManager.ThrowException method, used to raise security
 *   exceptions from JITted code.
 */
6789 throw_exception (void)
6791 static MonoMethod *method = NULL;
6794 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6795 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException (EX) at the current
 *   compile point, passing the exception object as a pointer constant.
 */
6802 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6804 MonoMethod *thrower = throw_exception ();
6807 EMIT_NEW_PCONST (cfg, args [0], ex);
6808 mono_emit_method_call (cfg, thrower, args, NULL);
6812 * Return the original method is a wrapper is specified. We can only access
6813 * the custom attributes from the original method.
6816 get_original_method (MonoMethod *method)
/* Non-wrapper methods are already the original method */
6818 if (method->wrapper_type == MONO_WRAPPER_NONE)
6821 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6822 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6825 /* in other cases we need to find the original method */
6826 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the security exception at runtime at this point.
 */
6830 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6832 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6833 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6835 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the security exception at runtime at this point.
 */
6839 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6841 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6842 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6844 emit_throw_exception (cfg, ex);
6848 * Check that the IL instructions at ip are the array initialization
6849 * sequence and return the pointer to the data and the size.
6852 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6855 * newarr[System.Int32]
6857 * ldtoken field valuetype ...
6858 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern-match: dup; ldtoken <field>; call <method> (0x4 is the high byte of the ldtoken field token tag) */
6860 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6862 guint32 token = read32 (ip + 7);
6863 guint32 field_token = read32 (ip + 2);
6864 guint32 field_index = field_token & 0xffffff;
6866 const char *data_ptr;
6868 MonoMethod *cmethod;
6869 MonoClass *dummy_class;
6870 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6874 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6878 *out_field_token = field_token;
/* Verify the call target really is RuntimeHelpers.InitializeArray from corlib */
6880 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6883 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw bytes are endian-safe (or we're little endian) are handled */
6885 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6886 case MONO_TYPE_BOOLEAN:
6890 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6891 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6892 case MONO_TYPE_CHAR:
/* Sanity check: the computed blob size must not exceed the RVA field's declared size */
6909 if (size > mono_type_size (field->type, &dummy_align))
6912 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6913 if (!image_is_dynamic (method->klass->image)) {
6914 field_index = read32 (ip + 2) & 0xffffff;
6915 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6916 data_ptr = mono_image_rva_map (method->klass->image, rva);
6917 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6918 /* for aot code we do the lookup on load */
6919 if (aot && data_ptr)
6920 return (const char *)GUINT_TO_POINTER (rva);
6922 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: get the data directly from the field object */
6924 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 *   IP inside METHOD, including a disassembly of the offending instruction
 *   when the method body is parseable.
 */
6932 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6935 char *method_fname = mono_method_full_name (method, TRUE);
6937 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6940 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6941 mono_error_cleanup (&error);
6942 } else if (header->code_size == 0)
6943 method_code = g_strdup ("method body is empty.");
6945 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6946 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6947 g_free (method_fname);
6948 g_free (method_code);
/* Header ownership moves to the cfg so it is freed with the compile */
6949 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local variable N. When the store would be a
 *   plain reg-reg move of a constant just produced by the previous
 *   instruction, retarget that instruction's dreg instead of emitting a move.
 */
6953 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6956 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6957 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6958 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6959 /* Optimize reg-reg moves away */
6961 * Can't optimize other opcodes, since sp[0] might point to
6962 * the last ins of a decomposed opcode.
/* Retarget the constant directly into the local's register */
6964 sp [0]->dreg = (cfg)->locals [n]->dreg;
6966 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6971 * ldloca inhibits many optimizations so try to get rid of it in common
6974 static inline unsigned char *
6975 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6985 local = read16 (ip + 2);
/* ldloca followed by initobj in the same bblock: initialize the local directly */
6989 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6990 /* From the INITOBJ case */
6991 token = read32 (ip + 2);
6992 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6993 CHECK_TYPELOAD (klass);
6994 type = mini_get_underlying_type (&klass->byval_arg);
6995 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call in llvm-only mode, where vtable and IMT
 *   slots hold function descriptors (address + extra argument) instead of
 *   plain code addresses/trampolines. Picks one of four strategies:
 *   normal virtual call, simple interface call, generic-virtual/variant
 *   interface call through a dynamically extended IMT thunk, or a fully
 *   dynamic gsharedvt resolution via a JIT icall.
 */
7003 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7005 MonoInst *icall_args [16];
7006 MonoInst *call_target, *ins, *vtable_ins;
7007 int arg_reg, this_reg, vtable_reg;
7008 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7009 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7010 gboolean variant_iface = FALSE;
7013 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7016 * In llvm-only mode, vtables contain function descriptors instead of
7017 * method addresses/trampolines.
7019 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots; classes use the method's vtable index */
7022 slot = mono_method_get_imt_slot (cmethod);
7024 slot = mono_method_get_vtable_index (cmethod);
7026 this_reg = sp [0]->dreg;
7028 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7029 variant_iface = TRUE;
/* Case 1: plain virtual call through the vtable */
7031 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7033 * The simplest case, a normal virtual call.
7035 int slot_reg = alloc_preg (cfg);
7036 int addr_reg = alloc_preg (cfg);
7037 int arg_reg = alloc_preg (cfg);
7038 MonoBasicBlock *non_null_bb;
7040 vtable_reg = alloc_preg (cfg);
7041 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7042 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7044 /* Load the vtable slot, which contains a function descriptor. */
7045 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7047 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-NULL); marked as the likely branch */
7049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7050 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7051 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7054 // FIXME: Make the wrapper use the preserveall cconv
7055 // FIXME: Use one icall per slot for small slot numbers ?
7056 icall_args [0] = vtable_ins;
7057 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7058 /* Make the icall return the vtable slot value to save some code space */
7059 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7060 ins->dreg = slot_reg;
7061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7064 MONO_START_BB (cfg, non_null_bb);
7065 /* Load the address + arg from the vtable slot */
7066 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7069 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple (non-variant, non-generic) interface call via an IMT slot */
7072 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7074 * A simple interface call
7076 * We make a call through an imt slot to obtain the function descriptor we need to call.
7077 * The imt slot contains a function descriptor for a runtime function + arg.
7079 int slot_reg = alloc_preg (cfg);
7080 int addr_reg = alloc_preg (cfg);
7081 int arg_reg = alloc_preg (cfg);
7082 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7084 vtable_reg = alloc_preg (cfg);
7085 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper */
7086 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7089 * The slot is already initialized when the vtable is created so there is no need
7093 /* Load the imt slot, which contains a function descriptor. */
7094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7096 /* Load the address + arg of the imt thunk from the imt slot */
7097 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7098 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7100 * IMT thunks in llvm-only mode are C functions which take an info argument
7101 * plus the imt method and return the ftndesc to call.
7103 icall_args [0] = thunk_arg_ins;
7104 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7105 cmethod, MONO_RGCTX_INFO_METHOD);
7106 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7108 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant interface / special array interface */
7111 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7113 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7114 * dynamically extended as more instantiations are discovered.
7115 * This handles generic virtual methods both on classes and interfaces.
7117 int slot_reg = alloc_preg (cfg);
7118 int addr_reg = alloc_preg (cfg);
7119 int arg_reg = alloc_preg (cfg);
7120 int ftndesc_reg = alloc_preg (cfg);
7121 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7122 MonoBasicBlock *slowpath_bb, *end_bb;
7124 NEW_BBLOCK (cfg, slowpath_bb);
7125 NEW_BBLOCK (cfg, end_bb);
7127 vtable_reg = alloc_preg (cfg);
7128 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface methods use an IMT slot, class methods a vtable slot */
7130 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7132 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7134 /* Load the slot, which contains a function descriptor. */
7135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7137 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7138 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7143 /* Same as with iface calls */
7144 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7145 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7146 icall_args [0] = thunk_arg_ins;
7147 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7148 cmethod, MONO_RGCTX_INFO_METHOD);
7149 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7150 ftndesc_ins->dreg = ftndesc_reg;
7152 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7153 * they don't know about yet. Fall back to the slowpath in that case.
7155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a JIT icall */
7161 MONO_START_BB (cfg, slowpath_bb);
7162 icall_args [0] = vtable_ins;
7163 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7164 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7165 cmethod, MONO_RGCTX_INFO_METHOD);
7167 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7169 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7170 ftndesc_ins->dreg = ftndesc_reg;
7171 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7174 MONO_START_BB (cfg, end_bb);
7175 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: gsharedvt — resolve target and extra arg entirely at runtime */
7179 * Non-optimized cases
7181 icall_args [0] = sp [0];
7182 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7184 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7185 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the icall writes the extra call argument here */
7187 arg_reg = alloc_preg (cfg);
7188 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7189 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7191 g_assert (is_gsharedvt);
7193 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7195 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7198 * Pass the extra argument even if the callee doesn't receive it, most
7199 * calling conventions allow this.
7201 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 *   walking the parent chain.
 */
7205 is_exception_class (MonoClass *klass)
7208 if (klass == mono_defaults.exception_class)
7210 klass = klass->parent;
7216 * is_jit_optimizer_disabled:
7218 * Determine whenever M's assembly has a DebuggableAttribute with the
7219 * IsJITOptimizerDisabled flag set.
7222 is_jit_optimizer_disabled (MonoMethod *m)
7225 MonoAssembly *ass = m->klass->image->assembly;
7226 MonoCustomAttrInfo* attrs;
7229 gboolean val = FALSE;
/* Fast path: the answer is cached per assembly (double-checked with barriers) */
7232 if (ass->jit_optimizer_disabled_inited)
7233 return ass->jit_optimizer_disabled;
7235 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute type not present: cache FALSE and return */
7239 ass->jit_optimizer_disabled = FALSE;
7240 mono_memory_barrier ();
7241 ass->jit_optimizer_disabled_inited = TRUE;
7245 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7246 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7248 for (i = 0; i < attrs->num_attrs; ++i) {
7249 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7251 MonoMethodSignature *sig;
7253 if (!attr->ctor || attr->ctor->klass != klass)
7255 /* Decode the attribute. See reflection.c */
7256 p = (const char*)attr->data;
7257 g_assert (read16 (p) == 0x0001);
7260 // FIXME: Support named parameters
7261 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here */
7262 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7264 /* Two boolean arguments */
7268 mono_custom_attrs_free (attrs);
/* Publish result before setting the inited flag (barrier orders the stores) */
7271 ass->jit_optimizer_disabled = val;
7272 mono_memory_barrier ();
7273 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call from METHOD to CMETHOD with signature FSIG can be
 *   compiled as a real tail call. Starts from the arch-specific answer and
 *   then vetoes cases where the callee could observe the current frame's
 *   stack (byref/pointer args, valuetype this), pinvokes, lmf-saving
 *   methods, most wrappers, and non-CEE_CALL opcodes.
 */
7279 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7281 gboolean supported_tail_call;
7284 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7286 for (i = 0; i < fsig->param_count; ++i) {
7287 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7288 /* These can point to the current method's stack */
7289 supported_tail_call = FALSE;
7291 if (fsig->hasthis && cmethod->klass->valuetype)
7292 /* this might point to the current method's stack */
7293 supported_tail_call = FALSE;
7294 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7295 supported_tail_call = FALSE;
7296 if (cfg->method->save_lmf)
7297 supported_tail_call = FALSE;
7298 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7299 supported_tail_call = FALSE;
7300 if (call_opcode != CEE_CALL)
7301 supported_tail_call = FALSE;
7303 /* Debugging support */
7305 if (supported_tail_call) {
7306 if (!mono_debug_count ())
7307 supported_tail_call = FALSE;
7311 return supported_tail_call;
7317 * Handle calls made to ctors from NEWOBJ opcodes.
7320 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7321 MonoInst **sp, guint8 *ip, int *inline_costs)
7323 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Generic-sharable valuetype ctors receive an extra vtable/mrgctx argument */
7325 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7326 mono_method_is_generic_sharable (cmethod, TRUE)) {
7327 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7328 mono_class_vtable (cfg->domain, cmethod->klass);
7329 CHECK_TYPELOAD (cmethod->klass);
7331 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7332 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7335 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7336 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared case: the vtable can be embedded as a constant */
7338 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7340 CHECK_TYPELOAD (cmethod->klass);
7341 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7346 /* Avoid virtual calls to ctors if possible */
7347 if (mono_class_is_marshalbyref (cmethod->klass))
7348 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first */
7350 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7351 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7352 CHECK_CFG_EXCEPTION;
/* Then try inlining (not for exception subclasses) */
7353 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7354 mono_method_check_inlining (cfg, cmethod) &&
7355 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7358 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7359 cfg->real_offset += 5;
7361 *inline_costs += costs - 5;
7363 INLINE_FAILURE ("inline failure");
7364 // FIXME-VT: Clean this up
7365 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7366 GSHAREDVT_FAILURE(*ip);
7367 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through the gsharedvt out trampoline */
7369 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7372 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7374 if (cfg->llvm_only) {
7375 // FIXME: Avoid initializing vtable_arg
7376 emit_llvmonly_calli (cfg, fsig, sp, addr);
7378 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* Shared-generic context where a direct call cannot be patched: indirect call */
7380 } else if (context_used &&
7381 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7382 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7383 MonoInst *cmethod_addr;
7385 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7387 if (cfg->llvm_only) {
7388 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7389 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7390 emit_llvmonly_calli (cfg, fsig, sp, addr);
7392 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7393 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7395 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: an ordinary (non-inlined) ctor call */
7398 INLINE_FAILURE ("ctor call");
7399 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7400 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR that stores VAL as the return value of the current method.
 *   Valuetype returns go through the return variable or the hidden
 *   vret_addr; soft-float R4 returns are converted via an icall; everything
 *   else uses the arch-specific setret.
 */
7407 emit_setret (MonoCompile *cfg, MonoInst *val)
7409 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return (STOBJ): store into cfg->ret or through vret_addr */
7412 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7415 if (!cfg->vret_addr) {
7416 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7418 EMIT_NEW_RETLOADA (cfg, ret_addr);
7420 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7421 ins->klass = mono_class_from_mono_type (ret_type);
7424 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: an R4 return value must be converted through an icall */
7425 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7426 MonoInst *iargs [1];
7430 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7431 mono_arch_emit_setret (cfg, cfg->method, conv);
7433 mono_arch_emit_setret (cfg, cfg->method, val);
7436 mono_arch_emit_setret (cfg, cfg->method, val);
7442 * mono_method_to_ir:
7444 * Translate the .net IL into linear IR.
7446 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7447 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7448 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7449 * @inline_args: if not NULL, contains the arguments to the inline call
7450 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7451 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7453 * This method is used to turn ECMA IL into Mono's internal Linear IR
7454 * reprensetation. It is used both for entire methods, as well as
7455 * inlining existing methods. In the former case, the @start_bblock,
7456 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7457 * inline_offset is set to zero.
7459 * Returns: the inline cost, or -1 if there was an error processing this method.
7462 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7463 MonoInst *return_var, MonoInst **inline_args,
7464 guint inline_offset, gboolean is_virtual_call)
7467 MonoInst *ins, **sp, **stack_start;
7468 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7469 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7470 MonoMethod *cmethod, *method_definition;
7471 MonoInst **arg_array;
7472 MonoMethodHeader *header;
7474 guint32 token, ins_flag;
7476 MonoClass *constrained_class = NULL;
7477 unsigned char *ip, *end, *target, *err_pos;
7478 MonoMethodSignature *sig;
7479 MonoGenericContext *generic_context = NULL;
7480 MonoGenericContainer *generic_container = NULL;
7481 MonoType **param_types;
7482 int i, n, start_new_bblock, dreg;
7483 int num_calls = 0, inline_costs = 0;
7484 int breakpoint_id = 0;
7486 GSList *class_inits = NULL;
7487 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7489 gboolean init_locals, seq_points, skip_dead_blocks;
7490 gboolean sym_seq_points = FALSE;
7491 MonoDebugMethodInfo *minfo;
7492 MonoBitSet *seq_point_locs = NULL;
7493 MonoBitSet *seq_point_set_locs = NULL;
7495 cfg->disable_inline = is_jit_optimizer_disabled (method);
7497 /* serialization and xdomain stuff may need access to private fields and methods */
7498 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7499 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7500 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7501 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7502 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7503 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7505 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7506 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7507 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7508 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7509 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7511 image = method->klass->image;
7512 header = mono_method_get_header_checked (method, &cfg->error);
7514 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7515 goto exception_exit;
7517 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7520 generic_container = mono_method_get_generic_container (method);
7521 sig = mono_method_signature (method);
7522 num_args = sig->hasthis + sig->param_count;
7523 ip = (unsigned char*)header->code;
7524 cfg->cil_start = ip;
7525 end = ip + header->code_size;
7526 cfg->stat_cil_code_size += header->code_size;
7528 seq_points = cfg->gen_seq_points && cfg->method == method;
7530 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7531 /* We could hit a seq point before attaching to the JIT (#8338) */
7535 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7536 minfo = mono_debug_lookup_method (method);
7538 MonoSymSeqPoint *sps;
7539 int i, n_il_offsets;
7541 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7542 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7543 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7544 sym_seq_points = TRUE;
7545 for (i = 0; i < n_il_offsets; ++i) {
7546 if (sps [i].il_offset < header->code_size)
7547 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7551 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7553 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7555 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7556 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7558 mono_debug_free_method_async_debug_info (asyncMethod);
7560 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7561 /* Methods without line number info like auto-generated property accessors */
7562 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7563 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7564 sym_seq_points = TRUE;
7569 * Methods without init_locals set could cause asserts in various passes
7570 * (#497220). To work around this, we emit dummy initialization opcodes
7571 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7572 * on some platforms.
7574 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7575 init_locals = header->init_locals;
7579 method_definition = method;
7580 while (method_definition->is_inflated) {
7581 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7582 method_definition = imethod->declaring;
7585 /* SkipVerification is not allowed if core-clr is enabled */
7586 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7588 dont_verify_stloc = TRUE;
7591 if (sig->is_inflated)
7592 generic_context = mono_method_get_context (method);
7593 else if (generic_container)
7594 generic_context = &generic_container->context;
7595 cfg->generic_context = generic_context;
7598 g_assert (!sig->has_type_parameters);
7600 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7601 g_assert (method->is_inflated);
7602 g_assert (mono_method_get_context (method)->method_inst);
7604 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7605 g_assert (sig->generic_param_count);
7607 if (cfg->method == method) {
7608 cfg->real_offset = 0;
7610 cfg->real_offset = inline_offset;
7613 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7614 cfg->cil_offset_to_bb_len = header->code_size;
7616 cfg->current_method = method;
7618 if (cfg->verbose_level > 2)
7619 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7621 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7623 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7624 for (n = 0; n < sig->param_count; ++n)
7625 param_types [n + sig->hasthis] = sig->params [n];
7626 cfg->arg_types = param_types;
7628 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7629 if (cfg->method == method) {
7631 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7632 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7635 NEW_BBLOCK (cfg, start_bblock);
7636 cfg->bb_entry = start_bblock;
7637 start_bblock->cil_code = NULL;
7638 start_bblock->cil_length = 0;
7641 NEW_BBLOCK (cfg, end_bblock);
7642 cfg->bb_exit = end_bblock;
7643 end_bblock->cil_code = NULL;
7644 end_bblock->cil_length = 0;
7645 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7646 g_assert (cfg->num_bblocks == 2);
7648 arg_array = cfg->args;
7650 if (header->num_clauses) {
7651 cfg->spvars = g_hash_table_new (NULL, NULL);
7652 cfg->exvars = g_hash_table_new (NULL, NULL);
7654 /* handle exception clauses */
7655 for (i = 0; i < header->num_clauses; ++i) {
7656 MonoBasicBlock *try_bb;
7657 MonoExceptionClause *clause = &header->clauses [i];
7658 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7660 try_bb->real_offset = clause->try_offset;
7661 try_bb->try_start = TRUE;
7662 try_bb->region = ((i + 1) << 8) | clause->flags;
7663 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7664 tblock->real_offset = clause->handler_offset;
7665 tblock->flags |= BB_EXCEPTION_HANDLER;
7668 * Linking the try block with the EH block hinders inlining as we won't be able to
7669 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7671 if (COMPILE_LLVM (cfg))
7672 link_bblock (cfg, try_bb, tblock);
7674 if (*(ip + clause->handler_offset) == CEE_POP)
7675 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7677 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7678 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7679 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7680 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7681 MONO_ADD_INS (tblock, ins);
7683 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7684 /* finally clauses already have a seq point */
7685 /* seq points for filter clauses are emitted below */
7686 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7687 MONO_ADD_INS (tblock, ins);
7690 /* todo: is a fault block unsafe to optimize? */
7691 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7692 tblock->flags |= BB_EXCEPTION_UNSAFE;
7695 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7697 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7699 /* catch and filter blocks get the exception object on the stack */
7700 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7701 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7703 /* mostly like handle_stack_args (), but just sets the input args */
7704 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7705 tblock->in_scount = 1;
7706 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7707 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7711 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7712 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7713 if (!cfg->compile_llvm) {
7714 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7715 ins->dreg = tblock->in_stack [0]->dreg;
7716 MONO_ADD_INS (tblock, ins);
7719 MonoInst *dummy_use;
7722 * Add a dummy use for the exvar so its liveness info will be
7725 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7728 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7729 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7730 MONO_ADD_INS (tblock, ins);
7733 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7734 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7735 tblock->flags |= BB_EXCEPTION_HANDLER;
7736 tblock->real_offset = clause->data.filter_offset;
7737 tblock->in_scount = 1;
7738 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7739 /* The filter block shares the exvar with the handler block */
7740 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7741 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7742 MONO_ADD_INS (tblock, ins);
7746 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7747 clause->data.catch_class &&
7749 mono_class_check_context_used (clause->data.catch_class)) {
7751 * In shared generic code with catch
7752 * clauses containing type variables
7753 * the exception handling code has to
7754 * be able to get to the rgctx.
7755 * Therefore we have to make sure that
7756 * the vtable/mrgctx argument (for
7757 * static or generic methods) or the
7758 * "this" argument (for non-static
7759 * methods) are live.
7761 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7762 mini_method_get_context (method)->method_inst ||
7763 method->klass->valuetype) {
7764 mono_get_vtable_var (cfg);
7766 MonoInst *dummy_use;
7768 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7773 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7774 cfg->cbb = start_bblock;
7775 cfg->args = arg_array;
7776 mono_save_args (cfg, sig, inline_args);
7779 /* FIRST CODE BLOCK */
7780 NEW_BBLOCK (cfg, tblock);
7781 tblock->cil_code = ip;
7785 ADD_BBLOCK (cfg, tblock);
7787 if (cfg->method == method) {
7788 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7789 if (breakpoint_id) {
7790 MONO_INST_NEW (cfg, ins, OP_BREAK);
7791 MONO_ADD_INS (cfg->cbb, ins);
7795 /* we use a separate basic block for the initialization code */
7796 NEW_BBLOCK (cfg, init_localsbb);
7797 if (cfg->method == method)
7798 cfg->bb_init = init_localsbb;
7799 init_localsbb->real_offset = cfg->real_offset;
7800 start_bblock->next_bb = init_localsbb;
7801 init_localsbb->next_bb = cfg->cbb;
7802 link_bblock (cfg, start_bblock, init_localsbb);
7803 link_bblock (cfg, init_localsbb, cfg->cbb);
7805 cfg->cbb = init_localsbb;
7807 if (cfg->gsharedvt && cfg->method == method) {
7808 MonoGSharedVtMethodInfo *info;
7809 MonoInst *var, *locals_var;
7812 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7813 info->method = cfg->method;
7814 info->count_entries = 16;
7815 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7816 cfg->gsharedvt_info = info;
7818 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7819 /* prevent it from being register allocated */
7820 //var->flags |= MONO_INST_VOLATILE;
7821 cfg->gsharedvt_info_var = var;
7823 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7824 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7826 /* Allocate locals */
7827 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7828 /* prevent it from being register allocated */
7829 //locals_var->flags |= MONO_INST_VOLATILE;
7830 cfg->gsharedvt_locals_var = locals_var;
7832 dreg = alloc_ireg (cfg);
7833 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7835 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7836 ins->dreg = locals_var->dreg;
7838 MONO_ADD_INS (cfg->cbb, ins);
7839 cfg->gsharedvt_locals_var_ins = ins;
7841 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7844 ins->flags |= MONO_INST_INIT;
7848 if (mono_security_core_clr_enabled ()) {
7849 /* check if this is native code, e.g. an icall or a p/invoke */
7850 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7851 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7853 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7854 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7856 /* if this is a native call then it can only be JITted from platform code */
7857 if ((icall || pinvk) && method->klass && method->klass->image) {
7858 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7859 MonoException *ex = icall ? mono_get_exception_security () :
7860 mono_get_exception_method_access ();
7861 emit_throw_exception (cfg, ex);
7868 CHECK_CFG_EXCEPTION;
7870 if (header->code_size == 0)
7873 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7878 if (cfg->method == method)
7879 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7881 for (n = 0; n < header->num_locals; ++n) {
7882 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7887 /* We force the vtable variable here for all shared methods
7888 for the possibility that they might show up in a stack
7889 trace where their exact instantiation is needed. */
7890 if (cfg->gshared && method == cfg->method) {
7891 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7892 mini_method_get_context (method)->method_inst ||
7893 method->klass->valuetype) {
7894 mono_get_vtable_var (cfg);
7896 /* FIXME: Is there a better way to do this?
7897 We need the variable live for the duration
7898 of the whole method. */
7899 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7903 /* add a check for this != NULL to inlined methods */
7904 if (is_virtual_call) {
7907 NEW_ARGLOAD (cfg, arg_ins, 0);
7908 MONO_ADD_INS (cfg->cbb, arg_ins);
7909 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7912 skip_dead_blocks = !dont_verify;
7913 if (skip_dead_blocks) {
7914 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7919 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7920 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7923 start_new_bblock = 0;
7925 if (cfg->method == method)
7926 cfg->real_offset = ip - header->code;
7928 cfg->real_offset = inline_offset;
7933 if (start_new_bblock) {
7934 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7935 if (start_new_bblock == 2) {
7936 g_assert (ip == tblock->cil_code);
7938 GET_BBLOCK (cfg, tblock, ip);
7940 cfg->cbb->next_bb = tblock;
7942 start_new_bblock = 0;
7943 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7944 if (cfg->verbose_level > 3)
7945 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7946 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7950 g_slist_free (class_inits);
7953 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7954 link_bblock (cfg, cfg->cbb, tblock);
7955 if (sp != stack_start) {
7956 handle_stack_args (cfg, stack_start, sp - stack_start);
7958 CHECK_UNVERIFIABLE (cfg);
7960 cfg->cbb->next_bb = tblock;
7962 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7963 if (cfg->verbose_level > 3)
7964 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7965 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7968 g_slist_free (class_inits);
7973 if (skip_dead_blocks) {
7974 int ip_offset = ip - header->code;
7976 if (ip_offset == bb->end)
7980 int op_size = mono_opcode_size (ip, end);
7981 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7983 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7985 if (ip_offset + op_size == bb->end) {
7986 MONO_INST_NEW (cfg, ins, OP_NOP);
7987 MONO_ADD_INS (cfg->cbb, ins);
7988 start_new_bblock = 1;
7996 * Sequence points are points where the debugger can place a breakpoint.
7997 * Currently, we generate these automatically at points where the IL
8000 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8002 * Make methods interruptable at the beginning, and at the targets of
8003 * backward branches.
8004 * Also, do this at the start of every bblock in methods with clauses too,
8005 * to be able to handle instructions with imprecise control flow like
8007 * Backward branches are handled at the end of method-to-ir ().
8009 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8010 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8012 /* Avoid sequence points on empty IL like .volatile */
8013 // FIXME: Enable this
8014 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8015 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8016 if ((sp != stack_start) && !sym_seq_point)
8017 ins->flags |= MONO_INST_NONEMPTY_STACK;
8018 MONO_ADD_INS (cfg->cbb, ins);
8021 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8024 cfg->cbb->real_offset = cfg->real_offset;
8026 if ((cfg->method == method) && cfg->coverage_info) {
8027 guint32 cil_offset = ip - header->code;
8028 cfg->coverage_info->data [cil_offset].cil_code = ip;
8030 /* TODO: Use an increment here */
8031 #if defined(TARGET_X86)
8032 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8033 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8035 MONO_ADD_INS (cfg->cbb, ins);
8037 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8038 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8042 if (cfg->verbose_level > 3)
8043 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8047 if (seq_points && !sym_seq_points && sp != stack_start) {
8049 * The C# compiler uses these nops to notify the JIT that it should
8050 * insert seq points.
8052 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8053 MONO_ADD_INS (cfg->cbb, ins);
8055 if (cfg->keep_cil_nops)
8056 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8058 MONO_INST_NEW (cfg, ins, OP_NOP);
8060 MONO_ADD_INS (cfg->cbb, ins);
8063 if (should_insert_brekpoint (cfg->method)) {
8064 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8066 MONO_INST_NEW (cfg, ins, OP_NOP);
8069 MONO_ADD_INS (cfg->cbb, ins);
8075 CHECK_STACK_OVF (1);
8076 n = (*ip)-CEE_LDARG_0;
8078 EMIT_NEW_ARGLOAD (cfg, ins, n);
8086 CHECK_STACK_OVF (1);
8087 n = (*ip)-CEE_LDLOC_0;
8089 EMIT_NEW_LOCLOAD (cfg, ins, n);
8098 n = (*ip)-CEE_STLOC_0;
8101 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8103 emit_stloc_ir (cfg, sp, header, n);
8110 CHECK_STACK_OVF (1);
8113 EMIT_NEW_ARGLOAD (cfg, ins, n);
8119 CHECK_STACK_OVF (1);
8122 NEW_ARGLOADA (cfg, ins, n);
8123 MONO_ADD_INS (cfg->cbb, ins);
8133 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8135 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8140 CHECK_STACK_OVF (1);
8143 EMIT_NEW_LOCLOAD (cfg, ins, n);
8147 case CEE_LDLOCA_S: {
8148 unsigned char *tmp_ip;
8150 CHECK_STACK_OVF (1);
8151 CHECK_LOCAL (ip [1]);
8153 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8159 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8168 CHECK_LOCAL (ip [1]);
8169 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8171 emit_stloc_ir (cfg, sp, header, ip [1]);
8176 CHECK_STACK_OVF (1);
8177 EMIT_NEW_PCONST (cfg, ins, NULL);
8178 ins->type = STACK_OBJ;
8183 CHECK_STACK_OVF (1);
8184 EMIT_NEW_ICONST (cfg, ins, -1);
8197 CHECK_STACK_OVF (1);
8198 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8204 CHECK_STACK_OVF (1);
8206 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8212 CHECK_STACK_OVF (1);
8213 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8219 CHECK_STACK_OVF (1);
8220 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8221 ins->type = STACK_I8;
8222 ins->dreg = alloc_dreg (cfg, STACK_I8);
8224 ins->inst_l = (gint64)read64 (ip);
8225 MONO_ADD_INS (cfg->cbb, ins);
8231 gboolean use_aotconst = FALSE;
8233 #ifdef TARGET_POWERPC
8234 /* FIXME: Clean this up */
8235 if (cfg->compile_aot)
8236 use_aotconst = TRUE;
8239 /* FIXME: we should really allocate this only late in the compilation process */
8240 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8242 CHECK_STACK_OVF (1);
8248 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8250 dreg = alloc_freg (cfg);
8251 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8252 ins->type = cfg->r4_stack_type;
8254 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8255 ins->type = cfg->r4_stack_type;
8256 ins->dreg = alloc_dreg (cfg, STACK_R8);
8258 MONO_ADD_INS (cfg->cbb, ins);
8268 gboolean use_aotconst = FALSE;
8270 #ifdef TARGET_POWERPC
8271 /* FIXME: Clean this up */
8272 if (cfg->compile_aot)
8273 use_aotconst = TRUE;
8276 /* FIXME: we should really allocate this only late in the compilation process */
8277 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8279 CHECK_STACK_OVF (1);
8285 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8287 dreg = alloc_freg (cfg);
8288 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8289 ins->type = STACK_R8;
8291 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8292 ins->type = STACK_R8;
8293 ins->dreg = alloc_dreg (cfg, STACK_R8);
8295 MONO_ADD_INS (cfg->cbb, ins);
8304 MonoInst *temp, *store;
8306 CHECK_STACK_OVF (1);
8310 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8311 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8313 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8316 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8329 if (sp [0]->type == STACK_R8)
8330 /* we need to pop the value from the x86 FP stack */
8331 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8336 MonoMethodSignature *fsig;
8339 INLINE_FAILURE ("jmp");
8340 GSHAREDVT_FAILURE (*ip);
8343 if (stack_start != sp)
8345 token = read32 (ip + 1);
8346 /* FIXME: check the signature matches */
8347 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8350 if (cfg->gshared && mono_method_check_context_used (cmethod))
8351 GENERIC_SHARING_FAILURE (CEE_JMP);
8353 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8355 fsig = mono_method_signature (cmethod);
8356 n = fsig->param_count + fsig->hasthis;
8357 if (cfg->llvm_only) {
8360 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8361 for (i = 0; i < n; ++i)
8362 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8363 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8365 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8366 * have to emit a normal return since llvm expects it.
8369 emit_setret (cfg, ins);
8370 MONO_INST_NEW (cfg, ins, OP_BR);
8371 ins->inst_target_bb = end_bblock;
8372 MONO_ADD_INS (cfg->cbb, ins);
8373 link_bblock (cfg, cfg->cbb, end_bblock);
8376 } else if (cfg->backend->have_op_tail_call) {
8377 /* Handle tail calls similarly to calls */
8380 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8381 call->method = cmethod;
8382 call->tail_call = TRUE;
8383 call->signature = mono_method_signature (cmethod);
8384 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8385 call->inst.inst_p0 = cmethod;
8386 for (i = 0; i < n; ++i)
8387 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8389 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8390 call->vret_var = cfg->vret_addr;
8392 mono_arch_emit_call (cfg, call);
8393 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8394 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8396 for (i = 0; i < num_args; ++i)
8397 /* Prevent arguments from being optimized away */
8398 arg_array [i]->flags |= MONO_INST_VOLATILE;
8400 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8401 ins = (MonoInst*)call;
8402 ins->inst_p0 = cmethod;
8403 MONO_ADD_INS (cfg->cbb, ins);
8407 start_new_bblock = 1;
8412 MonoMethodSignature *fsig;
8415 token = read32 (ip + 1);
8419 //GSHAREDVT_FAILURE (*ip);
8424 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8427 if (method->dynamic && fsig->pinvoke) {
8431 * This is a call through a function pointer using a pinvoke
8432 * signature. Have to create a wrapper and call that instead.
8433 * FIXME: This is very slow, need to create a wrapper at JIT time
8434 * instead based on the signature.
8436 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8437 EMIT_NEW_PCONST (cfg, args [1], fsig);
8439 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8442 n = fsig->param_count + fsig->hasthis;
8446 //g_assert (!virtual_ || fsig->hasthis);
8450 inline_costs += 10 * num_calls++;
8453 * Making generic calls out of gsharedvt methods.
8454 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8455 * patching gshared method addresses into a gsharedvt method.
8457 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8459 * We pass the address to the gsharedvt trampoline in the rgctx reg
8461 MonoInst *callee = addr;
8463 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8465 GSHAREDVT_FAILURE (*ip);
8469 GSHAREDVT_FAILURE (*ip);
8471 addr = emit_get_rgctx_sig (cfg, context_used,
8472 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8473 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8477 /* Prevent inlining of methods with indirect calls */
8478 INLINE_FAILURE ("indirect call");
8480 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8481 MonoJumpInfoType info_type;
8485 * Instead of emitting an indirect call, emit a direct call
8486 * with the contents of the aotconst as the patch info.
8488 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8489 info_type = (MonoJumpInfoType)addr->inst_c1;
8490 info_data = addr->inst_p0;
8492 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8493 info_data = addr->inst_right->inst_left;
8496 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8497 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8500 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8501 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8506 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8510 /* End of call, INS should contain the result of the call, if any */
8512 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8514 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8517 CHECK_CFG_EXCEPTION;
8521 constrained_class = NULL;
8525 case CEE_CALLVIRT: {
8526 MonoInst *addr = NULL;
8527 MonoMethodSignature *fsig = NULL;
8529 int virtual_ = *ip == CEE_CALLVIRT;
8530 gboolean pass_imt_from_rgctx = FALSE;
8531 MonoInst *imt_arg = NULL;
8532 MonoInst *keep_this_alive = NULL;
8533 gboolean pass_vtable = FALSE;
8534 gboolean pass_mrgctx = FALSE;
8535 MonoInst *vtable_arg = NULL;
8536 gboolean check_this = FALSE;
8537 gboolean supported_tail_call = FALSE;
8538 gboolean tail_call = FALSE;
8539 gboolean need_seq_point = FALSE;
8540 guint32 call_opcode = *ip;
8541 gboolean emit_widen = TRUE;
8542 gboolean push_res = TRUE;
8543 gboolean skip_ret = FALSE;
8544 gboolean delegate_invoke = FALSE;
8545 gboolean direct_icall = FALSE;
8546 gboolean constrained_partial_call = FALSE;
8547 MonoMethod *cil_method;
8550 token = read32 (ip + 1);
8554 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8557 cil_method = cmethod;
8559 if (constrained_class) {
8560 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8561 if (!mini_is_gsharedvt_klass (constrained_class)) {
8562 g_assert (!cmethod->klass->valuetype);
8563 if (!mini_type_is_reference (&constrained_class->byval_arg))
8564 constrained_partial_call = TRUE;
8568 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8569 if (cfg->verbose_level > 2)
8570 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8571 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8572 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8574 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8578 if (cfg->verbose_level > 2)
8579 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8581 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8583 * This is needed since get_method_constrained can't find
8584 * the method in klass representing a type var.
8585 * The type var is guaranteed to be a reference type in this
8588 if (!mini_is_gsharedvt_klass (constrained_class))
8589 g_assert (!cmethod->klass->valuetype);
8591 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8596 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8597 /* Use the corresponding method from the base type to avoid boxing */
8598 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8599 g_assert (base_type);
8600 constrained_class = mono_class_from_mono_type (base_type);
8601 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8606 if (!dont_verify && !cfg->skip_visibility) {
8607 MonoMethod *target_method = cil_method;
8608 if (method->is_inflated) {
8609 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8612 if (!mono_method_can_access_method (method_definition, target_method) &&
8613 !mono_method_can_access_method (method, cil_method))
8614 emit_method_access_failure (cfg, method, cil_method);
8617 if (mono_security_core_clr_enabled ())
8618 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8620 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8621 /* MS.NET seems to silently convert this to a callvirt */
8626 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8627 * converts to a callvirt.
8629 * tests/bug-515884.il is an example of this behavior
8631 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8632 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8633 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8637 if (!cmethod->klass->inited)
8638 if (!mono_class_init (cmethod->klass))
8639 TYPE_LOAD_ERROR (cmethod->klass);
8641 fsig = mono_method_signature (cmethod);
8644 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8645 mini_class_is_system_array (cmethod->klass)) {
8646 array_rank = cmethod->klass->rank;
8647 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8648 direct_icall = TRUE;
8649 } else if (fsig->pinvoke) {
8650 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8651 fsig = mono_method_signature (wrapper);
8652 } else if (constrained_class) {
8654 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8658 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8659 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8661 /* See code below */
8662 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8663 MonoBasicBlock *tbb;
8665 GET_BBLOCK (cfg, tbb, ip + 5);
8666 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8668 * We want to extend the try block to cover the call, but we can't do it if the
8669 * call is made directly since it's followed by an exception check.
8671 direct_icall = FALSE;
8675 mono_save_token_info (cfg, image, token, cil_method);
8677 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8678 need_seq_point = TRUE;
8680 /* Don't support calls made using type arguments for now */
8682 if (cfg->gsharedvt) {
8683 if (mini_is_gsharedvt_signature (fsig))
8684 GSHAREDVT_FAILURE (*ip);
8688 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8689 g_assert_not_reached ();
8691 n = fsig->param_count + fsig->hasthis;
8693 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8697 g_assert (!mono_method_check_context_used (cmethod));
8701 //g_assert (!virtual_ || fsig->hasthis);
8706 * We have the `constrained.' prefix opcode.
8708 if (constrained_class) {
8709 if (mini_is_gsharedvt_klass (constrained_class)) {
8710 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8711 /* The 'Own method' case below */
8712 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8713 /* 'The type parameter is instantiated as a reference type' case below. */
8715 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8716 CHECK_CFG_EXCEPTION;
8722 if (constrained_partial_call) {
8723 gboolean need_box = TRUE;
8726 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8727 * called method is not known at compile time either. The called method could end up being
8728 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8729 * to box the receiver.
8730 * A simple solution would be to box always and make a normal virtual call, but that would
8731 * be bad performance wise.
8733 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8735 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8740 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8741 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8742 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8743 ins->klass = constrained_class;
8744 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8745 CHECK_CFG_EXCEPTION;
8746 } else if (need_box) {
8748 MonoBasicBlock *is_ref_bb, *end_bb;
8749 MonoInst *nonbox_call;
8752 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8754 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8755 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8757 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8759 NEW_BBLOCK (cfg, is_ref_bb);
8760 NEW_BBLOCK (cfg, end_bb);
8762 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8767 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8772 MONO_START_BB (cfg, is_ref_bb);
8773 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8774 ins->klass = constrained_class;
8775 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8776 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8780 MONO_START_BB (cfg, end_bb);
8783 nonbox_call->dreg = ins->dreg;
8786 g_assert (mono_class_is_interface (cmethod->klass));
8787 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8788 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8791 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8793 * The type parameter is instantiated as a valuetype,
8794 * but that type doesn't override the method we're
8795 * calling, so we need to box `this'.
8797 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8798 ins->klass = constrained_class;
8799 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8800 CHECK_CFG_EXCEPTION;
8801 } else if (!constrained_class->valuetype) {
8802 int dreg = alloc_ireg_ref (cfg);
8805 * The type parameter is instantiated as a reference
8806 * type. We have a managed pointer on the stack, so
8807 * we need to dereference it here.
8809 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8810 ins->type = STACK_OBJ;
8813 if (cmethod->klass->valuetype) {
8816 /* Interface method */
8819 mono_class_setup_vtable (constrained_class);
8820 CHECK_TYPELOAD (constrained_class);
8821 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8823 TYPE_LOAD_ERROR (constrained_class);
8824 slot = mono_method_get_vtable_slot (cmethod);
8826 TYPE_LOAD_ERROR (cmethod->klass);
8827 cmethod = constrained_class->vtable [ioffset + slot];
8829 if (cmethod->klass == mono_defaults.enum_class) {
8830 /* Enum implements some interfaces, so treat this as the first case */
8831 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8832 ins->klass = constrained_class;
8833 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8834 CHECK_CFG_EXCEPTION;
8839 constrained_class = NULL;
8842 if (check_call_signature (cfg, fsig, sp))
8845 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8846 delegate_invoke = TRUE;
8848 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8849 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8850 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8858 * If the callee is a shared method, then its static cctor
8859 * might not get called after the call was patched.
8861 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8862 emit_class_init (cfg, cmethod->klass);
8863 CHECK_TYPELOAD (cmethod->klass);
8866 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8869 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8871 context_used = mini_method_check_context_used (cfg, cmethod);
8873 if (context_used && mono_class_is_interface (cmethod->klass)) {
8874 /* Generic method interface
8875 calls are resolved via a
8876 helper function and don't
8878 if (!cmethod_context || !cmethod_context->method_inst)
8879 pass_imt_from_rgctx = TRUE;
8883 * If a shared method calls another
8884 * shared method then the caller must
8885 * have a generic sharing context
8886 * because the magic trampoline
8887 * requires it. FIXME: We shouldn't
8888 * have to force the vtable/mrgctx
8889 * variable here. Instead there
8890 * should be a flag in the cfg to
8891 * request a generic sharing context.
8894 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8895 mono_get_vtable_var (cfg);
8900 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8902 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8904 CHECK_TYPELOAD (cmethod->klass);
8905 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8910 g_assert (!vtable_arg);
8912 if (!cfg->compile_aot) {
8914 * emit_get_rgctx_method () calls mono_class_vtable () so check
8915 * for type load errors before.
8917 mono_class_setup_vtable (cmethod->klass);
8918 CHECK_TYPELOAD (cmethod->klass);
8921 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8923 /* !marshalbyref is needed to properly handle generic methods + remoting */
8924 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8925 MONO_METHOD_IS_FINAL (cmethod)) &&
8926 !mono_class_is_marshalbyref (cmethod->klass)) {
8933 if (pass_imt_from_rgctx) {
8934 g_assert (!pass_vtable);
8936 imt_arg = emit_get_rgctx_method (cfg, context_used,
8937 cmethod, MONO_RGCTX_INFO_METHOD);
8941 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8943 /* Calling virtual generic methods */
8944 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8945 !(MONO_METHOD_IS_FINAL (cmethod) &&
8946 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8947 fsig->generic_param_count &&
8948 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8950 MonoInst *this_temp, *this_arg_temp, *store;
8951 MonoInst *iargs [4];
8953 g_assert (fsig->is_inflated);
8955 /* Prevent inlining of methods that contain indirect calls */
8956 INLINE_FAILURE ("virtual generic call");
8958 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8959 GSHAREDVT_FAILURE (*ip);
8961 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8962 g_assert (!imt_arg);
8964 g_assert (cmethod->is_inflated);
8965 imt_arg = emit_get_rgctx_method (cfg, context_used,
8966 cmethod, MONO_RGCTX_INFO_METHOD);
8967 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8969 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8970 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8971 MONO_ADD_INS (cfg->cbb, store);
8973 /* FIXME: This should be a managed pointer */
8974 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8976 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8977 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8978 cmethod, MONO_RGCTX_INFO_METHOD);
8979 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8980 addr = mono_emit_jit_icall (cfg,
8981 mono_helper_compile_generic_method, iargs);
8983 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8985 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8992 * Implement a workaround for the inherent races involved in locking:
8998 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8999 * try block, the Exit () won't be executed, see:
9000 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9001 * To work around this, we extend such try blocks to include the last x bytes
9002 * of the Monitor.Enter () call.
9004 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9005 MonoBasicBlock *tbb;
9007 GET_BBLOCK (cfg, tbb, ip + 5);
9009 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9010 * from Monitor.Enter like ArgumentNullException.
9012 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9013 /* Mark this bblock as needing to be extended */
9014 tbb->extend_try_block = TRUE;
9018 /* Conversion to a JIT intrinsic */
9019 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9020 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9021 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9029 if ((cfg->opt & MONO_OPT_INLINE) &&
9030 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9031 mono_method_check_inlining (cfg, cmethod)) {
9033 gboolean always = FALSE;
9035 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9036 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9037 /* Prevent inlining of methods that call wrappers */
9038 INLINE_FAILURE ("wrapper call");
9039 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9043 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9045 cfg->real_offset += 5;
9047 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9048 /* *sp is already set by inline_method */
9053 inline_costs += costs;
9059 /* Tail recursion elimination */
9060 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9061 gboolean has_vtargs = FALSE;
9064 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9065 INLINE_FAILURE ("tail call");
9067 /* keep it simple */
9068 for (i = fsig->param_count - 1; i >= 0; i--) {
9069 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9074 if (need_seq_point) {
9075 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9076 need_seq_point = FALSE;
9078 for (i = 0; i < n; ++i)
9079 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9080 MONO_INST_NEW (cfg, ins, OP_BR);
9081 MONO_ADD_INS (cfg->cbb, ins);
9082 tblock = start_bblock->out_bb [0];
9083 link_bblock (cfg, cfg->cbb, tblock);
9084 ins->inst_target_bb = tblock;
9085 start_new_bblock = 1;
9087 /* skip the CEE_RET, too */
9088 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9095 inline_costs += 10 * num_calls++;
9098 * Synchronized wrappers.
 9099 * It's hard to determine where to replace a method with its synchronized
9100 * wrapper without causing an infinite recursion. The current solution is
9101 * to add the synchronized wrapper in the trampolines, and to
9102 * change the called method to a dummy wrapper, and resolve that wrapper
9103 * to the real method in mono_jit_compile_method ().
9105 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9106 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9107 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9108 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9112 * Making generic calls out of gsharedvt methods.
9113 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9114 * patching gshared method addresses into a gsharedvt method.
9116 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9117 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9118 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9119 MonoRgctxInfoType info_type;
9122 //if (mono_class_is_interface (cmethod->klass))
9123 //GSHAREDVT_FAILURE (*ip);
9124 // disable for possible remoting calls
9125 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9126 GSHAREDVT_FAILURE (*ip);
9127 if (fsig->generic_param_count) {
9128 /* virtual generic call */
9129 g_assert (!imt_arg);
9130 /* Same as the virtual generic case above */
9131 imt_arg = emit_get_rgctx_method (cfg, context_used,
9132 cmethod, MONO_RGCTX_INFO_METHOD);
9133 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9135 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9136 /* This can happen when we call a fully instantiated iface method */
9137 imt_arg = emit_get_rgctx_method (cfg, context_used,
9138 cmethod, MONO_RGCTX_INFO_METHOD);
9143 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9144 keep_this_alive = sp [0];
9146 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9147 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9149 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9150 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9152 if (cfg->llvm_only) {
9153 // FIXME: Avoid initializing vtable_arg
9154 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9156 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9161 /* Generic sharing */
9164 * Use this if the callee is gsharedvt sharable too, since
9165 * at runtime we might find an instantiation so the call cannot
9166 * be patched (the 'no_patch' code path in mini-trampolines.c).
9168 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9169 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9170 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9171 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9172 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9173 INLINE_FAILURE ("gshared");
9175 g_assert (cfg->gshared && cmethod);
9179 * We are compiling a call to a
9180 * generic method from shared code,
9181 * which means that we have to look up
9182 * the method in the rgctx and do an
9186 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9188 if (cfg->llvm_only) {
9189 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9190 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9192 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9193 // FIXME: Avoid initializing imt_arg/vtable_arg
9194 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9196 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9197 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9202 /* Direct calls to icalls */
9204 MonoMethod *wrapper;
9207 /* Inline the wrapper */
9208 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9210 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9211 g_assert (costs > 0);
9212 cfg->real_offset += 5;
9214 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9215 /* *sp is already set by inline_method */
9220 inline_costs += costs;
9229 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9230 MonoInst *val = sp [fsig->param_count];
9232 if (val->type == STACK_OBJ) {
9233 MonoInst *iargs [2];
9238 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9241 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9242 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9243 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9244 emit_write_barrier (cfg, addr, val);
9245 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9246 GSHAREDVT_FAILURE (*ip);
9247 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9248 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9250 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9251 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9252 if (!cmethod->klass->element_class->valuetype && !readonly)
9253 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9254 CHECK_TYPELOAD (cmethod->klass);
9257 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9260 g_assert_not_reached ();
9267 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9271 /* Tail prefix / tail call optimization */
9273 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9274 /* FIXME: runtime generic context pointer for jumps? */
9275 /* FIXME: handle this for generic sharing eventually */
9276 if ((ins_flag & MONO_INST_TAILCALL) &&
9277 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9278 supported_tail_call = TRUE;
9280 if (supported_tail_call) {
9283 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9284 INLINE_FAILURE ("tail call");
9286 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9288 if (cfg->backend->have_op_tail_call) {
9289 /* Handle tail calls similarly to normal calls */
9292 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9294 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9295 call->tail_call = TRUE;
9296 call->method = cmethod;
9297 call->signature = mono_method_signature (cmethod);
9300 * We implement tail calls by storing the actual arguments into the
9301 * argument variables, then emitting a CEE_JMP.
9303 for (i = 0; i < n; ++i) {
9304 /* Prevent argument from being register allocated */
9305 arg_array [i]->flags |= MONO_INST_VOLATILE;
9306 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9308 ins = (MonoInst*)call;
9309 ins->inst_p0 = cmethod;
9310 ins->inst_p1 = arg_array [0];
9311 MONO_ADD_INS (cfg->cbb, ins);
9312 link_bblock (cfg, cfg->cbb, end_bblock);
9313 start_new_bblock = 1;
9315 // FIXME: Eliminate unreachable epilogs
9318 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9319 * only reachable from this call.
9321 GET_BBLOCK (cfg, tblock, ip + 5);
9322 if (tblock == cfg->cbb || tblock->in_count == 0)
9331 * Virtual calls in llvm-only mode.
9333 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9334 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9339 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9340 INLINE_FAILURE ("call");
9341 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9342 imt_arg, vtable_arg);
9344 if (tail_call && !cfg->llvm_only) {
9345 link_bblock (cfg, cfg->cbb, end_bblock);
9346 start_new_bblock = 1;
9348 // FIXME: Eliminate unreachable epilogs
9351 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9352 * only reachable from this call.
9354 GET_BBLOCK (cfg, tblock, ip + 5);
9355 if (tblock == cfg->cbb || tblock->in_count == 0)
9362 /* End of call, INS should contain the result of the call, if any */
9364 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9367 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9372 if (keep_this_alive) {
9373 MonoInst *dummy_use;
9375 /* See mono_emit_method_call_full () */
9376 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9379 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9381 * Clang can convert these calls to tail calls which screw up the stack
9382 * walk. This happens even when the -fno-optimize-sibling-calls
9383 * option is passed to clang.
9384 * Work around this by emitting a dummy call.
9386 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9389 CHECK_CFG_EXCEPTION;
9393 g_assert (*ip == CEE_RET);
9397 constrained_class = NULL;
9399 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9403 if (cfg->method != method) {
9404 /* return from inlined method */
9406 * If in_count == 0, that means the ret is unreachable due to
 9407 * being preceded by a throw. In that case, inline_method () will
9408 * handle setting the return value
9409 * (test case: test_0_inline_throw ()).
9411 if (return_var && cfg->cbb->in_count) {
9412 MonoType *ret_type = mono_method_signature (method)->ret;
9418 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9421 //g_assert (returnvar != -1);
9422 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9423 cfg->ret_var_set = TRUE;
9426 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9428 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9432 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9434 if (seq_points && !sym_seq_points) {
 9436 * Place a seq point here too even though the IL stack is not
9437 * empty, so a step over on
9440 * will work correctly.
9442 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9443 MONO_ADD_INS (cfg->cbb, ins);
9446 g_assert (!return_var);
9450 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9453 emit_setret (cfg, *sp);
9456 if (sp != stack_start)
9458 MONO_INST_NEW (cfg, ins, OP_BR);
9460 ins->inst_target_bb = end_bblock;
9461 MONO_ADD_INS (cfg->cbb, ins);
9462 link_bblock (cfg, cfg->cbb, end_bblock);
9463 start_new_bblock = 1;
9467 MONO_INST_NEW (cfg, ins, OP_BR);
9469 target = ip + 1 + (signed char)(*ip);
9471 GET_BBLOCK (cfg, tblock, target);
9472 link_bblock (cfg, cfg->cbb, tblock);
9473 ins->inst_target_bb = tblock;
9474 if (sp != stack_start) {
9475 handle_stack_args (cfg, stack_start, sp - stack_start);
9477 CHECK_UNVERIFIABLE (cfg);
9479 MONO_ADD_INS (cfg->cbb, ins);
9480 start_new_bblock = 1;
9481 inline_costs += BRANCH_COST;
9495 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9497 target = ip + 1 + *(signed char*)ip;
9503 inline_costs += BRANCH_COST;
9507 MONO_INST_NEW (cfg, ins, OP_BR);
9510 target = ip + 4 + (gint32)read32(ip);
9512 GET_BBLOCK (cfg, tblock, target);
9513 link_bblock (cfg, cfg->cbb, tblock);
9514 ins->inst_target_bb = tblock;
9515 if (sp != stack_start) {
9516 handle_stack_args (cfg, stack_start, sp - stack_start);
9518 CHECK_UNVERIFIABLE (cfg);
9521 MONO_ADD_INS (cfg->cbb, ins);
9523 start_new_bblock = 1;
9524 inline_costs += BRANCH_COST;
9531 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9532 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9533 guint32 opsize = is_short ? 1 : 4;
9535 CHECK_OPSIZE (opsize);
9537 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9540 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9545 GET_BBLOCK (cfg, tblock, target);
9546 link_bblock (cfg, cfg->cbb, tblock);
9547 GET_BBLOCK (cfg, tblock, ip);
9548 link_bblock (cfg, cfg->cbb, tblock);
9550 if (sp != stack_start) {
9551 handle_stack_args (cfg, stack_start, sp - stack_start);
9552 CHECK_UNVERIFIABLE (cfg);
9555 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9556 cmp->sreg1 = sp [0]->dreg;
9557 type_from_op (cfg, cmp, sp [0], NULL);
9560 #if SIZEOF_REGISTER == 4
9561 if (cmp->opcode == OP_LCOMPARE_IMM) {
9562 /* Convert it to OP_LCOMPARE */
9563 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9564 ins->type = STACK_I8;
9565 ins->dreg = alloc_dreg (cfg, STACK_I8);
9567 MONO_ADD_INS (cfg->cbb, ins);
9568 cmp->opcode = OP_LCOMPARE;
9569 cmp->sreg2 = ins->dreg;
9572 MONO_ADD_INS (cfg->cbb, cmp);
9574 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9575 type_from_op (cfg, ins, sp [0], NULL);
9576 MONO_ADD_INS (cfg->cbb, ins);
9577 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9578 GET_BBLOCK (cfg, tblock, target);
9579 ins->inst_true_bb = tblock;
9580 GET_BBLOCK (cfg, tblock, ip);
9581 ins->inst_false_bb = tblock;
9582 start_new_bblock = 2;
9585 inline_costs += BRANCH_COST;
9600 MONO_INST_NEW (cfg, ins, *ip);
9602 target = ip + 4 + (gint32)read32(ip);
9608 inline_costs += BRANCH_COST;
9612 MonoBasicBlock **targets;
9613 MonoBasicBlock *default_bblock;
9614 MonoJumpInfoBBTable *table;
9615 int offset_reg = alloc_preg (cfg);
9616 int target_reg = alloc_preg (cfg);
9617 int table_reg = alloc_preg (cfg);
9618 int sum_reg = alloc_preg (cfg);
9619 gboolean use_op_switch;
9623 n = read32 (ip + 1);
9626 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9630 CHECK_OPSIZE (n * sizeof (guint32));
9631 target = ip + n * sizeof (guint32);
9633 GET_BBLOCK (cfg, default_bblock, target);
9634 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9636 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9637 for (i = 0; i < n; ++i) {
9638 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9639 targets [i] = tblock;
9640 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9644 if (sp != stack_start) {
9646 * Link the current bb with the targets as well, so handle_stack_args
9647 * will set their in_stack correctly.
9649 link_bblock (cfg, cfg->cbb, default_bblock);
9650 for (i = 0; i < n; ++i)
9651 link_bblock (cfg, cfg->cbb, targets [i]);
9653 handle_stack_args (cfg, stack_start, sp - stack_start);
9655 CHECK_UNVERIFIABLE (cfg);
9657 /* Undo the links */
9658 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9659 for (i = 0; i < n; ++i)
9660 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9663 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9666 for (i = 0; i < n; ++i)
9667 link_bblock (cfg, cfg->cbb, targets [i]);
9669 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9670 table->table = targets;
9671 table->table_size = n;
9673 use_op_switch = FALSE;
9675 /* ARM implements SWITCH statements differently */
9676 /* FIXME: Make it use the generic implementation */
9677 if (!cfg->compile_aot)
9678 use_op_switch = TRUE;
9681 if (COMPILE_LLVM (cfg))
9682 use_op_switch = TRUE;
9684 cfg->cbb->has_jump_table = 1;
9686 if (use_op_switch) {
9687 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9688 ins->sreg1 = src1->dreg;
9689 ins->inst_p0 = table;
9690 ins->inst_many_bb = targets;
9691 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9692 MONO_ADD_INS (cfg->cbb, ins);
9694 if (sizeof (gpointer) == 8)
9695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9699 #if SIZEOF_REGISTER == 8
9700 /* The upper word might not be zero, and we add it to a 64 bit address later */
9701 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9704 if (cfg->compile_aot) {
9705 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9707 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9708 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9709 ins->inst_p0 = table;
9710 ins->dreg = table_reg;
9711 MONO_ADD_INS (cfg->cbb, ins);
9714 /* FIXME: Use load_memindex */
9715 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9717 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9719 start_new_bblock = 1;
9720 inline_costs += (BRANCH_COST * 2);
9740 dreg = alloc_freg (cfg);
9743 dreg = alloc_lreg (cfg);
9746 dreg = alloc_ireg_ref (cfg);
9749 dreg = alloc_preg (cfg);
9752 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9753 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9754 if (*ip == CEE_LDIND_R4)
9755 ins->type = cfg->r4_stack_type;
9756 ins->flags |= ins_flag;
9757 MONO_ADD_INS (cfg->cbb, ins);
9759 if (ins_flag & MONO_INST_VOLATILE) {
9760 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9761 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9777 if (ins_flag & MONO_INST_VOLATILE) {
9778 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9779 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9782 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9783 ins->flags |= ins_flag;
9786 MONO_ADD_INS (cfg->cbb, ins);
9788 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9789 emit_write_barrier (cfg, sp [0], sp [1]);
9798 MONO_INST_NEW (cfg, ins, (*ip));
9800 ins->sreg1 = sp [0]->dreg;
9801 ins->sreg2 = sp [1]->dreg;
9802 type_from_op (cfg, ins, sp [0], sp [1]);
9804 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9806 /* Use the immediate opcodes if possible */
9807 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9808 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9809 if (imm_opcode != -1) {
9810 ins->opcode = imm_opcode;
9811 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9814 NULLIFY_INS (sp [1]);
9818 MONO_ADD_INS ((cfg)->cbb, (ins));
9820 *sp++ = mono_decompose_opcode (cfg, ins);
9837 MONO_INST_NEW (cfg, ins, (*ip));
9839 ins->sreg1 = sp [0]->dreg;
9840 ins->sreg2 = sp [1]->dreg;
9841 type_from_op (cfg, ins, sp [0], sp [1]);
9843 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9844 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9846 /* FIXME: Pass opcode to is_inst_imm */
9848 /* Use the immediate opcodes if possible */
9849 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9850 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9851 if (imm_opcode != -1) {
9852 ins->opcode = imm_opcode;
9853 if (sp [1]->opcode == OP_I8CONST) {
9854 #if SIZEOF_REGISTER == 8
9855 ins->inst_imm = sp [1]->inst_l;
9857 ins->inst_ls_word = sp [1]->inst_ls_word;
9858 ins->inst_ms_word = sp [1]->inst_ms_word;
9862 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9865 /* Might be followed by an instruction added by add_widen_op */
9866 if (sp [1]->next == NULL)
9867 NULLIFY_INS (sp [1]);
9870 MONO_ADD_INS ((cfg)->cbb, (ins));
9872 *sp++ = mono_decompose_opcode (cfg, ins);
9885 case CEE_CONV_OVF_I8:
9886 case CEE_CONV_OVF_U8:
9890 /* Special case this earlier so we have long constants in the IR */
9891 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9892 int data = sp [-1]->inst_c0;
9893 sp [-1]->opcode = OP_I8CONST;
9894 sp [-1]->type = STACK_I8;
9895 #if SIZEOF_REGISTER == 8
9896 if ((*ip) == CEE_CONV_U8)
9897 sp [-1]->inst_c0 = (guint32)data;
9899 sp [-1]->inst_c0 = data;
9901 sp [-1]->inst_ls_word = data;
9902 if ((*ip) == CEE_CONV_U8)
9903 sp [-1]->inst_ms_word = 0;
9905 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9907 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9914 case CEE_CONV_OVF_I4:
9915 case CEE_CONV_OVF_I1:
9916 case CEE_CONV_OVF_I2:
9917 case CEE_CONV_OVF_I:
9918 case CEE_CONV_OVF_U:
9921 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9922 ADD_UNOP (CEE_CONV_OVF_I8);
9929 case CEE_CONV_OVF_U1:
9930 case CEE_CONV_OVF_U2:
9931 case CEE_CONV_OVF_U4:
9934 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9935 ADD_UNOP (CEE_CONV_OVF_U8);
9942 case CEE_CONV_OVF_I1_UN:
9943 case CEE_CONV_OVF_I2_UN:
9944 case CEE_CONV_OVF_I4_UN:
9945 case CEE_CONV_OVF_I8_UN:
9946 case CEE_CONV_OVF_U1_UN:
9947 case CEE_CONV_OVF_U2_UN:
9948 case CEE_CONV_OVF_U4_UN:
9949 case CEE_CONV_OVF_U8_UN:
9950 case CEE_CONV_OVF_I_UN:
9951 case CEE_CONV_OVF_U_UN:
9958 CHECK_CFG_EXCEPTION;
9962 case CEE_ADD_OVF_UN:
9964 case CEE_MUL_OVF_UN:
9966 case CEE_SUB_OVF_UN:
9972 GSHAREDVT_FAILURE (*ip);
9975 token = read32 (ip + 1);
9976 klass = mini_get_class (method, token, generic_context);
9977 CHECK_TYPELOAD (klass);
9979 if (generic_class_is_reference_type (cfg, klass)) {
9980 MonoInst *store, *load;
9981 int dreg = alloc_ireg_ref (cfg);
9983 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9984 load->flags |= ins_flag;
9985 MONO_ADD_INS (cfg->cbb, load);
9987 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9988 store->flags |= ins_flag;
9989 MONO_ADD_INS (cfg->cbb, store);
9991 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9992 emit_write_barrier (cfg, sp [0], sp [1]);
9994 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10000 int loc_index = -1;
10006 token = read32 (ip + 1);
10007 klass = mini_get_class (method, token, generic_context);
10008 CHECK_TYPELOAD (klass);
10010 /* Optimize the common ldobj+stloc combination */
10013 loc_index = ip [6];
10020 loc_index = ip [5] - CEE_STLOC_0;
10027 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10028 CHECK_LOCAL (loc_index);
10030 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10031 ins->dreg = cfg->locals [loc_index]->dreg;
10032 ins->flags |= ins_flag;
10035 if (ins_flag & MONO_INST_VOLATILE) {
10036 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10037 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10043 /* Optimize the ldobj+stobj combination */
10044 /* The reference case ends up being a load+store anyway */
10045 /* Skip this if the operation is volatile. */
10046 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10051 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10058 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10059 ins->flags |= ins_flag;
10062 if (ins_flag & MONO_INST_VOLATILE) {
10063 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10064 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10073 CHECK_STACK_OVF (1);
10075 n = read32 (ip + 1);
10077 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10078 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10079 ins->type = STACK_OBJ;
10082 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10083 MonoInst *iargs [1];
10084 char *str = (char *)mono_method_get_wrapper_data (method, n);
10086 if (cfg->compile_aot)
10087 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10089 EMIT_NEW_PCONST (cfg, iargs [0], str);
10090 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10092 if (cfg->opt & MONO_OPT_SHARED) {
10093 MonoInst *iargs [3];
10095 if (cfg->compile_aot) {
10096 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10098 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10099 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10100 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10101 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10102 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10105 if (cfg->cbb->out_of_line) {
10106 MonoInst *iargs [2];
10108 if (image == mono_defaults.corlib) {
10110 * Avoid relocations in AOT and save some space by using a
10111 * version of helper_ldstr specialized to mscorlib.
10113 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10114 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10116 /* Avoid creating the string object */
10117 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10118 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10119 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10123 if (cfg->compile_aot) {
10124 NEW_LDSTRCONST (cfg, ins, image, n);
10126 MONO_ADD_INS (cfg->cbb, ins);
10129 NEW_PCONST (cfg, ins, NULL);
10130 ins->type = STACK_OBJ;
10131 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10135 OUT_OF_MEMORY_FAILURE;
10138 MONO_ADD_INS (cfg->cbb, ins);
10147 MonoInst *iargs [2];
10148 MonoMethodSignature *fsig;
10151 MonoInst *vtable_arg = NULL;
10154 token = read32 (ip + 1);
10155 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10158 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10161 mono_save_token_info (cfg, image, token, cmethod);
10163 if (!mono_class_init (cmethod->klass))
10164 TYPE_LOAD_ERROR (cmethod->klass);
10166 context_used = mini_method_check_context_used (cfg, cmethod);
10168 if (mono_security_core_clr_enabled ())
10169 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10171 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10172 emit_class_init (cfg, cmethod->klass);
10173 CHECK_TYPELOAD (cmethod->klass);
10177 if (cfg->gsharedvt) {
10178 if (mini_is_gsharedvt_variable_signature (sig))
10179 GSHAREDVT_FAILURE (*ip);
10183 n = fsig->param_count;
10187 * Generate smaller code for the common newobj <exception> instruction in
10188 * argument checking code.
10190 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10191 is_exception_class (cmethod->klass) && n <= 2 &&
10192 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10193 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10194 MonoInst *iargs [3];
10198 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10201 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10204 iargs [1] = sp [0];
10205 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10208 iargs [1] = sp [0];
10209 iargs [2] = sp [1];
10210 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10213 g_assert_not_reached ();
10221 /* move the args to allow room for 'this' in the first position */
10227 /* check_call_signature () requires sp[0] to be set */
10228 this_ins.type = STACK_OBJ;
10229 sp [0] = &this_ins;
10230 if (check_call_signature (cfg, fsig, sp))
10235 if (mini_class_is_system_array (cmethod->klass)) {
10236 *sp = emit_get_rgctx_method (cfg, context_used,
10237 cmethod, MONO_RGCTX_INFO_METHOD);
10239 /* Avoid varargs in the common case */
10240 if (fsig->param_count == 1)
10241 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10242 else if (fsig->param_count == 2)
10243 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10244 else if (fsig->param_count == 3)
10245 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10246 else if (fsig->param_count == 4)
10247 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10249 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10250 } else if (cmethod->string_ctor) {
10251 g_assert (!context_used);
10252 g_assert (!vtable_arg);
10253 /* we simply pass a null pointer */
10254 EMIT_NEW_PCONST (cfg, *sp, NULL);
10255 /* now call the string ctor */
10256 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10258 if (cmethod->klass->valuetype) {
10259 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10260 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10261 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10266 * The code generated by mini_emit_virtual_call () expects
10267 * iargs [0] to be a boxed instance, but luckily the vcall
10268 * will be transformed into a normal call there.
10270 } else if (context_used) {
10271 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10274 MonoVTable *vtable = NULL;
10276 if (!cfg->compile_aot)
10277 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10278 CHECK_TYPELOAD (cmethod->klass);
10281 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10282 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10283 * As a workaround, we call class cctors before allocating objects.
10285 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10286 emit_class_init (cfg, cmethod->klass);
10287 if (cfg->verbose_level > 2)
10288 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10289 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10292 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10295 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10298 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10300 /* Now call the actual ctor */
10301 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10302 CHECK_CFG_EXCEPTION;
10305 if (alloc == NULL) {
10307 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10308 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10316 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10317 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10320 case CEE_CASTCLASS:
10325 token = read32 (ip + 1);
10326 klass = mini_get_class (method, token, generic_context);
10327 CHECK_TYPELOAD (klass);
10328 if (sp [0]->type != STACK_OBJ)
10331 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10332 ins->dreg = alloc_preg (cfg);
10333 ins->sreg1 = (*sp)->dreg;
10334 ins->klass = klass;
10335 ins->type = STACK_OBJ;
10336 MONO_ADD_INS (cfg->cbb, ins);
10338 CHECK_CFG_EXCEPTION;
10342 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10345 case CEE_UNBOX_ANY: {
10346 MonoInst *res, *addr;
10351 token = read32 (ip + 1);
10352 klass = mini_get_class (method, token, generic_context);
10353 CHECK_TYPELOAD (klass);
10355 mono_save_token_info (cfg, image, token, klass);
10357 context_used = mini_class_check_context_used (cfg, klass);
10359 if (mini_is_gsharedvt_klass (klass)) {
10360 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10362 } else if (generic_class_is_reference_type (cfg, klass)) {
10363 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10364 EMIT_NEW_PCONST (cfg, res, NULL);
10365 res->type = STACK_OBJ;
10367 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10368 res->dreg = alloc_preg (cfg);
10369 res->sreg1 = (*sp)->dreg;
10370 res->klass = klass;
10371 res->type = STACK_OBJ;
10372 MONO_ADD_INS (cfg->cbb, res);
10373 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10375 } else if (mono_class_is_nullable (klass)) {
10376 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10378 addr = handle_unbox (cfg, klass, sp, context_used);
10380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10391 MonoClass *enum_class;
10392 MonoMethod *has_flag;
10398 token = read32 (ip + 1);
10399 klass = mini_get_class (method, token, generic_context);
10400 CHECK_TYPELOAD (klass);
10402 mono_save_token_info (cfg, image, token, klass);
10404 context_used = mini_class_check_context_used (cfg, klass);
10406 if (generic_class_is_reference_type (cfg, klass)) {
10412 if (klass == mono_defaults.void_class)
10414 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10416 /* frequent check in generic code: box (struct), brtrue */
10421 * <push int/long ptr>
10424 * constrained. MyFlags
10425 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10427 * If we find this sequence and the operand types on box and constrained
10428 * are equal, we can emit a specialized instruction sequence instead of
10429 * the very slow HasFlag () call.
10431 if ((cfg->opt & MONO_OPT_INTRINS) &&
10432 /* Cheap checks first. */
10433 ip + 5 + 6 + 5 < end &&
10434 ip [5] == CEE_PREFIX1 &&
10435 ip [6] == CEE_CONSTRAINED_ &&
10436 ip [11] == CEE_CALLVIRT &&
10437 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10438 mono_class_is_enum (klass) &&
10439 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10440 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10441 has_flag->klass == mono_defaults.enum_class &&
10442 !strcmp (has_flag->name, "HasFlag") &&
10443 has_flag->signature->hasthis &&
10444 has_flag->signature->param_count == 1) {
10445 CHECK_TYPELOAD (enum_class);
10447 if (enum_class == klass) {
10448 MonoInst *enum_this, *enum_flag;
10453 enum_this = sp [0];
10454 enum_flag = sp [1];
10456 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10461 // FIXME: LLVM can't handle the inconsistent bb linking
10462 if (!mono_class_is_nullable (klass) &&
10463 !mini_is_gsharedvt_klass (klass) &&
10464 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10465 (ip [5] == CEE_BRTRUE ||
10466 ip [5] == CEE_BRTRUE_S ||
10467 ip [5] == CEE_BRFALSE ||
10468 ip [5] == CEE_BRFALSE_S)) {
10469 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10471 MonoBasicBlock *true_bb, *false_bb;
10475 if (cfg->verbose_level > 3) {
10476 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10477 printf ("<box+brtrue opt>\n");
10482 case CEE_BRFALSE_S:
10485 target = ip + 1 + (signed char)(*ip);
10492 target = ip + 4 + (gint)(read32 (ip));
10496 g_assert_not_reached ();
10500 * We need to link both bblocks, since it is needed for handling stack
10501 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10502 * Branching to only one of them would lead to inconsistencies, so
10503 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10505 GET_BBLOCK (cfg, true_bb, target);
10506 GET_BBLOCK (cfg, false_bb, ip);
10508 mono_link_bblock (cfg, cfg->cbb, true_bb);
10509 mono_link_bblock (cfg, cfg->cbb, false_bb);
10511 if (sp != stack_start) {
10512 handle_stack_args (cfg, stack_start, sp - stack_start);
10514 CHECK_UNVERIFIABLE (cfg);
10517 if (COMPILE_LLVM (cfg)) {
10518 dreg = alloc_ireg (cfg);
10519 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10522 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10524 /* The JIT can't eliminate the iconst+compare */
10525 MONO_INST_NEW (cfg, ins, OP_BR);
10526 ins->inst_target_bb = is_true ? true_bb : false_bb;
10527 MONO_ADD_INS (cfg->cbb, ins);
10530 start_new_bblock = 1;
10534 *sp++ = handle_box (cfg, val, klass, context_used);
10536 CHECK_CFG_EXCEPTION;
10545 token = read32 (ip + 1);
10546 klass = mini_get_class (method, token, generic_context);
10547 CHECK_TYPELOAD (klass);
10549 mono_save_token_info (cfg, image, token, klass);
10551 context_used = mini_class_check_context_used (cfg, klass);
10553 if (mono_class_is_nullable (klass)) {
10556 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10557 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10561 ins = handle_unbox (cfg, klass, sp, context_used);
10574 MonoClassField *field;
10575 #ifndef DISABLE_REMOTING
10579 gboolean is_instance;
10581 gpointer addr = NULL;
10582 gboolean is_special_static;
10584 MonoInst *store_val = NULL;
10585 MonoInst *thread_ins;
10588 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10590 if (op == CEE_STFLD) {
10593 store_val = sp [1];
10598 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10600 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10603 if (op == CEE_STSFLD) {
10606 store_val = sp [0];
10611 token = read32 (ip + 1);
10612 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10613 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10614 klass = field->parent;
10617 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10620 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10621 FIELD_ACCESS_FAILURE (method, field);
10622 mono_class_init (klass);
10624 /* if the class is Critical then transparent code cannot access its fields */
10625 if (!is_instance && mono_security_core_clr_enabled ())
10626 ensure_method_is_allowed_to_access_field (cfg, method, field);
10628 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10629 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10630 if (mono_security_core_clr_enabled ())
10631 ensure_method_is_allowed_to_access_field (cfg, method, field);
10634 ftype = mono_field_get_type (field);
10637 * LDFLD etc. is usable on static fields as well, so convert those cases to
10640 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10652 g_assert_not_reached ();
10654 is_instance = FALSE;
10657 context_used = mini_class_check_context_used (cfg, klass);
10659 /* INSTANCE CASE */
10661 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10662 if (op == CEE_STFLD) {
10663 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10665 #ifndef DISABLE_REMOTING
10666 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10667 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10668 MonoInst *iargs [5];
10670 GSHAREDVT_FAILURE (op);
10672 iargs [0] = sp [0];
10673 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10674 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10675 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10677 iargs [4] = sp [1];
10679 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10680 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10681 iargs, ip, cfg->real_offset, TRUE);
10682 CHECK_CFG_EXCEPTION;
10683 g_assert (costs > 0);
10685 cfg->real_offset += 5;
10687 inline_costs += costs;
10689 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10694 MonoInst *store, *wbarrier_ptr_ins = NULL;
10696 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10698 if (ins_flag & MONO_INST_VOLATILE) {
10699 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10700 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10703 if (mini_is_gsharedvt_klass (klass)) {
10704 MonoInst *offset_ins;
10706 context_used = mini_class_check_context_used (cfg, klass);
10708 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10709 /* The value is offset by 1 */
10710 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10711 dreg = alloc_ireg_mp (cfg);
10712 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10713 wbarrier_ptr_ins = ins;
10714 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10715 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10717 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10719 if (sp [0]->opcode != OP_LDADDR)
10720 store->flags |= MONO_INST_FAULT;
10722 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10723 if (mini_is_gsharedvt_klass (klass)) {
10724 g_assert (wbarrier_ptr_ins);
10725 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10727 /* insert call to write barrier */
10731 dreg = alloc_ireg_mp (cfg);
10732 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10733 emit_write_barrier (cfg, ptr, sp [1]);
10737 store->flags |= ins_flag;
10744 #ifndef DISABLE_REMOTING
10745 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10746 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10747 MonoInst *iargs [4];
10749 GSHAREDVT_FAILURE (op);
10751 iargs [0] = sp [0];
10752 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10753 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10754 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10755 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10756 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10757 iargs, ip, cfg->real_offset, TRUE);
10758 CHECK_CFG_EXCEPTION;
10759 g_assert (costs > 0);
10761 cfg->real_offset += 5;
10765 inline_costs += costs;
10767 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10773 if (sp [0]->type == STACK_VTYPE) {
10776 /* Have to compute the address of the variable */
10778 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10780 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10782 g_assert (var->klass == klass);
10784 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10788 if (op == CEE_LDFLDA) {
10789 if (sp [0]->type == STACK_OBJ) {
10790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10791 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10794 dreg = alloc_ireg_mp (cfg);
10796 if (mini_is_gsharedvt_klass (klass)) {
10797 MonoInst *offset_ins;
10799 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10800 /* The value is offset by 1 */
10801 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10802 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10804 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10806 ins->klass = mono_class_from_mono_type (field->type);
10807 ins->type = STACK_MP;
10812 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10814 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10815 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10824 if (mini_is_gsharedvt_klass (klass)) {
10825 MonoInst *offset_ins;
10827 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10828 /* The value is offset by 1 */
10829 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10830 dreg = alloc_ireg_mp (cfg);
10831 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10832 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10836 load->flags |= ins_flag;
10837 if (sp [0]->opcode != OP_LDADDR)
10838 load->flags |= MONO_INST_FAULT;
10850 context_used = mini_class_check_context_used (cfg, klass);
10852 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10853 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10857 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10858 * to be called here.
10860 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10861 mono_class_vtable (cfg->domain, klass);
10862 CHECK_TYPELOAD (klass);
10864 mono_domain_lock (cfg->domain);
10865 if (cfg->domain->special_static_fields)
10866 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10867 mono_domain_unlock (cfg->domain);
10869 is_special_static = mono_class_field_is_special_static (field);
10871 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10872 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10876 /* Generate IR to compute the field address */
10877 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10879 * Fast access to TLS data
10880 * Inline version of get_thread_static_data () in
10884 int idx, static_data_reg, array_reg, dreg;
10886 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10887 GSHAREDVT_FAILURE (op);
10889 static_data_reg = alloc_ireg (cfg);
10890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10892 if (cfg->compile_aot) {
10893 int offset_reg, offset2_reg, idx_reg;
10895 /* For TLS variables, this will return the TLS offset */
10896 EMIT_NEW_SFLDACONST (cfg, ins, field);
10897 offset_reg = ins->dreg;
10898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10899 idx_reg = alloc_ireg (cfg);
10900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10902 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10903 array_reg = alloc_ireg (cfg);
10904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10905 offset2_reg = alloc_ireg (cfg);
10906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10908 dreg = alloc_ireg (cfg);
10909 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10911 offset = (gsize)addr & 0x7fffffff;
10912 idx = offset & 0x3f;
10914 array_reg = alloc_ireg (cfg);
10915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10916 dreg = alloc_ireg (cfg);
10917 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10919 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10920 (cfg->compile_aot && is_special_static) ||
10921 (context_used && is_special_static)) {
10922 MonoInst *iargs [2];
10924 g_assert (field->parent);
10925 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10926 if (context_used) {
10927 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10928 field, MONO_RGCTX_INFO_CLASS_FIELD);
10930 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10932 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10933 } else if (context_used) {
10934 MonoInst *static_data;
10937 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10938 method->klass->name_space, method->klass->name, method->name,
10939 depth, field->offset);
10942 if (mono_class_needs_cctor_run (klass, method))
10943 emit_class_init (cfg, klass);
10946 * The pointer we're computing here is
10948 * super_info.static_data + field->offset
10950 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10951 klass, MONO_RGCTX_INFO_STATIC_DATA);
10953 if (mini_is_gsharedvt_klass (klass)) {
10954 MonoInst *offset_ins;
10956 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10957 /* The value is offset by 1 */
10958 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10959 dreg = alloc_ireg_mp (cfg);
10960 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10961 } else if (field->offset == 0) {
10964 int addr_reg = mono_alloc_preg (cfg);
10965 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10967 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10968 MonoInst *iargs [2];
10970 g_assert (field->parent);
10971 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10972 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10973 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10975 MonoVTable *vtable = NULL;
10977 if (!cfg->compile_aot)
10978 vtable = mono_class_vtable (cfg->domain, klass);
10979 CHECK_TYPELOAD (klass);
10982 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10983 if (!(g_slist_find (class_inits, klass))) {
10984 emit_class_init (cfg, klass);
10985 if (cfg->verbose_level > 2)
10986 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10987 class_inits = g_slist_prepend (class_inits, klass);
10990 if (cfg->run_cctors) {
10991 /* This makes so that inline cannot trigger */
10992 /* .cctors: too many apps depend on them */
10993 /* running with a specific order... */
10995 if (! vtable->initialized)
10996 INLINE_FAILURE ("class init");
10997 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10998 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10999 goto exception_exit;
11003 if (cfg->compile_aot)
11004 EMIT_NEW_SFLDACONST (cfg, ins, field);
11007 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11009 EMIT_NEW_PCONST (cfg, ins, addr);
11012 MonoInst *iargs [1];
11013 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11014 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11018 /* Generate IR to do the actual load/store operation */
11020 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11021 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11022 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11025 if (op == CEE_LDSFLDA) {
11026 ins->klass = mono_class_from_mono_type (ftype);
11027 ins->type = STACK_PTR;
11029 } else if (op == CEE_STSFLD) {
11032 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11033 store->flags |= ins_flag;
11035 gboolean is_const = FALSE;
11036 MonoVTable *vtable = NULL;
11037 gpointer addr = NULL;
11039 if (!context_used) {
11040 vtable = mono_class_vtable (cfg->domain, klass);
11041 CHECK_TYPELOAD (klass);
11043 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11044 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11045 int ro_type = ftype->type;
11047 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11048 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11049 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11052 GSHAREDVT_FAILURE (op);
11054 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11057 case MONO_TYPE_BOOLEAN:
11059 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11063 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11066 case MONO_TYPE_CHAR:
11068 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11072 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11077 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11081 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11086 case MONO_TYPE_PTR:
11087 case MONO_TYPE_FNPTR:
11088 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11089 type_to_eval_stack_type ((cfg), field->type, *sp);
11092 case MONO_TYPE_STRING:
11093 case MONO_TYPE_OBJECT:
11094 case MONO_TYPE_CLASS:
11095 case MONO_TYPE_SZARRAY:
11096 case MONO_TYPE_ARRAY:
11097 if (!mono_gc_is_moving ()) {
11098 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11099 type_to_eval_stack_type ((cfg), field->type, *sp);
11107 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11112 case MONO_TYPE_VALUETYPE:
11122 CHECK_STACK_OVF (1);
11124 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11125 load->flags |= ins_flag;
11131 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11132 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11133 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11144 token = read32 (ip + 1);
11145 klass = mini_get_class (method, token, generic_context);
11146 CHECK_TYPELOAD (klass);
11147 if (ins_flag & MONO_INST_VOLATILE) {
11148 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11149 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11151 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11152 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11153 ins->flags |= ins_flag;
11154 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11155 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11156 /* insert call to write barrier */
11157 emit_write_barrier (cfg, sp [0], sp [1]);
11169 const char *data_ptr;
11171 guint32 field_token;
11177 token = read32 (ip + 1);
11179 klass = mini_get_class (method, token, generic_context);
11180 CHECK_TYPELOAD (klass);
11182 context_used = mini_class_check_context_used (cfg, klass);
11184 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11185 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11186 ins->sreg1 = sp [0]->dreg;
11187 ins->type = STACK_I4;
11188 ins->dreg = alloc_ireg (cfg);
11189 MONO_ADD_INS (cfg->cbb, ins);
11190 *sp = mono_decompose_opcode (cfg, ins);
11193 if (context_used) {
11194 MonoInst *args [3];
11195 MonoClass *array_class = mono_array_class_get (klass, 1);
11196 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11198 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11201 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11202 array_class, MONO_RGCTX_INFO_VTABLE);
11207 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11209 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11211 if (cfg->opt & MONO_OPT_SHARED) {
11212 /* Decompose now to avoid problems with references to the domainvar */
11213 MonoInst *iargs [3];
11215 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11216 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11217 iargs [2] = sp [0];
11219 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11221 /* Decompose later since it is needed by abcrem */
11222 MonoClass *array_type = mono_array_class_get (klass, 1);
11223 mono_class_vtable (cfg->domain, array_type);
11224 CHECK_TYPELOAD (array_type);
11226 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11227 ins->dreg = alloc_ireg_ref (cfg);
11228 ins->sreg1 = sp [0]->dreg;
11229 ins->inst_newa_class = klass;
11230 ins->type = STACK_OBJ;
11231 ins->klass = array_type;
11232 MONO_ADD_INS (cfg->cbb, ins);
11233 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11234 cfg->cbb->has_array_access = TRUE;
11236 /* Needed so mono_emit_load_get_addr () gets called */
11237 mono_get_got_var (cfg);
11247 * we inline/optimize the initialization sequence if possible.
11248 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11249 * for small sizes open code the memcpy
11250 * ensure the rva field is big enough
11252 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11253 MonoMethod *memcpy_method = get_memcpy_method ();
11254 MonoInst *iargs [3];
11255 int add_reg = alloc_ireg_mp (cfg);
11257 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11258 if (cfg->compile_aot) {
11259 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11261 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11263 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11264 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11273 if (sp [0]->type != STACK_OBJ)
11276 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11277 ins->dreg = alloc_preg (cfg);
11278 ins->sreg1 = sp [0]->dreg;
11279 ins->type = STACK_I4;
11280 /* This flag will be inherited by the decomposition */
11281 ins->flags |= MONO_INST_FAULT;
11282 MONO_ADD_INS (cfg->cbb, ins);
11283 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11284 cfg->cbb->has_array_access = TRUE;
11292 if (sp [0]->type != STACK_OBJ)
11295 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11297 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11298 CHECK_TYPELOAD (klass);
11299 /* we need to make sure that this array is exactly the type it needs
11300 * to be for correctness. the wrappers are lax with their usage
11301 * so we need to ignore them here
11303 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11304 MonoClass *array_class = mono_array_class_get (klass, 1);
11305 mini_emit_check_array_type (cfg, sp [0], array_class);
11306 CHECK_TYPELOAD (array_class);
11310 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11315 case CEE_LDELEM_I1:
11316 case CEE_LDELEM_U1:
11317 case CEE_LDELEM_I2:
11318 case CEE_LDELEM_U2:
11319 case CEE_LDELEM_I4:
11320 case CEE_LDELEM_U4:
11321 case CEE_LDELEM_I8:
11323 case CEE_LDELEM_R4:
11324 case CEE_LDELEM_R8:
11325 case CEE_LDELEM_REF: {
11331 if (*ip == CEE_LDELEM) {
11333 token = read32 (ip + 1);
11334 klass = mini_get_class (method, token, generic_context);
11335 CHECK_TYPELOAD (klass);
11336 mono_class_init (klass);
11339 klass = array_access_to_klass (*ip);
11341 if (sp [0]->type != STACK_OBJ)
11344 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11346 if (mini_is_gsharedvt_variable_klass (klass)) {
11347 // FIXME-VT: OP_ICONST optimization
11348 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11349 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11350 ins->opcode = OP_LOADV_MEMBASE;
11351 } else if (sp [1]->opcode == OP_ICONST) {
11352 int array_reg = sp [0]->dreg;
11353 int index_reg = sp [1]->dreg;
11354 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11356 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11357 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11359 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11360 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11362 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11363 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11366 if (*ip == CEE_LDELEM)
11373 case CEE_STELEM_I1:
11374 case CEE_STELEM_I2:
11375 case CEE_STELEM_I4:
11376 case CEE_STELEM_I8:
11377 case CEE_STELEM_R4:
11378 case CEE_STELEM_R8:
11379 case CEE_STELEM_REF:
11384 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11386 if (*ip == CEE_STELEM) {
11388 token = read32 (ip + 1);
11389 klass = mini_get_class (method, token, generic_context);
11390 CHECK_TYPELOAD (klass);
11391 mono_class_init (klass);
11394 klass = array_access_to_klass (*ip);
11396 if (sp [0]->type != STACK_OBJ)
11399 emit_array_store (cfg, klass, sp, TRUE);
11401 if (*ip == CEE_STELEM)
11408 case CEE_CKFINITE: {
11412 if (cfg->llvm_only) {
11413 MonoInst *iargs [1];
11415 iargs [0] = sp [0];
11416 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11418 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11419 ins->sreg1 = sp [0]->dreg;
11420 ins->dreg = alloc_freg (cfg);
11421 ins->type = STACK_R8;
11422 MONO_ADD_INS (cfg->cbb, ins);
11424 *sp++ = mono_decompose_opcode (cfg, ins);
11430 case CEE_REFANYVAL: {
11431 MonoInst *src_var, *src;
11433 int klass_reg = alloc_preg (cfg);
11434 int dreg = alloc_preg (cfg);
11436 GSHAREDVT_FAILURE (*ip);
11439 MONO_INST_NEW (cfg, ins, *ip);
11442 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11443 CHECK_TYPELOAD (klass);
11445 context_used = mini_class_check_context_used (cfg, klass);
11448 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11450 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11451 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11454 if (context_used) {
11455 MonoInst *klass_ins;
11457 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11458 klass, MONO_RGCTX_INFO_KLASS);
11461 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11462 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11464 mini_emit_class_check (cfg, klass_reg, klass);
11466 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11467 ins->type = STACK_MP;
11468 ins->klass = klass;
11473 case CEE_MKREFANY: {
11474 MonoInst *loc, *addr;
11476 GSHAREDVT_FAILURE (*ip);
11479 MONO_INST_NEW (cfg, ins, *ip);
11482 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11483 CHECK_TYPELOAD (klass);
11485 context_used = mini_class_check_context_used (cfg, klass);
11487 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11488 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11490 if (context_used) {
11491 MonoInst *const_ins;
11492 int type_reg = alloc_preg (cfg);
11494 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11495 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11497 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11499 int const_reg = alloc_preg (cfg);
11500 int type_reg = alloc_preg (cfg);
11502 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11503 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11505 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11507 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11509 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11510 ins->type = STACK_VTYPE;
11511 ins->klass = mono_defaults.typed_reference_class;
11516 case CEE_LDTOKEN: {
11518 MonoClass *handle_class;
11520 CHECK_STACK_OVF (1);
11523 n = read32 (ip + 1);
11525 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11526 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11527 handle = mono_method_get_wrapper_data (method, n);
11528 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11529 if (handle_class == mono_defaults.typehandle_class)
11530 handle = &((MonoClass*)handle)->byval_arg;
11533 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11538 mono_class_init (handle_class);
11539 if (cfg->gshared) {
11540 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11541 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11542 /* This case handles ldtoken
11543 of an open type, like for
11546 } else if (handle_class == mono_defaults.typehandle_class) {
11547 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11548 } else if (handle_class == mono_defaults.fieldhandle_class)
11549 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11550 else if (handle_class == mono_defaults.methodhandle_class)
11551 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11553 g_assert_not_reached ();
11556 if ((cfg->opt & MONO_OPT_SHARED) &&
11557 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11558 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11559 MonoInst *addr, *vtvar, *iargs [3];
11560 int method_context_used;
11562 method_context_used = mini_method_check_context_used (cfg, method);
11564 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11566 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11567 EMIT_NEW_ICONST (cfg, iargs [1], n);
11568 if (method_context_used) {
11569 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11570 method, MONO_RGCTX_INFO_METHOD);
11571 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11573 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11574 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11576 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11578 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11580 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11582 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11583 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11584 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11585 (cmethod->klass == mono_defaults.systemtype_class) &&
11586 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11587 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11589 mono_class_init (tclass);
11590 if (context_used) {
11591 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11592 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11593 } else if (cfg->compile_aot) {
11594 if (method->wrapper_type) {
11595 mono_error_init (&error); //got to do it since there are multiple conditionals below
11596 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11597 /* Special case for static synchronized wrappers */
11598 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11600 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11601 /* FIXME: n is not a normal token */
11603 EMIT_NEW_PCONST (cfg, ins, NULL);
11606 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11609 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11611 EMIT_NEW_PCONST (cfg, ins, rt);
11613 ins->type = STACK_OBJ;
11614 ins->klass = cmethod->klass;
11617 MonoInst *addr, *vtvar;
11619 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11621 if (context_used) {
11622 if (handle_class == mono_defaults.typehandle_class) {
11623 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11624 mono_class_from_mono_type ((MonoType *)handle),
11625 MONO_RGCTX_INFO_TYPE);
11626 } else if (handle_class == mono_defaults.methodhandle_class) {
11627 ins = emit_get_rgctx_method (cfg, context_used,
11628 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11629 } else if (handle_class == mono_defaults.fieldhandle_class) {
11630 ins = emit_get_rgctx_field (cfg, context_used,
11631 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11633 g_assert_not_reached ();
11635 } else if (cfg->compile_aot) {
11636 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11638 EMIT_NEW_PCONST (cfg, ins, handle);
11640 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11641 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11642 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11652 if (sp [-1]->type != STACK_OBJ)
11655 MONO_INST_NEW (cfg, ins, OP_THROW);
11657 ins->sreg1 = sp [0]->dreg;
11659 cfg->cbb->out_of_line = TRUE;
11660 MONO_ADD_INS (cfg->cbb, ins);
11661 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11662 MONO_ADD_INS (cfg->cbb, ins);
11665 link_bblock (cfg, cfg->cbb, end_bblock);
11666 start_new_bblock = 1;
11667 /* This can complicate code generation for llvm since the return value might not be defined */
11668 if (COMPILE_LLVM (cfg))
11669 INLINE_FAILURE ("throw");
11671 case CEE_ENDFINALLY:
11672 if (!ip_in_finally_clause (cfg, ip - header->code))
11674 /* mono_save_seq_point_info () depends on this */
11675 if (sp != stack_start)
11676 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11677 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11678 MONO_ADD_INS (cfg->cbb, ins);
11680 start_new_bblock = 1;
11683 * Control will leave the method so empty the stack, otherwise
11684 * the next basic block will start with a nonempty stack.
11686 while (sp != stack_start) {
11691 case CEE_LEAVE_S: {
11694 if (*ip == CEE_LEAVE) {
11696 target = ip + 5 + (gint32)read32(ip + 1);
11699 target = ip + 2 + (signed char)(ip [1]);
11702 /* empty the stack */
11703 while (sp != stack_start) {
11708 * If this leave statement is in a catch block, check for a
11709 * pending exception, and rethrow it if necessary.
11710 * We avoid doing this in runtime invoke wrappers, since those are called
 11711 * by native code which expects the wrapper to catch all exceptions.
11713 for (i = 0; i < header->num_clauses; ++i) {
11714 MonoExceptionClause *clause = &header->clauses [i];
11717 * Use <= in the final comparison to handle clauses with multiple
11718 * leave statements, like in bug #78024.
11719 * The ordering of the exception clauses guarantees that we find the
11720 * innermost clause.
11722 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11724 MonoBasicBlock *dont_throw;
11729 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11732 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11734 NEW_BBLOCK (cfg, dont_throw);
11737 * Currently, we always rethrow the abort exception, despite the
11738 * fact that this is not correct. See thread6.cs for an example.
11739 * But propagating the abort exception is more important than
 11740 * getting the semantics right.
11742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11743 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11744 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11746 MONO_START_BB (cfg, dont_throw);
11751 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11754 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11756 MonoExceptionClause *clause;
11758 for (tmp = handlers; tmp; tmp = tmp->next) {
11759 clause = (MonoExceptionClause *)tmp->data;
11760 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11762 link_bblock (cfg, cfg->cbb, tblock);
11763 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11764 ins->inst_target_bb = tblock;
11765 ins->inst_eh_block = clause;
11766 MONO_ADD_INS (cfg->cbb, ins);
11767 cfg->cbb->has_call_handler = 1;
11768 if (COMPILE_LLVM (cfg)) {
11769 MonoBasicBlock *target_bb;
11772 * Link the finally bblock with the target, since it will
11773 * conceptually branch there.
11775 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11776 GET_BBLOCK (cfg, target_bb, target);
11777 link_bblock (cfg, tblock, target_bb);
11780 g_list_free (handlers);
11783 MONO_INST_NEW (cfg, ins, OP_BR);
11784 MONO_ADD_INS (cfg->cbb, ins);
11785 GET_BBLOCK (cfg, tblock, target);
11786 link_bblock (cfg, cfg->cbb, tblock);
11787 ins->inst_target_bb = tblock;
11789 start_new_bblock = 1;
11791 if (*ip == CEE_LEAVE)
11800 * Mono specific opcodes
11802 case MONO_CUSTOM_PREFIX: {
11804 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11808 case CEE_MONO_ICALL: {
11810 MonoJitICallInfo *info;
11812 token = read32 (ip + 2);
11813 func = mono_method_get_wrapper_data (method, token);
11814 info = mono_find_jit_icall_by_addr (func);
11816 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11819 CHECK_STACK (info->sig->param_count);
11820 sp -= info->sig->param_count;
11822 ins = mono_emit_jit_icall (cfg, info->func, sp);
11823 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11827 inline_costs += 10 * num_calls++;
11831 case CEE_MONO_LDPTR_CARD_TABLE:
11832 case CEE_MONO_LDPTR_NURSERY_START:
11833 case CEE_MONO_LDPTR_NURSERY_BITS:
11834 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11835 CHECK_STACK_OVF (1);
11838 case CEE_MONO_LDPTR_CARD_TABLE:
11839 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11841 case CEE_MONO_LDPTR_NURSERY_START:
11842 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11844 case CEE_MONO_LDPTR_NURSERY_BITS:
11845 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11847 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11848 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11854 inline_costs += 10 * num_calls++;
11857 case CEE_MONO_LDPTR: {
11860 CHECK_STACK_OVF (1);
11862 token = read32 (ip + 2);
11864 ptr = mono_method_get_wrapper_data (method, token);
11865 EMIT_NEW_PCONST (cfg, ins, ptr);
11868 inline_costs += 10 * num_calls++;
11869 /* Can't embed random pointers into AOT code */
11873 case CEE_MONO_JIT_ICALL_ADDR: {
11874 MonoJitICallInfo *callinfo;
11877 CHECK_STACK_OVF (1);
11879 token = read32 (ip + 2);
11881 ptr = mono_method_get_wrapper_data (method, token);
11882 callinfo = mono_find_jit_icall_by_addr (ptr);
11883 g_assert (callinfo);
11884 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11887 inline_costs += 10 * num_calls++;
11890 case CEE_MONO_ICALL_ADDR: {
11891 MonoMethod *cmethod;
11894 CHECK_STACK_OVF (1);
11896 token = read32 (ip + 2);
11898 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11900 if (cfg->compile_aot) {
11901 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11903 * This is generated by emit_native_wrapper () to resolve the pinvoke address
 11904 * before the call, it's not needed when using direct pinvoke.
 11905 * This is not an optimization, but it's used to avoid looking up pinvokes
11906 * on platforms which don't support dlopen ().
11908 EMIT_NEW_PCONST (cfg, ins, NULL);
11910 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11913 ptr = mono_lookup_internal_call (cmethod);
11915 EMIT_NEW_PCONST (cfg, ins, ptr);
11921 case CEE_MONO_VTADDR: {
11922 MonoInst *src_var, *src;
11928 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11929 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11934 case CEE_MONO_NEWOBJ: {
11935 MonoInst *iargs [2];
11937 CHECK_STACK_OVF (1);
11939 token = read32 (ip + 2);
11940 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11941 mono_class_init (klass);
11942 NEW_DOMAINCONST (cfg, iargs [0]);
11943 MONO_ADD_INS (cfg->cbb, iargs [0]);
11944 NEW_CLASSCONST (cfg, iargs [1], klass);
11945 MONO_ADD_INS (cfg->cbb, iargs [1]);
11946 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11948 inline_costs += 10 * num_calls++;
11951 case CEE_MONO_OBJADDR:
11954 MONO_INST_NEW (cfg, ins, OP_MOVE);
11955 ins->dreg = alloc_ireg_mp (cfg);
11956 ins->sreg1 = sp [0]->dreg;
11957 ins->type = STACK_MP;
11958 MONO_ADD_INS (cfg->cbb, ins);
11962 case CEE_MONO_LDNATIVEOBJ:
11964 * Similar to LDOBJ, but instead load the unmanaged
11965 * representation of the vtype to the stack.
11970 token = read32 (ip + 2);
11971 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11972 g_assert (klass->valuetype);
11973 mono_class_init (klass);
11976 MonoInst *src, *dest, *temp;
11979 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11980 temp->backend.is_pinvoke = 1;
11981 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11982 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11984 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11985 dest->type = STACK_VTYPE;
11986 dest->klass = klass;
11992 case CEE_MONO_RETOBJ: {
11994 * Same as RET, but return the native representation of a vtype
11997 g_assert (cfg->ret);
11998 g_assert (mono_method_signature (method)->pinvoke);
12003 token = read32 (ip + 2);
12004 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12006 if (!cfg->vret_addr) {
12007 g_assert (cfg->ret_var_is_local);
12009 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12011 EMIT_NEW_RETLOADA (cfg, ins);
12013 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12015 if (sp != stack_start)
12018 MONO_INST_NEW (cfg, ins, OP_BR);
12019 ins->inst_target_bb = end_bblock;
12020 MONO_ADD_INS (cfg->cbb, ins);
12021 link_bblock (cfg, cfg->cbb, end_bblock);
12022 start_new_bblock = 1;
12026 case CEE_MONO_SAVE_LMF:
12027 case CEE_MONO_RESTORE_LMF:
12030 case CEE_MONO_CLASSCONST:
12031 CHECK_STACK_OVF (1);
12033 token = read32 (ip + 2);
12034 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12037 inline_costs += 10 * num_calls++;
12039 case CEE_MONO_NOT_TAKEN:
12040 cfg->cbb->out_of_line = TRUE;
12043 case CEE_MONO_TLS: {
12046 CHECK_STACK_OVF (1);
12048 key = (MonoTlsKey)read32 (ip + 2);
12049 g_assert (key < TLS_KEY_NUM);
12051 ins = mono_create_tls_get (cfg, key);
12053 ins->type = STACK_PTR;
12058 case CEE_MONO_DYN_CALL: {
12059 MonoCallInst *call;
12061 /* It would be easier to call a trampoline, but that would put an
12062 * extra frame on the stack, confusing exception handling. So
12063 * implement it inline using an opcode for now.
12066 if (!cfg->dyn_call_var) {
12067 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12068 /* prevent it from being register allocated */
12069 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
 12072 /* Has to use a call inst since the local regalloc expects it */
12073 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12074 ins = (MonoInst*)call;
12076 ins->sreg1 = sp [0]->dreg;
12077 ins->sreg2 = sp [1]->dreg;
12078 MONO_ADD_INS (cfg->cbb, ins);
12080 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12083 inline_costs += 10 * num_calls++;
12087 case CEE_MONO_MEMORY_BARRIER: {
12089 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12093 case CEE_MONO_ATOMIC_STORE_I4: {
12094 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12100 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12101 ins->dreg = sp [0]->dreg;
12102 ins->sreg1 = sp [1]->dreg;
12103 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12104 MONO_ADD_INS (cfg->cbb, ins);
12109 case CEE_MONO_JIT_ATTACH: {
12110 MonoInst *args [16], *domain_ins;
12111 MonoInst *ad_ins, *jit_tls_ins;
12112 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12114 g_assert (!mono_threads_is_coop_enabled ());
12116 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12118 EMIT_NEW_PCONST (cfg, ins, NULL);
12119 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12121 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12122 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12124 if (ad_ins && jit_tls_ins) {
12125 NEW_BBLOCK (cfg, next_bb);
12126 NEW_BBLOCK (cfg, call_bb);
12128 if (cfg->compile_aot) {
12129 /* AOT code is only used in the root domain */
12130 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12132 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12134 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12141 MONO_START_BB (cfg, call_bb);
12144 /* AOT code is only used in the root domain */
12145 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12146 if (cfg->compile_aot) {
12150 * This is called on unattached threads, so it cannot go through the trampoline
12151 * infrastructure. Use an indirect call through a got slot initialized at load time
12154 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12155 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12157 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12159 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12162 MONO_START_BB (cfg, next_bb);
12167 case CEE_MONO_JIT_DETACH: {
12168 MonoInst *args [16];
12170 /* Restore the original domain */
12171 dreg = alloc_ireg (cfg);
12172 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12173 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12177 case CEE_MONO_CALLI_EXTRA_ARG: {
12179 MonoMethodSignature *fsig;
12183 * This is the same as CEE_CALLI, but passes an additional argument
12184 * to the called method in llvmonly mode.
12185 * This is only used by delegate invoke wrappers to call the
12186 * actual delegate method.
12188 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12191 token = read32 (ip + 2);
12199 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12202 if (cfg->llvm_only)
12203 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12205 n = fsig->param_count + fsig->hasthis + 1;
12212 if (cfg->llvm_only) {
 12214 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12215 * cconv. This is set by mono_init_delegate ().
12217 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12218 MonoInst *callee = addr;
12219 MonoInst *call, *localloc_ins;
12220 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12221 int low_bit_reg = alloc_preg (cfg);
12223 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12224 NEW_BBLOCK (cfg, end_bb);
12226 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12228 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12230 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12231 addr = emit_get_rgctx_sig (cfg, context_used,
12232 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12234 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12236 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12237 ins->dreg = alloc_preg (cfg);
12238 ins->inst_imm = 2 * SIZEOF_VOID_P;
12239 MONO_ADD_INS (cfg->cbb, ins);
12240 localloc_ins = ins;
12241 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12242 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12243 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12245 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12246 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12248 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12249 MONO_START_BB (cfg, is_gsharedvt_bb);
12250 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12251 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12252 ins->dreg = call->dreg;
12254 MONO_START_BB (cfg, end_bb);
12256 /* Caller uses a normal calling conv */
12258 MonoInst *callee = addr;
12259 MonoInst *call, *localloc_ins;
12260 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12261 int low_bit_reg = alloc_preg (cfg);
12263 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12264 NEW_BBLOCK (cfg, end_bb);
12266 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12267 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12268 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12270 /* Normal case: callee uses a normal cconv, no conversion is needed */
12271 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12272 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12273 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12274 MONO_START_BB (cfg, is_gsharedvt_bb);
12275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12276 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12277 MONO_ADD_INS (cfg->cbb, addr);
12279 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12281 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12282 ins->dreg = alloc_preg (cfg);
12283 ins->inst_imm = 2 * SIZEOF_VOID_P;
12284 MONO_ADD_INS (cfg->cbb, ins);
12285 localloc_ins = ins;
12286 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12287 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12290 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12291 ins->dreg = call->dreg;
12292 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12294 MONO_START_BB (cfg, end_bb);
12297 /* Same as CEE_CALLI */
12298 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12300 * We pass the address to the gsharedvt trampoline in the rgctx reg
12302 MonoInst *callee = addr;
12304 addr = emit_get_rgctx_sig (cfg, context_used,
12305 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12306 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12308 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12312 if (!MONO_TYPE_IS_VOID (fsig->ret))
12313 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12315 CHECK_CFG_EXCEPTION;
12319 constrained_class = NULL;
12322 case CEE_MONO_LDDOMAIN:
12323 CHECK_STACK_OVF (1);
12324 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12328 case CEE_MONO_GET_LAST_ERROR:
12330 CHECK_STACK_OVF (1);
12332 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12333 ins->dreg = alloc_dreg (cfg, STACK_I4);
12334 ins->type = STACK_I4;
12335 MONO_ADD_INS (cfg->cbb, ins);
12341 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12347 case CEE_PREFIX1: {
12350 case CEE_ARGLIST: {
12351 /* somewhat similar to LDTOKEN */
12352 MonoInst *addr, *vtvar;
12353 CHECK_STACK_OVF (1);
12354 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12356 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12357 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12359 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12360 ins->type = STACK_VTYPE;
12361 ins->klass = mono_defaults.argumenthandle_class;
12371 MonoInst *cmp, *arg1, *arg2;
12379 * The following transforms:
12380 * CEE_CEQ into OP_CEQ
12381 * CEE_CGT into OP_CGT
12382 * CEE_CGT_UN into OP_CGT_UN
12383 * CEE_CLT into OP_CLT
12384 * CEE_CLT_UN into OP_CLT_UN
12386 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12388 MONO_INST_NEW (cfg, ins, cmp->opcode);
12389 cmp->sreg1 = arg1->dreg;
12390 cmp->sreg2 = arg2->dreg;
12391 type_from_op (cfg, cmp, arg1, arg2);
12393 add_widen_op (cfg, cmp, &arg1, &arg2);
12394 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12395 cmp->opcode = OP_LCOMPARE;
12396 else if (arg1->type == STACK_R4)
12397 cmp->opcode = OP_RCOMPARE;
12398 else if (arg1->type == STACK_R8)
12399 cmp->opcode = OP_FCOMPARE;
12401 cmp->opcode = OP_ICOMPARE;
12402 MONO_ADD_INS (cfg->cbb, cmp);
12403 ins->type = STACK_I4;
12404 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12405 type_from_op (cfg, ins, arg1, arg2);
12407 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12409 * The backends expect the fceq opcodes to do the
12412 ins->sreg1 = cmp->sreg1;
12413 ins->sreg2 = cmp->sreg2;
12416 MONO_ADD_INS (cfg->cbb, ins);
12422 MonoInst *argconst;
12423 MonoMethod *cil_method;
12425 CHECK_STACK_OVF (1);
12427 n = read32 (ip + 2);
12428 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12431 mono_class_init (cmethod->klass);
12433 mono_save_token_info (cfg, image, n, cmethod);
12435 context_used = mini_method_check_context_used (cfg, cmethod);
12437 cil_method = cmethod;
12438 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12439 emit_method_access_failure (cfg, method, cil_method);
12441 if (mono_security_core_clr_enabled ())
12442 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12445 * Optimize the common case of ldftn+delegate creation
12447 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12448 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12449 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12450 MonoInst *target_ins, *handle_ins;
12451 MonoMethod *invoke;
12452 int invoke_context_used;
12454 invoke = mono_get_delegate_invoke (ctor_method->klass);
12455 if (!invoke || !mono_method_signature (invoke))
12458 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12460 target_ins = sp [-1];
12462 if (mono_security_core_clr_enabled ())
12463 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12465 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12466 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12467 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12469 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12473 /* FIXME: SGEN support */
12474 if (invoke_context_used == 0 || cfg->llvm_only) {
12476 if (cfg->verbose_level > 3)
12477 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12478 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12481 CHECK_CFG_EXCEPTION;
12491 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12492 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12496 inline_costs += 10 * num_calls++;
12499 case CEE_LDVIRTFTN: {
12500 MonoInst *args [2];
12504 n = read32 (ip + 2);
12505 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12508 mono_class_init (cmethod->klass);
12510 context_used = mini_method_check_context_used (cfg, cmethod);
12512 if (mono_security_core_clr_enabled ())
12513 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12516 * Optimize the common case of ldvirtftn+delegate creation
12518 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12519 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12520 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12521 MonoInst *target_ins, *handle_ins;
12522 MonoMethod *invoke;
12523 int invoke_context_used;
12524 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12526 invoke = mono_get_delegate_invoke (ctor_method->klass);
12527 if (!invoke || !mono_method_signature (invoke))
12530 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12532 target_ins = sp [-1];
12534 if (mono_security_core_clr_enabled ())
12535 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12537 /* FIXME: SGEN support */
12538 if (invoke_context_used == 0 || cfg->llvm_only) {
12540 if (cfg->verbose_level > 3)
12541 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12542 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12545 CHECK_CFG_EXCEPTION;
12558 args [1] = emit_get_rgctx_method (cfg, context_used,
12559 cmethod, MONO_RGCTX_INFO_METHOD);
12562 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12564 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12567 inline_costs += 10 * num_calls++;
12571 CHECK_STACK_OVF (1);
12573 n = read16 (ip + 2);
12575 EMIT_NEW_ARGLOAD (cfg, ins, n);
12580 CHECK_STACK_OVF (1);
12582 n = read16 (ip + 2);
12584 NEW_ARGLOADA (cfg, ins, n);
12585 MONO_ADD_INS (cfg->cbb, ins);
12593 n = read16 (ip + 2);
12595 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12597 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12601 CHECK_STACK_OVF (1);
12603 n = read16 (ip + 2);
12605 EMIT_NEW_LOCLOAD (cfg, ins, n);
12610 unsigned char *tmp_ip;
12611 CHECK_STACK_OVF (1);
12613 n = read16 (ip + 2);
12616 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12622 EMIT_NEW_LOCLOADA (cfg, ins, n);
12631 n = read16 (ip + 2);
12633 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12635 emit_stloc_ir (cfg, sp, header, n);
12639 case CEE_LOCALLOC: {
12641 MonoBasicBlock *non_zero_bb, *end_bb;
12642 int alloc_ptr = alloc_preg (cfg);
12644 if (sp != stack_start)
12646 if (cfg->method != method)
12648 * Inlining this into a loop in a parent could lead to
12649 * stack overflows which is different behavior than the
12650 * non-inlined case, thus disable inlining in this case.
12652 INLINE_FAILURE("localloc");
12654 NEW_BBLOCK (cfg, non_zero_bb);
12655 NEW_BBLOCK (cfg, end_bb);
12657 /* if size != zero */
12658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12661 //size is zero, so result is NULL
12662 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12663 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12665 MONO_START_BB (cfg, non_zero_bb);
12666 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12667 ins->dreg = alloc_ptr;
12668 ins->sreg1 = sp [0]->dreg;
12669 ins->type = STACK_PTR;
12670 MONO_ADD_INS (cfg->cbb, ins);
12672 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12674 ins->flags |= MONO_INST_INIT;
12676 MONO_START_BB (cfg, end_bb);
12677 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12678 ins->type = STACK_PTR;
12684 case CEE_ENDFILTER: {
12685 MonoExceptionClause *clause, *nearest;
12690 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12692 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12693 ins->sreg1 = (*sp)->dreg;
12694 MONO_ADD_INS (cfg->cbb, ins);
12695 start_new_bblock = 1;
12699 for (cc = 0; cc < header->num_clauses; ++cc) {
12700 clause = &header->clauses [cc];
12701 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12702 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12703 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12706 g_assert (nearest);
12707 if ((ip - header->code) != nearest->handler_offset)
12712 case CEE_UNALIGNED_:
12713 ins_flag |= MONO_INST_UNALIGNED;
12714 /* FIXME: record alignment? we can assume 1 for now */
12718 case CEE_VOLATILE_:
12719 ins_flag |= MONO_INST_VOLATILE;
12723 ins_flag |= MONO_INST_TAILCALL;
12724 cfg->flags |= MONO_CFG_HAS_TAIL;
12725 /* Can't inline tail calls at this time */
12726 inline_costs += 100000;
12733 token = read32 (ip + 2);
12734 klass = mini_get_class (method, token, generic_context);
12735 CHECK_TYPELOAD (klass);
12736 if (generic_class_is_reference_type (cfg, klass))
12737 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12739 mini_emit_initobj (cfg, *sp, NULL, klass);
12743 case CEE_CONSTRAINED_:
12745 token = read32 (ip + 2);
12746 constrained_class = mini_get_class (method, token, generic_context);
12747 CHECK_TYPELOAD (constrained_class);
12751 case CEE_INITBLK: {
12752 MonoInst *iargs [3];
12756 /* Skip optimized paths for volatile operations. */
12757 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12758 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12759 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12760 /* emit_memset only works when val == 0 */
12761 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12764 iargs [0] = sp [0];
12765 iargs [1] = sp [1];
12766 iargs [2] = sp [2];
12767 if (ip [1] == CEE_CPBLK) {
12769 * FIXME: It's unclear whether we should be emitting both the acquire
12770 * and release barriers for cpblk. It is technically both a load and
12771 * store operation, so it seems like that's the sensible thing to do.
12773 * FIXME: We emit full barriers on both sides of the operation for
12774 * simplicity. We should have a separate atomic memcpy method instead.
12776 MonoMethod *memcpy_method = get_memcpy_method ();
12778 if (ins_flag & MONO_INST_VOLATILE)
12779 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12781 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12782 call->flags |= ins_flag;
12784 if (ins_flag & MONO_INST_VOLATILE)
12785 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12787 MonoMethod *memset_method = get_memset_method ();
12788 if (ins_flag & MONO_INST_VOLATILE) {
12789 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12790 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12792 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12793 call->flags |= ins_flag;
12804 ins_flag |= MONO_INST_NOTYPECHECK;
12806 ins_flag |= MONO_INST_NORANGECHECK;
12807 /* we ignore the no-nullcheck for now since we
12808 * really do it explicitly only when doing callvirt->call
12812 case CEE_RETHROW: {
12814 int handler_offset = -1;
12816 for (i = 0; i < header->num_clauses; ++i) {
12817 MonoExceptionClause *clause = &header->clauses [i];
12818 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12819 handler_offset = clause->handler_offset;
12824 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12826 if (handler_offset == -1)
12829 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12830 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12831 ins->sreg1 = load->dreg;
12832 MONO_ADD_INS (cfg->cbb, ins);
12834 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12835 MONO_ADD_INS (cfg->cbb, ins);
12838 link_bblock (cfg, cfg->cbb, end_bblock);
12839 start_new_bblock = 1;
12847 CHECK_STACK_OVF (1);
12849 token = read32 (ip + 2);
12850 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12851 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12854 val = mono_type_size (type, &ialign);
12856 MonoClass *klass = mini_get_class (method, token, generic_context);
12857 CHECK_TYPELOAD (klass);
12859 val = mono_type_size (&klass->byval_arg, &ialign);
12861 if (mini_is_gsharedvt_klass (klass))
12862 GSHAREDVT_FAILURE (*ip);
12864 EMIT_NEW_ICONST (cfg, ins, val);
12869 case CEE_REFANYTYPE: {
12870 MonoInst *src_var, *src;
12872 GSHAREDVT_FAILURE (*ip);
12878 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12880 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12881 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12882 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12887 case CEE_READONLY_:
12900 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12910 g_warning ("opcode 0x%02x not handled", *ip);
12914 if (start_new_bblock != 1)
12917 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12918 if (cfg->cbb->next_bb) {
12919 /* This could already be set because of inlining, #693905 */
12920 MonoBasicBlock *bb = cfg->cbb;
12922 while (bb->next_bb)
12924 bb->next_bb = end_bblock;
12926 cfg->cbb->next_bb = end_bblock;
12929 if (cfg->method == method && cfg->domainvar) {
12931 MonoInst *get_domain;
12933 cfg->cbb = init_localsbb;
12935 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12936 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12937 MONO_ADD_INS (cfg->cbb, store);
12940 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12941 if (cfg->compile_aot)
12942 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12943 mono_get_got_var (cfg);
12946 if (cfg->method == method && cfg->got_var)
12947 mono_emit_load_got_addr (cfg);
12949 if (init_localsbb) {
12950 cfg->cbb = init_localsbb;
12952 for (i = 0; i < header->num_locals; ++i) {
12953 emit_init_local (cfg, i, header->locals [i], init_locals);
12957 if (cfg->init_ref_vars && cfg->method == method) {
12958 /* Emit initialization for ref vars */
12959 // FIXME: Avoid duplication initialization for IL locals.
12960 for (i = 0; i < cfg->num_varinfo; ++i) {
12961 MonoInst *ins = cfg->varinfo [i];
12963 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12964 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12968 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12969 cfg->cbb = init_localsbb;
12970 emit_push_lmf (cfg);
12973 cfg->cbb = init_localsbb;
12974 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12977 MonoBasicBlock *bb;
12980 * Make seq points at backward branch targets interruptable.
12982 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12983 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12984 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12987 /* Add a sequence point for method entry/exit events */
12988 if (seq_points && cfg->gen_sdb_seq_points) {
12989 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12990 MONO_ADD_INS (init_localsbb, ins);
12991 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12992 MONO_ADD_INS (cfg->bb_exit, ins);
12996 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12997 * the code they refer to was dead (#11880).
12999 if (sym_seq_points) {
13000 for (i = 0; i < header->code_size; ++i) {
13001 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13004 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13005 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13012 if (cfg->method == method) {
13013 MonoBasicBlock *bb;
13014 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13015 if (bb == cfg->bb_init)
13018 bb->region = mono_find_block_region (cfg, bb->real_offset);
13020 mono_create_spvar_for_region (cfg, bb->region);
13021 if (cfg->verbose_level > 2)
13022 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13025 MonoBasicBlock *bb;
13026 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13027 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13028 bb->real_offset = inline_offset;
13032 if (inline_costs < 0) {
13035 /* Method is too large */
13036 mname = mono_method_full_name (method, TRUE);
13037 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13041 if ((cfg->verbose_level > 2) && (cfg->method == method))
13042 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13047 g_assert (!mono_error_ok (&cfg->error));
13051 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13055 set_exception_type_from_invalid_il (cfg, method, ip);
13059 g_slist_free (class_inits);
13060 mono_basic_block_free (original_bb);
13061 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13062 if (cfg->exception_type)
13065 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source STORE*_MEMBASE_REG opcode to its immediate-source
 * STORE*_MEMBASE_IMM counterpart of the same store width. Any opcode
 * without a mapping trips the assertion below.
 */
13069 store_membase_reg_to_store_membase_imm (int opcode)
13072 case OP_STORE_MEMBASE_REG:
13073 return OP_STORE_MEMBASE_IMM;
13074 case OP_STOREI1_MEMBASE_REG:
13075 return OP_STOREI1_MEMBASE_IMM;
13076 case OP_STOREI2_MEMBASE_REG:
13077 return OP_STOREI2_MEMBASE_IMM;
13078 case OP_STOREI4_MEMBASE_REG:
13079 return OP_STOREI4_MEMBASE_IMM;
13080 case OP_STOREI8_MEMBASE_REG:
13081 return OP_STOREI8_MEMBASE_IMM;
/* Unsupported store opcode: this converter must only be called with the widths above. */
13083 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE: integer/long ALU ops,
 * shifts, compares, membase stores, and a few x86/amd64-specific opcodes.
 * NOTE(review): the case labels and the default/fall-through result are not
 * visible in this excerpt — presumably unmapped opcodes return -1; confirm
 * against the full source before relying on that.
 */
13090 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU and shift ops. */
13094 return OP_IADD_IMM;
13096 return OP_ISUB_IMM;
13098 return OP_IDIV_IMM;
13100 return OP_IDIV_UN_IMM;
13102 return OP_IREM_IMM;
13104 return OP_IREM_UN_IMM;
13106 return OP_IMUL_IMM;
13108 return OP_IAND_IMM;
13112 return OP_IXOR_IMM;
13114 return OP_ISHL_IMM;
13116 return OP_ISHR_IMM;
13118 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU and shift ops. */
13121 return OP_LADD_IMM;
13123 return OP_LSUB_IMM;
13125 return OP_LAND_IMM;
13129 return OP_LXOR_IMM;
13131 return OP_LSHL_IMM;
13133 return OP_LSHR_IMM;
13135 return OP_LSHR_UN_IMM;
/* Long remainder has an immediate form only on 64-bit registers. */
13136 #if SIZEOF_REGISTER == 8
13138 return OP_LREM_IMM;
/* Compares. */
13142 return OP_COMPARE_IMM;
13144 return OP_ICOMPARE_IMM;
13146 return OP_LCOMPARE_IMM;
/* Membase stores: register source -> immediate source. */
13148 case OP_STORE_MEMBASE_REG:
13149 return OP_STORE_MEMBASE_IMM;
13150 case OP_STOREI1_MEMBASE_REG:
13151 return OP_STOREI1_MEMBASE_IMM;
13152 case OP_STOREI2_MEMBASE_REG:
13153 return OP_STOREI2_MEMBASE_IMM;
13154 case OP_STOREI4_MEMBASE_REG:
13155 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific opcodes. */
13157 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13159 return OP_X86_PUSH_IMM;
13160 case OP_X86_COMPARE_MEMBASE_REG:
13161 return OP_X86_COMPARE_MEMBASE_IMM;
13163 #if defined(TARGET_AMD64)
13164 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13165 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13167 case OP_VOIDCALL_REG:
13168 return OP_VOIDCALL;
13176 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode (matching size and signedness).
 * Asserts on opcodes with no mapping.
 */
13183 ldind_to_load_membase (int opcode)
13187 return OP_LOADI1_MEMBASE;
13189 return OP_LOADU1_MEMBASE;
13191 return OP_LOADI2_MEMBASE;
13193 return OP_LOADU2_MEMBASE;
13195 return OP_LOADI4_MEMBASE;
13197 return OP_LOADU4_MEMBASE;
/* Native-int and object-reference loads both use the pointer-sized load. */
13199 return OP_LOAD_MEMBASE;
13200 case CEE_LDIND_REF:
13201 return OP_LOAD_MEMBASE;
13203 return OP_LOADI8_MEMBASE;
13205 return OP_LOADR4_MEMBASE;
13207 return OP_LOADR8_MEMBASE;
13209 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode. Asserts on opcodes with no mapping.
 */
13216 stind_to_store_membase (int opcode)
13220 return OP_STOREI1_MEMBASE_REG;
13222 return OP_STOREI2_MEMBASE_REG;
13224 return OP_STOREI4_MEMBASE_REG;
/* Native-int and object-reference stores use the pointer-sized store. */
13226 case CEE_STIND_REF:
13227 return OP_STORE_MEMBASE_REG;
13229 return OP_STOREI8_MEMBASE_REG;
13231 return OP_STORER4_MEMBASE_REG;
13233 return OP_STORER8_MEMBASE_REG;
13235 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) opcode to the
 * absolute-address OP_LOAD*_MEM form. Only x86/amd64 provide these
 * opcodes; the 64-bit load additionally requires 64-bit registers.
 * NOTE(review): the non-x86 fallback return is outside this excerpt —
 * presumably -1; confirm in the full source.
 */
13242 mono_load_membase_to_load_mem (int opcode)
13244 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13245 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13247 case OP_LOAD_MEMBASE:
13248 return OP_LOAD_MEM;
13249 case OP_LOADU1_MEMBASE:
13250 return OP_LOADU1_MEM;
13251 case OP_LOADU2_MEMBASE:
13252 return OP_LOADU2_MEM;
13253 case OP_LOADI4_MEMBASE:
13254 return OP_LOADI4_MEM;
13255 case OP_LOADU4_MEMBASE:
13256 return OP_LOADU4_MEM;
13257 #if SIZEOF_REGISTER == 8
13258 case OP_LOADI8_MEMBASE:
13259 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU result is immediately stored back to memory via
 * STORE_OPCODE, return the x86/amd64 read-modify-write *_MEMBASE opcode
 * that folds the ALU op and the store into one instruction. The guard at
 * the top rejects store widths the RMW forms can't express. Returns the
 * RMW opcode, or (presumably, outside this excerpt) -1 when no folding
 * is possible.
 */
13268 op_to_op_dest_membase (int store_opcode, int opcode)
13270 #if defined(TARGET_X86)
/* x86: only pointer-sized / 32-bit stores can be folded. */
13271 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13276 return OP_X86_ADD_MEMBASE_REG;
13278 return OP_X86_SUB_MEMBASE_REG;
13280 return OP_X86_AND_MEMBASE_REG;
13282 return OP_X86_OR_MEMBASE_REG;
13284 return OP_X86_XOR_MEMBASE_REG;
13287 return OP_X86_ADD_MEMBASE_IMM;
13290 return OP_X86_SUB_MEMBASE_IMM;
13293 return OP_X86_AND_MEMBASE_IMM;
13296 return OP_X86_OR_MEMBASE_IMM;
13299 return OP_X86_XOR_MEMBASE_IMM;
13305 #if defined(TARGET_AMD64)
/* amd64: 32-bit and 64-bit stores can both be folded. */
13306 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the OP_X86_* RMW opcodes... */
13311 return OP_X86_ADD_MEMBASE_REG;
13313 return OP_X86_SUB_MEMBASE_REG;
13315 return OP_X86_AND_MEMBASE_REG;
13317 return OP_X86_OR_MEMBASE_REG;
13319 return OP_X86_XOR_MEMBASE_REG;
13321 return OP_X86_ADD_MEMBASE_IMM;
13323 return OP_X86_SUB_MEMBASE_IMM;
13325 return OP_X86_AND_MEMBASE_IMM;
13327 return OP_X86_OR_MEMBASE_IMM;
13329 return OP_X86_XOR_MEMBASE_IMM;
/* ...while 64-bit (long) ops use the OP_AMD64_* variants. */
13331 return OP_AMD64_ADD_MEMBASE_REG;
13333 return OP_AMD64_SUB_MEMBASE_REG;
13335 return OP_AMD64_AND_MEMBASE_REG;
13337 return OP_AMD64_OR_MEMBASE_REG;
13339 return OP_AMD64_XOR_MEMBASE_REG;
13342 return OP_AMD64_ADD_MEMBASE_IMM;
13345 return OP_AMD64_SUB_MEMBASE_IMM;
13348 return OP_AMD64_AND_MEMBASE_IMM;
13351 return OP_AMD64_OR_MEMBASE_IMM;
13354 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a condition-set opcode plus a 1-byte store into a single x86/amd64
 * SETcc-to-memory opcode. Only byte-sized stores qualify (SETcc writes
 * exactly one byte). The case labels for the condition opcodes (presumably
 * the CEQ/CNE-style setcc ops) are outside this excerpt — confirm in the
 * full source.
 */
13364 op_to_op_store_membase (int store_opcode, int opcode)
13366 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13369 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13370 return OP_X86_SETEQ_MEMBASE;
13372 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13373 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When sreg1 of OPCODE is defined by a memory load (LOAD_OPCODE), return
 * the x86/amd64 opcode that reads that operand directly from memory,
 * eliminating the intermediate register. Returns the folded opcode, or
 * (presumably, outside this excerpt) -1 when no folding is possible.
 */
13381 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13384 /* FIXME: This has sign extension issues */
/* Special case: compare-immediate against an unsigned byte load. */
13386 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13387 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be folded below. */
13390 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13395 return OP_X86_PUSH_MEMBASE;
13396 case OP_COMPARE_IMM:
13397 case OP_ICOMPARE_IMM:
13398 return OP_X86_COMPARE_MEMBASE_IMM;
13401 return OP_X86_COMPARE_MEMBASE_REG;
13405 #ifdef TARGET_AMD64
13406 /* FIXME: This has sign extension issues */
13408 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13409 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64: push only folds full pointer-width loads (ilp32 narrows OP_LOAD_MEMBASE). */
13414 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13415 return OP_X86_PUSH_MEMBASE;
13417 /* FIXME: This only works for 32 bit immediates
13418 case OP_COMPARE_IMM:
13419 case OP_LCOMPARE_IMM:
13420 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13421 return OP_AMD64_COMPARE_MEMBASE_IMM;
13423 case OP_ICOMPARE_IMM:
13424 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13425 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32- vs 64-bit form from the load width / ilp32 mode. */
13429 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13430 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13431 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13432 return OP_AMD64_COMPARE_MEMBASE_REG;
13435 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13436 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same idea as op_to_op_src1_membase, but for sreg2: fold a memory load
 * feeding the second source operand into a reg-memory x86/amd64 opcode.
 * Returns the folded opcode, or (presumably, outside this excerpt) -1 when
 * no folding is possible.
 */
13445 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be folded. */
13448 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13454 return OP_X86_COMPARE_REG_MEMBASE;
13456 return OP_X86_ADD_REG_MEMBASE;
13458 return OP_X86_SUB_REG_MEMBASE;
13460 return OP_X86_AND_REG_MEMBASE;
13462 return OP_X86_OR_REG_MEMBASE;
13464 return OP_X86_XOR_REG_MEMBASE;
13468 #ifdef TARGET_AMD64
/* amd64, 32-bit operand width (or pointer loads under ilp32). */
13469 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13472 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13474 return OP_X86_ADD_REG_MEMBASE;
13476 return OP_X86_SUB_REG_MEMBASE;
13478 return OP_X86_AND_REG_MEMBASE;
13480 return OP_X86_OR_REG_MEMBASE;
13482 return OP_X86_XOR_REG_MEMBASE;
/* amd64, 64-bit operand width. */
13484 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13488 return OP_AMD64_COMPARE_REG_MEMBASE;
13490 return OP_AMD64_ADD_REG_MEMBASE;
13492 return OP_AMD64_SUB_REG_MEMBASE;
13494 return OP_AMD64_AND_REG_MEMBASE;
13496 return OP_AMD64_OR_REG_MEMBASE;
13498 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to convert opcodes that are
 * software-emulated on the current architecture (long shifts on 32-bit
 * registers, mul/div/rem where MONO_ARCH_EMULATE_* is set) — emulated
 * opcodes have no usable immediate form. The guarded case labels are
 * outside this excerpt; presumably those cases return -1. Everything
 * else is delegated to mono_op_to_op_imm ().
 */
13507 mono_op_to_op_imm_noemul (int opcode)
13510 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13516 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13523 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13528 return mono_op_to_op_imm (opcode);
13533 * mono_handle_global_vregs:
13535 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13539 mono_handle_global_vregs (MonoCompile *cfg)
13541 gint32 *vreg_to_bb;
13542 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] tracks where a vreg is used: 0 = never seen,
 * block_num + 1 = seen only in that block, -1 = seen in multiple blocks.
 * NOTE(review): the allocation uses sizeof (gint32*) per entry although the
 * array holds gint32 — that over-allocates on 64-bit targets (harmless but
 * wasteful); presumably sizeof (gint32) was intended. Confirm upstream.
 */
13545 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13547 #ifdef MONO_ARCH_SIMD_INTRINSICS
13548 if (cfg->uses_simd_intrinsics)
13549 mono_simd_simplify_indirection (cfg);
13552 /* Find local vregs used in more than one bb */
13553 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13554 MonoInst *ins = bb->code;
13555 int block_num = bb->block_num;
13557 if (cfg->verbose_level > 2)
13558 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13561 for (; ins; ins = ins->next) {
13562 const char *spec = INS_INFO (ins->opcode);
13563 int regtype = 0, regindex;
13566 if (G_UNLIKELY (cfg->verbose_level > 2))
13567 mono_print_ins (ins);
/* By this point all CIL-level opcodes must have been lowered to machine IR. */
13569 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn. */
13571 for (regindex = 0; regindex < 4; regindex ++) {
13574 if (regindex == 0) {
13575 regtype = spec [MONO_INST_DEST];
13576 if (regtype == ' ')
13579 } else if (regindex == 1) {
13580 regtype = spec [MONO_INST_SRC1];
13581 if (regtype == ' ')
13584 } else if (regindex == 2) {
13585 regtype = spec [MONO_INST_SRC2];
13586 if (regtype == ' ')
13589 } else if (regindex == 3) {
13590 regtype = spec [MONO_INST_SRC3];
13591 if (regtype == ' ')
13596 #if SIZEOF_REGISTER == 4
13597 /* In the LLVM case, the long opcodes are not decomposed */
13598 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13600 * Since some instructions reference the original long vreg,
13601 * and some reference the two component vregs, it is quite hard
13602 * to determine when it needs to be global. So be conservative.
13604 if (!get_vreg_to_inst (cfg, vreg)) {
13605 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13607 if (cfg->verbose_level > 2)
13608 printf ("LONG VREG R%d made global.\n", vreg);
13612 * Make the component vregs volatile since the optimizations can
13613 * get confused otherwise.
13615 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13616 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13620 g_assert (vreg != -1);
13622 prev_bb = vreg_to_bb [vreg];
13623 if (prev_bb == 0) {
13624 /* 0 is a valid block num */
13625 vreg_to_bb [vreg] = block_num + 1;
13626 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are global by construction; no variable needed. */
13627 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13630 if (!get_vreg_to_inst (cfg, vreg)) {
13631 if (G_UNLIKELY (cfg->verbose_level > 2))
13632 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the right storage class for the vreg's regtype. */
13636 if (vreg_is_ref (cfg, vreg))
13637 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13639 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13642 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13645 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13649 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13652 g_assert_not_reached ();
13656 /* Flag as having been used in more than one bb */
13657 vreg_to_bb [vreg] = -1;
13663 /* If a variable is used in only one bblock, convert it into a local vreg */
13664 for (i = 0; i < cfg->num_varinfo; i++) {
13665 MonoInst *var = cfg->varinfo [i];
13666 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13668 switch (var->type) {
13674 #if SIZEOF_REGISTER == 8
13677 #if !defined(TARGET_X86)
13678 /* Enabling this screws up the fp stack on x86 */
13681 if (mono_arch_is_soft_float ())
13685 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13689 /* Arguments are implicitly global */
13690 /* Putting R4 vars into registers doesn't work currently */
13691 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13692 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13694 * Make that the variable's liveness interval doesn't contain a call, since
13695 * that would cause the lvreg to be spilled, making the whole optimization
13698 /* This is too slow for JIT compilation */
13700 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13702 int def_index, call_index, ins_index;
13703 gboolean spilled = FALSE;
13708 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13709 const char *spec = INS_INFO (ins->opcode);
13711 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13712 def_index = ins_index;
/*
 * NOTE(review): both sides of this || test SRC1/sreg1 — the second clause
 * presumably should test SRC2/sreg2, otherwise uses through sreg2 are
 * missed by the spill check. Confirm against upstream before changing.
 */
13714 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13715 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13716 if (call_index > def_index) {
13722 if (MONO_IS_CALL (ins))
13723 call_index = ins_index;
13733 if (G_UNLIKELY (cfg->verbose_level > 2))
13734 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Retire the variable: the vreg stays purely local from now on. */
13735 var->flags |= MONO_INST_IS_DEAD;
13736 cfg->vreg_to_inst [var->dreg] = NULL;
13743 * Compress the varinfo and vars tables so the liveness computation is faster and
13744 * takes up less space.
13747 for (i = 0; i < cfg->num_varinfo; ++i) {
13748 MonoInst *var = cfg->varinfo [i];
13749 if (pos < i && cfg->locals_start == i)
13750 cfg->locals_start = pos;
13751 if (!(var->flags & MONO_INST_IS_DEAD)) {
13753 cfg->varinfo [pos] = cfg->varinfo [i];
13754 cfg->varinfo [pos]->inst_c0 = pos;
13755 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13756 cfg->vars [pos].idx = pos;
13757 #if SIZEOF_REGISTER == 4
13758 if (cfg->varinfo [pos]->type == STACK_I8) {
13759 /* Modify the two component vars too */
13762 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13763 var1->inst_c0 = pos;
13764 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13765 var1->inst_c0 = pos;
13772 cfg->num_varinfo = pos;
13773 if (cfg->locals_start > cfg->num_varinfo)
13774 cfg->locals_start = cfg->num_varinfo;
13778 * mono_allocate_gsharedvt_vars:
13780 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13781 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13784 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* alloc0 zero-fills, so a mapping of 0 means "no gsharedvt entry" for that vreg. */
13788 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13790 for (i = 0; i < cfg->num_varinfo; ++i) {
13791 MonoInst *ins = cfg->varinfo [i];
13794 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals (i >= locals_start) get a runtime-info slot; the +1 keeps 0 free as the "unmapped" sentinel. */
13795 if (i >= cfg->locals_start) {
13797 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13798 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13799 ins->opcode = OP_GSHAREDVT_LOCAL;
13800 ins->inst_imm = idx;
/* Arguments are marked with -1: resolved via a register offset, not a slot. */
13803 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13804 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13811 * mono_spill_global_vars:
13813 * Generate spill code for variables which are not allocated to registers,
13814 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13815 * code is generated which could be optimized by the local optimization passes.
13818 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13820 MonoBasicBlock *bb;
13822 int orig_next_vreg;
13823 guint32 *vreg_to_lvreg;
13825 guint32 i, lvregs_len, lvregs_size;
13826 gboolean dest_has_lvreg = FALSE;
13827 MonoStackType stacktypes [128];
13828 MonoInst **live_range_start, **live_range_end;
13829 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13831 *need_local_opts = FALSE;
13833 memset (spec2, 0, sizeof (spec2));
13835 /* FIXME: Move this function to mini.c */
13836 stacktypes ['i'] = STACK_PTR;
13837 stacktypes ['l'] = STACK_I8;
13838 stacktypes ['f'] = STACK_R8;
13839 #ifdef MONO_ARCH_SIMD_INTRINSICS
13840 stacktypes ['x'] = STACK_VTYPE;
13843 #if SIZEOF_REGISTER == 4
13844 /* Create MonoInsts for longs */
13845 for (i = 0; i < cfg->num_varinfo; i++) {
13846 MonoInst *ins = cfg->varinfo [i];
13848 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13849 switch (ins->type) {
13854 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13857 g_assert (ins->opcode == OP_REGOFFSET);
13859 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13861 tree->opcode = OP_REGOFFSET;
13862 tree->inst_basereg = ins->inst_basereg;
13863 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13865 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13867 tree->opcode = OP_REGOFFSET;
13868 tree->inst_basereg = ins->inst_basereg;
13869 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13879 if (cfg->compute_gc_maps) {
13880 /* registers need liveness info even for !non refs */
13881 for (i = 0; i < cfg->num_varinfo; i++) {
13882 MonoInst *ins = cfg->varinfo [i];
13884 if (ins->opcode == OP_REGVAR)
13885 ins->flags |= MONO_INST_GC_TRACK;
13889 /* FIXME: widening and truncation */
13892 * As an optimization, when a variable allocated to the stack is first loaded into
13893 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13894 * the variable again.
13896 orig_next_vreg = cfg->next_vreg;
13897 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13898 lvregs_size = 1024;
13899 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13903 * These arrays contain the first and last instructions accessing a given
13905 * Since we emit bblocks in the same order we process them here, and we
13906 * don't split live ranges, these will precisely describe the live range of
13907 * the variable, i.e. the instruction range where a valid value can be found
13908 * in the variables location.
13909 * The live range is computed using the liveness info computed by the liveness pass.
13910 * We can't use vmv->range, since that is an abstract live range, and we need
13911 * one which is instruction precise.
13912 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13914 /* FIXME: Only do this if debugging info is requested */
13915 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13916 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13917 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13918 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13920 /* Add spill loads/stores */
13921 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13924 if (cfg->verbose_level > 2)
13925 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13927 /* Clear vreg_to_lvreg array */
13928 for (i = 0; i < lvregs_len; i++)
13929 vreg_to_lvreg [lvregs [i]] = 0;
13933 MONO_BB_FOR_EACH_INS (bb, ins) {
13934 const char *spec = INS_INFO (ins->opcode);
13935 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13936 gboolean store, no_lvreg;
13937 int sregs [MONO_MAX_SRC_REGS];
13939 if (G_UNLIKELY (cfg->verbose_level > 2))
13940 mono_print_ins (ins);
13942 if (ins->opcode == OP_NOP)
13946 * We handle LDADDR here as well, since it can only be decomposed
13947 * when variable addresses are known.
13949 if (ins->opcode == OP_LDADDR) {
13950 MonoInst *var = (MonoInst *)ins->inst_p0;
13952 if (var->opcode == OP_VTARG_ADDR) {
13953 /* Happens on SPARC/S390 where vtypes are passed by reference */
13954 MonoInst *vtaddr = var->inst_left;
13955 if (vtaddr->opcode == OP_REGVAR) {
13956 ins->opcode = OP_MOVE;
13957 ins->sreg1 = vtaddr->dreg;
13959 else if (var->inst_left->opcode == OP_REGOFFSET) {
13960 ins->opcode = OP_LOAD_MEMBASE;
13961 ins->inst_basereg = vtaddr->inst_basereg;
13962 ins->inst_offset = vtaddr->inst_offset;
13965 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13966 /* gsharedvt arg passed by ref */
13967 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13969 ins->opcode = OP_LOAD_MEMBASE;
13970 ins->inst_basereg = var->inst_basereg;
13971 ins->inst_offset = var->inst_offset;
13972 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13973 MonoInst *load, *load2, *load3;
13974 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13975 int reg1, reg2, reg3;
13976 MonoInst *info_var = cfg->gsharedvt_info_var;
13977 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13981 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13984 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13986 g_assert (info_var);
13987 g_assert (locals_var);
13989 /* Mark the instruction used to compute the locals var as used */
13990 cfg->gsharedvt_locals_var_ins = NULL;
13992 /* Load the offset */
13993 if (info_var->opcode == OP_REGOFFSET) {
13994 reg1 = alloc_ireg (cfg);
13995 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13996 } else if (info_var->opcode == OP_REGVAR) {
13998 reg1 = info_var->dreg;
14000 g_assert_not_reached ();
14002 reg2 = alloc_ireg (cfg);
14003 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14004 /* Load the locals area address */
14005 reg3 = alloc_ireg (cfg);
14006 if (locals_var->opcode == OP_REGOFFSET) {
14007 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14008 } else if (locals_var->opcode == OP_REGVAR) {
14009 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14011 g_assert_not_reached ();
14013 /* Compute the address */
14014 ins->opcode = OP_PADD;
14018 mono_bblock_insert_before_ins (bb, ins, load3);
14019 mono_bblock_insert_before_ins (bb, load3, load2);
14021 mono_bblock_insert_before_ins (bb, load2, load);
14023 g_assert (var->opcode == OP_REGOFFSET);
14025 ins->opcode = OP_ADD_IMM;
14026 ins->sreg1 = var->inst_basereg;
14027 ins->inst_imm = var->inst_offset;
14030 *need_local_opts = TRUE;
14031 spec = INS_INFO (ins->opcode);
14034 if (ins->opcode < MONO_CEE_LAST) {
14035 mono_print_ins (ins);
14036 g_assert_not_reached ();
14040 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14044 if (MONO_IS_STORE_MEMBASE (ins)) {
14045 tmp_reg = ins->dreg;
14046 ins->dreg = ins->sreg2;
14047 ins->sreg2 = tmp_reg;
14050 spec2 [MONO_INST_DEST] = ' ';
14051 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14052 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14053 spec2 [MONO_INST_SRC3] = ' ';
14055 } else if (MONO_IS_STORE_MEMINDEX (ins))
14056 g_assert_not_reached ();
14061 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14062 printf ("\t %.3s %d", spec, ins->dreg);
14063 num_sregs = mono_inst_get_src_registers (ins, sregs);
14064 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14065 printf (" %d", sregs [srcindex]);
14072 regtype = spec [MONO_INST_DEST];
14073 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14076 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14077 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14078 MonoInst *store_ins;
14080 MonoInst *def_ins = ins;
14081 int dreg = ins->dreg; /* The original vreg */
14083 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14085 if (var->opcode == OP_REGVAR) {
14086 ins->dreg = var->dreg;
14087 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14089 * Instead of emitting a load+store, use a _membase opcode.
14091 g_assert (var->opcode == OP_REGOFFSET);
14092 if (ins->opcode == OP_MOVE) {
14096 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14097 ins->inst_basereg = var->inst_basereg;
14098 ins->inst_offset = var->inst_offset;
14101 spec = INS_INFO (ins->opcode);
14105 g_assert (var->opcode == OP_REGOFFSET);
14107 prev_dreg = ins->dreg;
14109 /* Invalidate any previous lvreg for this vreg */
14110 vreg_to_lvreg [ins->dreg] = 0;
14114 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14116 store_opcode = OP_STOREI8_MEMBASE_REG;
14119 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14121 #if SIZEOF_REGISTER != 8
14122 if (regtype == 'l') {
14123 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14124 mono_bblock_insert_after_ins (bb, ins, store_ins);
14125 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14126 mono_bblock_insert_after_ins (bb, ins, store_ins);
14127 def_ins = store_ins;
14132 g_assert (store_opcode != OP_STOREV_MEMBASE);
14134 /* Try to fuse the store into the instruction itself */
14135 /* FIXME: Add more instructions */
14136 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14137 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14138 ins->inst_imm = ins->inst_c0;
14139 ins->inst_destbasereg = var->inst_basereg;
14140 ins->inst_offset = var->inst_offset;
14141 spec = INS_INFO (ins->opcode);
14142 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14143 ins->opcode = store_opcode;
14144 ins->inst_destbasereg = var->inst_basereg;
14145 ins->inst_offset = var->inst_offset;
14149 tmp_reg = ins->dreg;
14150 ins->dreg = ins->sreg2;
14151 ins->sreg2 = tmp_reg;
14154 spec2 [MONO_INST_DEST] = ' ';
14155 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14156 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14157 spec2 [MONO_INST_SRC3] = ' ';
14159 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14160 // FIXME: The backends expect the base reg to be in inst_basereg
14161 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14163 ins->inst_basereg = var->inst_basereg;
14164 ins->inst_offset = var->inst_offset;
14165 spec = INS_INFO (ins->opcode);
14167 /* printf ("INS: "); mono_print_ins (ins); */
14168 /* Create a store instruction */
14169 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14171 /* Insert it after the instruction */
14172 mono_bblock_insert_after_ins (bb, ins, store_ins);
14174 def_ins = store_ins;
14177 * We can't assign ins->dreg to var->dreg here, since the
14178 * sregs could use it. So set a flag, and do it after
14181 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14182 dest_has_lvreg = TRUE;
14187 if (def_ins && !live_range_start [dreg]) {
14188 live_range_start [dreg] = def_ins;
14189 live_range_start_bb [dreg] = bb;
14192 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14195 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14196 tmp->inst_c1 = dreg;
14197 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14204 num_sregs = mono_inst_get_src_registers (ins, sregs);
14205 for (srcindex = 0; srcindex < 3; ++srcindex) {
14206 regtype = spec [MONO_INST_SRC1 + srcindex];
14207 sreg = sregs [srcindex];
14209 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14210 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14211 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14212 MonoInst *use_ins = ins;
14213 MonoInst *load_ins;
14214 guint32 load_opcode;
14216 if (var->opcode == OP_REGVAR) {
14217 sregs [srcindex] = var->dreg;
14218 //mono_inst_set_src_registers (ins, sregs);
14219 live_range_end [sreg] = use_ins;
14220 live_range_end_bb [sreg] = bb;
14222 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14225 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14226 /* var->dreg is a hreg */
14227 tmp->inst_c1 = sreg;
14228 mono_bblock_insert_after_ins (bb, ins, tmp);
14234 g_assert (var->opcode == OP_REGOFFSET);
14236 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14238 g_assert (load_opcode != OP_LOADV_MEMBASE);
14240 if (vreg_to_lvreg [sreg]) {
14241 g_assert (vreg_to_lvreg [sreg] != -1);
14243 /* The variable is already loaded to an lvreg */
14244 if (G_UNLIKELY (cfg->verbose_level > 2))
14245 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14246 sregs [srcindex] = vreg_to_lvreg [sreg];
14247 //mono_inst_set_src_registers (ins, sregs);
14251 /* Try to fuse the load into the instruction */
14252 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14253 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14254 sregs [0] = var->inst_basereg;
14255 //mono_inst_set_src_registers (ins, sregs);
14256 ins->inst_offset = var->inst_offset;
14257 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14258 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14259 sregs [1] = var->inst_basereg;
14260 //mono_inst_set_src_registers (ins, sregs);
14261 ins->inst_offset = var->inst_offset;
14263 if (MONO_IS_REAL_MOVE (ins)) {
14264 ins->opcode = OP_NOP;
14267 //printf ("%d ", srcindex); mono_print_ins (ins);
14269 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14271 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14272 if (var->dreg == prev_dreg) {
14274 * sreg refers to the value loaded by the load
14275 * emitted below, but we need to use ins->dreg
14276 * since it refers to the store emitted earlier.
14280 g_assert (sreg != -1);
14281 vreg_to_lvreg [var->dreg] = sreg;
14282 if (lvregs_len >= lvregs_size) {
14283 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14284 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14285 lvregs = new_lvregs;
14288 lvregs [lvregs_len ++] = var->dreg;
14292 sregs [srcindex] = sreg;
14293 //mono_inst_set_src_registers (ins, sregs);
14295 #if SIZEOF_REGISTER != 8
14296 if (regtype == 'l') {
14297 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14298 mono_bblock_insert_before_ins (bb, ins, load_ins);
14299 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14300 mono_bblock_insert_before_ins (bb, ins, load_ins);
14301 use_ins = load_ins;
14306 #if SIZEOF_REGISTER == 4
14307 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14309 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14310 mono_bblock_insert_before_ins (bb, ins, load_ins);
14311 use_ins = load_ins;
14315 if (var->dreg < orig_next_vreg) {
14316 live_range_end [var->dreg] = use_ins;
14317 live_range_end_bb [var->dreg] = bb;
14320 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14323 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14324 tmp->inst_c1 = var->dreg;
14325 mono_bblock_insert_after_ins (bb, ins, tmp);
14329 mono_inst_set_src_registers (ins, sregs);
14331 if (dest_has_lvreg) {
14332 g_assert (ins->dreg != -1);
14333 vreg_to_lvreg [prev_dreg] = ins->dreg;
14334 if (lvregs_len >= lvregs_size) {
14335 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14336 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14337 lvregs = new_lvregs;
14340 lvregs [lvregs_len ++] = prev_dreg;
14341 dest_has_lvreg = FALSE;
14345 tmp_reg = ins->dreg;
14346 ins->dreg = ins->sreg2;
14347 ins->sreg2 = tmp_reg;
14350 if (MONO_IS_CALL (ins)) {
14351 /* Clear vreg_to_lvreg array */
14352 for (i = 0; i < lvregs_len; i++)
14353 vreg_to_lvreg [lvregs [i]] = 0;
14355 } else if (ins->opcode == OP_NOP) {
14357 MONO_INST_NULLIFY_SREGS (ins);
14360 if (cfg->verbose_level > 2)
14361 mono_print_ins_index (1, ins);
14364 /* Extend the live range based on the liveness info */
14365 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14366 for (i = 0; i < cfg->num_varinfo; i ++) {
14367 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14369 if (vreg_is_volatile (cfg, vi->vreg))
14370 /* The liveness info is incomplete */
14373 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14374 /* Live from at least the first ins of this bb */
14375 live_range_start [vi->vreg] = bb->code;
14376 live_range_start_bb [vi->vreg] = bb;
14379 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14380 /* Live at least until the last ins of this bb */
14381 live_range_end [vi->vreg] = bb->last_ins;
14382 live_range_end_bb [vi->vreg] = bb;
14389 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14390 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14392 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14393 for (i = 0; i < cfg->num_varinfo; ++i) {
14394 int vreg = MONO_VARINFO (cfg, i)->vreg;
14397 if (live_range_start [vreg]) {
14398 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14400 ins->inst_c1 = vreg;
14401 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14403 if (live_range_end [vreg]) {
14404 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14406 ins->inst_c1 = vreg;
14407 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14408 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14410 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14415 if (cfg->gsharedvt_locals_var_ins) {
14416 /* Nullify if unused */
14417 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14418 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14421 g_free (live_range_start);
14422 g_free (live_range_end);
14423 g_free (live_range_start_bb);
14424 g_free (live_range_end_bb);
14430 * - use 'iadd' instead of 'int_add'
14431 * - handling ovf opcodes: decompose in method_to_ir.
14432 * - unify iregs/fregs
14433 * -> partly done, the missing parts are:
14434 * - a more complete unification would involve unifying the hregs as well, so
14435 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14436 * would no longer map to the machine hregs, so the code generators would need to
14437 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14438 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14439 * fp/non-fp branches speeds it up by about 15%.
14440 * - use sext/zext opcodes instead of shifts
14442 * - get rid of TEMPLOADs if possible and use vregs instead
14443 * - clean up usage of OP_P/OP_ opcodes
14444 * - cleanup usage of DUMMY_USE
14445 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14447 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14448 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14449 * - make sure handle_stack_args () is called before the branch is emitted
14450 * - when the new IR is done, get rid of all unused stuff
14451 * - COMPARE/BEQ as separate instructions or unify them ?
14452 * - keeping them separate allows specialized compare instructions like
14453 * compare_imm, compare_membase
14454 * - most back ends unify fp compare+branch, fp compare+ceq
14455 * - integrate mono_save_args into inline_method
14456 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14457 * - handle long shift opts on 32 bit platforms somehow: they require
14458 * 3 sregs (2 for arg1 and 1 for arg2)
14459 * - make byref a 'normal' type.
14460 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14461 * variable if needed.
14462 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14463 * like inline_method.
14464 * - remove inlining restrictions
14465 * - fix LNEG and enable cfold of INEG
14466 * - generalize x86 optimizations like ldelema as a peephole optimization
14467 * - add store_mem_imm for amd64
14468 * - optimize the loading of the interruption flag in the managed->native wrappers
14469 * - avoid special handling of OP_NOP in passes
14470 * - move code inserting instructions into one function/macro.
14471 * - try a coalescing phase after liveness analysis
14472 * - add float -> vreg conversion + local optimizations on !x86
14473 * - figure out how to handle decomposed branches during optimizations, ie.
14474 * compare+branch, op_jump_table+op_br etc.
14475 * - promote RuntimeXHandles to vregs
14476 * - vtype cleanups:
14477 * - add a NEW_VARLOADA_VREG macro
14478 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14479 * accessing vtype fields.
14480 * - get rid of I8CONST on 64 bit platforms
14481 * - dealing with the increase in code size due to branches created during opcode
14483 * - use extended basic blocks
14484 * - all parts of the JIT
14485 * - handle_global_vregs () && local regalloc
14486 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14487 * - sources of increase in code size:
14490 * - isinst and castclass
14491 * - lvregs not allocated to global registers even if used multiple times
14492 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14494 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14495 * - add all micro optimizations from the old JIT
14496 * - put tree optimizations into the deadce pass
14497 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14498 * specific function.
14499 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14500 * fcompare + branchCC.
14501 * - create a helper function for allocating a stack slot, taking into account
14502 * MONO_CFG_HAS_SPILLUP.
14504 * - merge the ia64 switch changes.
14505 * - optimize mono_regstate2_alloc_int/float.
14506 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14507 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14508 * parts of the tree could be separated by other instructions, killing the tree
14509 * arguments, or stores killing loads etc. Also, should we fold loads into other
14510 * instructions if the result of the load is used multiple times ?
14511 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14512 * - LAST MERGE: 108395.
14513 * - when returning vtypes in registers, generate IR and append it to the end of the
14514 * last bb instead of doing it in the epilog.
14515 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14523 - When to decompose opcodes:
14524 - earlier: this makes some optimizations hard to implement, since the low level IR
14525 no longer contains the necessary information. But it is easier to do.
14526 - later: harder to implement, enables more optimizations.
14527 - Branches inside bblocks:
14528 - created when decomposing complex opcodes.
14529 - branches to another bblock: harmless, but not tracked by the branch
14530 optimizations, so need to branch to a label at the start of the bblock.
14531 - branches to inside the same bblock: very problematic, trips up the local
14532 reg allocator. Can be fixed by splitting the current bblock, but that is a
14533 complex operation, since some local vregs can become global vregs etc.
14534 - Local/global vregs:
14535 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14536 local register allocator.
14537 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14538 structure, created by mono_create_var (). Assigned to hregs or the stack by
14539 the global register allocator.
14540 - When to do optimizations like alu->alu_imm:
14541 - earlier -> saves work later on since the IR will be smaller/simpler
14542 - later -> can work on more instructions
14543 - Handling of valuetypes:
14544 - When a vtype is pushed on the stack, a new temporary is created, an
14545 instruction computing its address (LDADDR) is emitted and pushed on
14546 the stack. Need to optimize cases when the vtype is used immediately as in
14547 argument passing, stloc etc.
14548 - Instead of the to_end stuff in the old JIT, simply call the function handling
14549 the values on the stack before emitting the last instruction of the bb.
14552 #else /* !DISABLE_JIT */
14554 MONO_EMPTY_SOURCE_FILE (method_to_ir);
14556 #endif /* !DISABLE_JIT */