3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/* NOTE(review): lines in this file carry fused line-number prefixes and several
 * spans (macro closers, braces) are missing — extraction damage. Comments below
 * describe visible intent only; the code text is left untouched. */
/* Inliner cost heuristics: weight of a branch and max IL length to inline. */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current (non-wrapper) method and jump to exception_exit. */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
/* Bail out if a previous step already recorded an exception on the cfg. */
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
/* Record a FieldAccessException for (method, field) and bail out. */
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
/* Record a generic-sharing failure for OPCODE and bail out. */
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
/* Same as above, but only takes effect when compiling in gsharedvt mode. */
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
/* Record an out-of-memory condition on the cfg's MonoError and bail out. */
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
/* Disable AOT compilation for this method (logged at verbosity >= 2). */
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
/* Signal a type-load error; break_on_unverified () is a debugger hook. */
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
/* LOAD_ERROR variant that also records the offending class on the cfg. */
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
/* Convert a pending MonoError on the cfg into a compile-time exception. */
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping and inlining helpers defined later. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
/* NOTE(review): the return-type line of this declaration is missing from view. */
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 /* helper methods signatures */
/* Cached icall signatures, created once in mono_create_helper_signatures (). */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 /* type loading helpers */
/* Lazily cached lookups of corlib classes used during IR generation. */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
164 * Instruction metadata
/* First expansion of mini-ops.h: emit per-opcode dreg/sreg descriptor chars. */
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion: count the number of source registers for each opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Thin public wrappers over the static per-kind vreg allocators: integer,
 * long, float, pointer-sized, and destination-by-stack-type registers. */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification: ref, managed pointer, or plain int. */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode used to copy a value of that type between
 * registers (OP_*MOVE family). NOTE(review): several case/return lines are
 * missing from this view; comments cover only what is visible. */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* R4 values use a dedicated move only when the backend keeps r4 in fp regs. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
/* Enums are moved like their underlying primitive type. */
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Non-SIMD generic instances are handled via their container class. */
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
/* Type variables under gshared: recurse on the underlying type. */
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's incoming/outgoing CFG edges followed by
 * its instruction list, prefixed with MSG. */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
355 g_print ("%s", str->str);
356 g_string_free (str, TRUE);
/* Dump each instruction of the block. */
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/* One-time creation of the icall signatures cached in the statics above.
 * Signature strings are "<ret> <args...>" in mono's icall signature format. */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debugger hook: lets a debugger break at the point unverified IL is found.
 * MONO_NEVER_INLINE keeps it as a distinct breakpoint target. */
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
376 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException on the cfg naming the inaccessible field and
 * the accessing method. Both name strings are heap-allocated and freed here. */
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
/* Mark the compile as an inline failure (logged at verbosity >= 2). */
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/* Mark the compile as a generic-sharing failure for OPCODE.
 * NOTE(review): the trailing '\' on the if-line below is a stray
 * line-continuation (harmless but confusing) — worth removing upstream. */
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 if (cfg->verbose_level > 2) \
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* As above for gsharedvt; also stores a descriptive message on the cfg
 * (ownership of the g_strdup_printf buffer passes to cfg->exception_message). */
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
417 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
/* On unverifiable IL: under gsharedvt fall back to per-instantiation
 * compilation; otherwise invoke the debugger hook (remainder not visible). */
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * offset IP; branch targets outside the method body are unverifiable. */
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA computing sr1 + (sr2 << shift) + imm into a
 * fresh managed-pointer register. */
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
453 /* Emit conversions so both operands of a binary opcode are of the same type */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
/* Mixed r4/r8 operands: widen the r4 side to r8. */
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
/* On 64-bit targets, sign-extend an I4 operand mixed with a pointer. */
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
/* Pop two stack values, type-check/specialize the binary opcode via
 * type_from_op (), widen operands if needed, and push the result. */
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP. */
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair, wiring true/false successor
 * blocks into the CFG; flushes the eval stack when it is non-empty. */
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
554 * link_bblock: Links two basic blocks
556 * links two basic blocks in the control flow graph, the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * the control flow ends to after 'from'.
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit vs. IL offsets). */
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
/* Skip if the forward edge already exists. */
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool arrays are copy-on-grow). */
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
/* Mirror the same de-dup + grow logic for the back edge list to->in_bb. */
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
/* Public wrapper over the static link_bblock (). */
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
/* First pass: handler regions (filter/finally/fault/catch) take priority.
 * Token layout: (clause index + 1) << 8 | region kind | clause flags. */
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
/* A filter region spans [filter_offset, handler_offset). */
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) regions. */
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
/* Return whether IL offset OFFSET lies inside a finally or fault handler. */
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
/* Only finally/fault clauses are of interest here. */
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE whose protected region contains IP but
 * not TARGET — i.e. the handlers a branch from IP to TARGET leaves. */
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer save variable for an EH region, cached in
 * cfg->spvars keyed by region id. */
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at IL OFFSET. */
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for a handler at IL OFFSET,
 * cached in cfg->exvars keyed by offset. */
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack (guard not visible). */
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
/* Small integer types widen to I4 on the eval stack. */
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
/* R4 stack type depends on backend r4 support (STACK_R4 or STACK_R8). */
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying primitive type. */
797 if (type->data.klass->enumtype) {
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: valid only under generic sharing. */
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
828 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of numeric binops, indexed [lhs stack type][rhs stack type];
 * STACK_INV marks invalid IL combinations.
 * NOTE(review): some rows carry 9 entries while others carry 8 — rows appear
 * truncated by extraction; verify against upstream before relying on shape. */
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation, indexed by operand stack type. */
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...); no fp/ref entries. */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity (0 = invalid; non-zero values encode allowed subsets). */
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
/* Result type of shift ops; the shift amount (column) must be I4 or PTR. */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the opcode delta added to the generic CEE_* opcode to reach
 * the type-specific OP_* opcode, indexed by operand stack type. */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels and break statements are missing from this
 * view; comments annotate only the visible dispatch logic. */
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
/* Numeric binops: result type from bin_num_table, then specialize opcode. */
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
/* Shift ops: result type from shift_table. */
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/R/F/I variant from the lhs operand's stack type. */
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1->type twice (immediate
 * compares have one stack operand); confirm against upstream. */
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches: specialize via beqops_op_map. */
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered compare-set: only table value 1 is allowed. */
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not): result type from neg_table. */
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
/* not: valid only on integer-like stack types. */
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
/* Narrowing int conversions produce I4. */
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit targets. */
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
/* Overflow-checked conversions from unsigned to native int. */
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; variant depends on source stack type. */
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit targets. */
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8. */
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: fp results are invalid per ECMA-335. */
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
/* Memory loads: stack type follows the load width. */
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object class when no better klass is known. */
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
/* Map a CEE_LDIND_* opcode to the corlib class it loads. */
1165 ldind_to_type (int op)
1168 case CEE_LDIND_I1: return mono_defaults.sbyte_class;
1169 case CEE_LDIND_U1: return mono_defaults.byte_class;
1170 case CEE_LDIND_I2: return mono_defaults.int16_class;
1171 case CEE_LDIND_U2: return mono_defaults.uint16_class;
1172 case CEE_LDIND_I4: return mono_defaults.int32_class;
1173 case CEE_LDIND_U4: return mono_defaults.uint32_class;
1174 case CEE_LDIND_I8: return mono_defaults.int64_class;
1175 case CEE_LDIND_I: return mono_defaults.int_class;
1176 case CEE_LDIND_R4: return mono_defaults.single_class;
1177 case CEE_LDIND_R8: return mono_defaults.double_class;
1178 case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1179 default: g_error ("Unknown ldind type %d", op);
/* Table intended for stack-type vs. signature-type checks (body not visible). */
1186 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the eval-stack values in ARGS are compatible with SIG
 * (used when deciding whether a call sequence is verifiable). */
1191 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1196 switch (args->type) {
1206 for (i = 0; i < sig->param_count; ++i) {
1207 switch (args [i].type) {
/* Managed pointers are only acceptable for byref parameters (and vice versa). */
1211 if (!sig->params [i]->byref)
1215 if (sig->params [i]->byref)
1217 switch (sig->params [i]->type) {
1218 case MONO_TYPE_CLASS:
1219 case MONO_TYPE_STRING:
1220 case MONO_TYPE_OBJECT:
1221 case MONO_TYPE_SZARRAY:
1222 case MONO_TYPE_ARRAY:
/* Floating-point stack values must match an R4/R8 non-byref parameter. */
1229 if (sig->params [i]->byref)
1231 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1240 /*if (!param_table [args [i].type] [sig->params [i]->type])
1248 * When we need a pointer to the current domain many times in a method, we
1249 * call mono_domain_get() once and we store the result in a local variable.
1250 * This function returns the variable that represents the MonoDomain*.
1252 inline static MonoInst *
1253 mono_get_domainvar (MonoCompile *cfg)
1255 if (!cfg->domainvar)
1256 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 return cfg->domainvar;
1261 * The got_var contains the address of the Global Offset Table when AOT
1265 mono_get_got_var (MonoCompile *cfg)
/* Only needed when AOT-compiling on backends that require a got_var
 * (and not in llvm-only mode); otherwise returns NULL (elided here). */
1267 if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1269 if (!cfg->got_var) {
1270 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1272 return cfg->got_var;
/* Lazily create the variable holding the runtime generic context (rgctx). */
1276 mono_create_rgctx_var (MonoCompile *cfg)
1278 if (!cfg->rgctx_var) {
1279 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1280 /* force the var to be stack allocated */
1281 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/* Return the rgctx variable (creating it on demand); only valid for
 * generic-shared (gshared) compilations. */
1286 mono_get_vtable_var (MonoCompile *cfg)
1288 g_assert (cfg->gshared);
1290 mono_create_rgctx_var (cfg);
1292 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's evaluation-stack type (STACK_*) back to a MonoType.
 *   For STACK_OBJ a generic object type is returned; for value types the
 *   instruction's klass is used. Aborts on an unhandled stack type.
 */
1296 type_from_stack_type (MonoInst *ins) {
1297 switch (ins->type) {
1298 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1299 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1300 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1301 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1302 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Elided case label here (managed pointer), returning the this_arg form. */
1304 return &ins->klass->this_arg;
1305 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1306 case STACK_VTYPE: return &ins->klass->byval_arg;
1308 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Inverse of type_from_stack_type: map a MonoType to its STACK_* category
 *   (after stripping enum/underlying-type wrappers). Return statements for
 *   most cases are elided in this extraction.
 */
1313 static G_GNUC_UNUSED int
1314 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1316 t = mono_type_get_underlying_type (t);
1328 case MONO_TYPE_FNPTR:
1330 case MONO_TYPE_CLASS:
1331 case MONO_TYPE_STRING:
1332 case MONO_TYPE_OBJECT:
1333 case MONO_TYPE_SZARRAY:
1334 case MONO_TYPE_ARRAY:
/* R4 maps to cfg->r4_stack_type since its stack category depends on
 * whether the backend keeps float32 values distinct from float64. */
1340 return cfg->r4_stack_type;
1343 case MONO_TYPE_VALUETYPE:
1344 case MONO_TYPE_TYPEDBYREF:
1346 case MONO_TYPE_GENERICINST:
1347 if (mono_type_generic_inst_is_valuetype (t))
1353 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass.
 *   Case labels are partially elided in this extraction; each return
 *   corresponds to one element-type opcode.
 */
1360 array_access_to_klass (int opcode)
1364 return mono_defaults.byte_class;
1366 return mono_defaults.uint16_class;
1369 return mono_defaults.int_class;
1372 return mono_defaults.sbyte_class;
1375 return mono_defaults.int16_class;
1378 return mono_defaults.int32_class;
1380 return mono_defaults.uint32_class;
1383 return mono_defaults.int64_class;
1386 return mono_defaults.single_class;
1389 return mono_defaults.double_class;
1390 case CEE_LDELEM_REF:
1391 case CEE_STELEM_REF:
1392 return mono_defaults.object_class;
1394 g_assert_not_reached ();
1400 * We try to share variables when possible
1403 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
/* Returns (or creates) the local variable used to carry a stack value of
 * INS's type across basic-block boundaries, at stack position SLOT.
 * Reuses a cached var from cfg->intvars when one exists for (type, slot). */
1408 /* inlining can result in deeper stacks */
1409 if (slot >= cfg->header->max_stack)
1410 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) plus slot scaled by STACK_MAX. */
1412 pos = ins->type - 1 + slot * STACK_MAX;
1414 switch (ins->type) {
1421 if ((vnum = cfg->intvars [pos]))
1422 return cfg->varinfo [vnum];
1423 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1424 cfg->intvars [pos] = res->inst_c0;
1427 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair in cfg->token_info_hash for AOT, so the AOT
 * compiler can later resolve KEY back to a metadata token. */
1433 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1436 * Don't use this if a generic_context is set, since that means AOT can't
1437 * look up the method using just the image+token.
1438 * table == 0 means this is a reference made from a wrapper.
1440 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1441 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1442 jump_info_token->image = image;
1443 jump_info_token->token = token;
1444 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1449 * This function is called to handle items that are left on the evaluation stack
1450 * at basic block boundaries. What happens is that we save the values to local variables
1451 * and we reload them later when first entering the target basic block (with the
1452 * handle_loaded_temps () function).
1453 * A single joint point will use the same variables (stored in the array bb->out_stack or
1454 * bb->in_stack, if the basic block is before or after the joint point).
1456 * This function needs to be called _before_ emitting the last instruction of
1457 * the bb (i.e. before emitting a branch).
1458 * If the stack merge fails at a join point, cfg->unverifiable is set.
1461 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1464 MonoBasicBlock *bb = cfg->cbb;
1465 MonoBasicBlock *outb;
1466 MonoInst *inst, **locals;
1471 if (cfg->verbose_level > 3)
1472 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First visit: decide which variables carry the out-stack of this bblock. */
1473 if (!bb->out_scount) {
1474 bb->out_scount = count;
1475 //printf ("bblock %d has out:", bb->block_num);
1477 for (i = 0; i < bb->out_count; ++i) {
1478 outb = bb->out_bb [i];
1479 /* exception handlers are linked, but they should not be considered for stack args */
1480 if (outb->flags & BB_EXCEPTION_HANDLER)
1482 //printf (" %d", outb->block_num);
/* If a successor already has an in_stack, share those variables. */
1483 if (outb->in_stack) {
1485 bb->out_stack = outb->in_stack;
1491 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1492 for (i = 0; i < count; ++i) {
1494 * try to reuse temps already allocated for this purpouse, if they occupy the same
1495 * stack slot and if they are of the same type.
1496 * This won't cause conflicts since if 'local' is used to
1497 * store one of the values in the in_stack of a bblock, then
1498 * the same variable will be used for the same outgoing stack
1500 * This doesn't work when inlining methods, since the bblocks
1501 * in the inlined methods do not inherit their in_stack from
1502 * the bblock they are inlined to. See bug #58863 for an
1505 if (cfg->inlined_method)
1506 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1508 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bblock's out-stack as the in-stack of its successors,
 * flagging the method unverifiable on a stack-depth mismatch. */
1513 for (i = 0; i < bb->out_count; ++i) {
1514 outb = bb->out_bb [i];
1515 /* exception handlers are linked, but they should not be considered for stack args */
1516 if (outb->flags & BB_EXCEPTION_HANDLER)
1518 if (outb->in_scount) {
1519 if (outb->in_scount != bb->out_scount) {
1520 cfg->unverifiable = TRUE;
1523 continue; /* check they are the same locals */
1525 outb->in_scount = count;
1526 outb->in_stack = bb->out_stack;
1529 locals = bb->out_stack;
/* Spill the current stack values into the chosen locals. */
1531 for (i = 0; i < count; ++i) {
1532 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1533 inst->cil_code = sp [i]->cil_code;
1534 sp [i] = locals [i];
1535 if (cfg->verbose_level > 3)
1536 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1540 * It is possible that the out bblocks already have in_stack assigned, and
1541 * the in_stacks differ. In this case, we will store to all the different
1548 /* Find a bblock which has a different in_stack */
1550 while (bindex < bb->out_count) {
1551 outb = bb->out_bb [bindex];
1552 /* exception handlers are linked, but they should not be considered for stack args */
1553 if (outb->flags & BB_EXCEPTION_HANDLER) {
1557 if (outb->in_stack != locals) {
1558 for (i = 0; i < count; ++i) {
1559 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1560 inst->cil_code = sp [i]->cil_code;
1561 sp [i] = locals [i];
1562 if (cfg->verbose_level > 3)
1563 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1565 locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 *   Emit an instruction that loads a runtime constant identified by
 *   (patch_type, data): an AOT constant when AOT-compiling, otherwise the
 *   patch target is resolved immediately and emitted as a pointer constant.
 */
1575 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1579 if (cfg->compile_aot) {
1580 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1586 ji.type = patch_type;
1587 ji.data.target = data;
1588 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1589 mono_error_assert_ok (&error);
1591 EMIT_NEW_PCONST (cfg, ins, target);
/* Create a fast OP_TLS_GET load for KEY when a static TLS offset is known
 * and the architecture supports fast TLS; unusable under AOT (elided
 * early-return). Returns the instruction, or falls through otherwise. */
1597 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1599 int tls_offset = mono_tls_get_tls_offset (key);
1601 if (cfg->compile_aot)
1604 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1606 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1607 ins->dreg = mono_alloc_preg (cfg);
1608 ins->inst_offset = tls_offset;
/* Counterpart of mono_create_fast_tls_getter: emit a fast OP_TLS_SET storing
 * VALUE at KEY's static TLS offset when available (not under AOT). */
1615 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1617 int tls_offset = mono_tls_get_tls_offset (key);
1619 if (cfg->compile_aot)
1622 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1624 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1625 ins->sreg1 = value->dreg;
1626 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 *   Emit IR to read the TLS slot KEY. Prefers the fast inline getter; the
 *   fallback is an indirect call through a TLS trampoline under AOT, or a
 *   JIT icall to the runtime's TLS getter otherwise.
 */
1634 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1636 MonoInst *fast_tls = NULL;
1638 if (!mini_get_debug_options ()->use_fallback_tls)
1639 fast_tls = mono_create_fast_tls_getter (cfg, key);
1642 MONO_ADD_INS (cfg->cbb, fast_tls);
1646 if (cfg->compile_aot) {
1649 * tls getters are critical pieces of code and we don't want to resolve them
1650 * through the standard plt/tramp mechanism since we might expose ourselves
1651 * to crashes and infinite recursions.
1653 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1654 return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1656 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1657 return mono_emit_jit_icall (cfg, getter, NULL);
/* Emit IR to write VALUE into TLS slot KEY; mirrors mono_create_tls_get
 * (fast setter, AOT trampoline fallback, or JIT icall fallback). */
1662 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1664 MonoInst *fast_tls = NULL;
1666 if (!mini_get_debug_options ()->use_fallback_tls)
1667 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1670 MONO_ADD_INS (cfg->cbb, fast_tls);
1674 if (cfg->compile_aot) {
1676 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1677 return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1679 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1680 return mono_emit_jit_icall (cfg, setter, &value);
1687 * Emit IR to push the current LMF onto the LMF stack.
1690 emit_push_lmf (MonoCompile *cfg)
1693 * Emit IR to push the LMF:
1694 * lmf_addr = <lmf_addr from tls>
1695 * lmf->lmf_addr = lmf_addr
1696 * lmf->prev_lmf = *lmf_addr
1699 MonoInst *ins, *lmf_ins;
1704 int lmf_reg, prev_lmf_reg;
1706 * Store lmf_addr in a variable, so it can be allocated to a global register.
1708 if (!cfg->lmf_addr_var)
1709 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from jit_tls + offsetof(lmf); the other reads
 * it directly from the TLS_KEY_LMF_ADDR slot (branching elided here). */
1712 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1714 int jit_tls_dreg = ins->dreg;
1716 lmf_reg = alloc_preg (cfg);
1717 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1719 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1722 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1724 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1725 lmf_reg = ins->dreg;
1727 prev_lmf_reg = alloc_preg (cfg);
1728 /* Save previous_lmf */
1729 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1730 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this frame's LMF the head of the LMF list: *lmf_addr = &lmf. */
1732 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1738 * Emit IR to pop the current LMF from the LMF stack.
1741 emit_pop_lmf (MonoCompile *cfg)
1743 int lmf_reg, lmf_addr_reg;
1749 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1750 lmf_reg = ins->dreg;
1754 * Emit IR to pop the LMF:
1755 * *(lmf->lmf_addr) = lmf->prev_lmf
1757 /* This could be called before emit_push_lmf () */
1758 if (!cfg->lmf_addr_var)
1759 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1760 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1762 prev_lmf_reg = alloc_preg (cfg);
/* Restore the saved previous_lmf as the new list head. */
1763 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1764 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *   Select the call opcode for a call returning TYPE: the *_REG variant for
 *   indirect calls (calli), *_MEMBASE for virtual calls, or the plain direct
 *   opcode. Enums and generic instances are lowered to their underlying type
 *   and re-dispatched (goto targets elided in this extraction).
 */
1768 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1771 type = mini_get_underlying_type (type);
1772 switch (type->type) {
1773 case MONO_TYPE_VOID:
1774 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1781 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1785 case MONO_TYPE_FNPTR:
1786 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1787 case MONO_TYPE_CLASS:
1788 case MONO_TYPE_STRING:
1789 case MONO_TYPE_OBJECT:
1790 case MONO_TYPE_SZARRAY:
1791 case MONO_TYPE_ARRAY:
1792 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1795 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1798 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1800 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1802 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1803 case MONO_TYPE_VALUETYPE:
1804 if (type->data.klass->enumtype) {
1805 type = mono_class_enum_basetype (type->data.klass);
1808 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1809 case MONO_TYPE_TYPEDBYREF:
1810 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1811 case MONO_TYPE_GENERICINST:
1812 type = &type->data.generic_class->container_class->byval_arg;
1815 case MONO_TYPE_MVAR:
1817 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1819 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/* TRUE if t is a primitive scalar: BOOLEAN..U8 or native int/uint (I/U).
 * Floating point types are excluded. */
1824 //XXX this ignores if t is byref
1825 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1828 * target_type_is_incompatible:
1829 * @cfg: MonoCompile context
1831 * Check that the item @arg on the evaluation stack can be stored
1832 * in the target type (can be a local, or field, etc).
1833 * The cfg arg can be used to check if we need verification or just
1836 * Returns: non-0 value if arg can't be stored on a target.
1839 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1841 MonoType *simple_type;
/* Byref targets: accept a managed pointer (STACK_MP) when the pointed-to
 * (lowered) classes match, a native int target, or a primitive byref whose
 * source is at least as large; also accept unmanaged pointers (STACK_PTR). */
1844 if (target->byref) {
1845 /* FIXME: check that the pointed to types match */
1846 if (arg->type == STACK_MP) {
1847 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1848 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1849 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1851 /* if the target is native int& or same type */
1852 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1855 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1856 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1857 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1861 if (arg->type == STACK_PTR)
1866 simple_type = mini_get_underlying_type (target);
1867 switch (simple_type->type) {
1868 case MONO_TYPE_VOID:
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1880 /* STACK_MP is needed when setting pinned locals */
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1886 case MONO_TYPE_FNPTR:
1888 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1889 * in native int. (#688008).
1891 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1894 case MONO_TYPE_CLASS:
1895 case MONO_TYPE_STRING:
1896 case MONO_TYPE_OBJECT:
1897 case MONO_TYPE_SZARRAY:
1898 case MONO_TYPE_ARRAY:
1899 if (arg->type != STACK_OBJ)
1901 /* FIXME: check type compatibility */
1905 if (arg->type != STACK_I8)
1909 if (arg->type != cfg->r4_stack_type)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 MonoClass *target_class;
1933 if (arg->type != STACK_VTYPE)
1935 klass = mono_class_from_mono_type (simple_type);
1936 target_class = mono_class_from_mono_type (target);
1937 /* The second cases is needed when doing partial sharing */
1938 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1942 if (arg->type != STACK_OBJ)
1944 /* FIXME: check type compatibility */
1948 case MONO_TYPE_MVAR:
1949 g_assert (cfg->gshared);
1950 if (mini_type_var_is_vt (simple_type)) {
1951 if (arg->type != STACK_VTYPE)
1954 if (arg->type != STACK_OBJ)
1959 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1965 * Prepare arguments for passing to a function call.
1966 * Return a non-zero value if the arguments can't be passed to the given
1968 * The type checks are not yet complete and some conversions may need
1969 * casts on 32 or 64 bit architectures.
1971 * FIXME: implement this using target_type_is_incompatible ()
1974 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1976 MonoType *simple_type;
/* Check the 'this' argument first (when present — condition elided). */
1980 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1984 for (i = 0; i < sig->param_count; ++i) {
1985 if (sig->params [i]->byref) {
1986 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1990 simple_type = mini_get_underlying_type (sig->params [i]);
1992 switch (simple_type->type) {
1993 case MONO_TYPE_VOID:
2002 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2008 case MONO_TYPE_FNPTR:
2009 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2012 case MONO_TYPE_CLASS:
2013 case MONO_TYPE_STRING:
2014 case MONO_TYPE_OBJECT:
2015 case MONO_TYPE_SZARRAY:
2016 case MONO_TYPE_ARRAY:
2017 if (args [i]->type != STACK_OBJ)
2022 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != cfg->r4_stack_type)
2030 if (args [i]->type != STACK_R8)
2033 case MONO_TYPE_VALUETYPE:
2034 if (simple_type->data.klass->enumtype) {
2035 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_TYPEDBYREF:
2042 if (args [i]->type != STACK_VTYPE)
2045 case MONO_TYPE_GENERICINST:
2046 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2049 case MONO_TYPE_MVAR:
2051 if (args [i]->type != STACK_VTYPE)
2055 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALL_MEMBASE (virtual-call) opcode to the direct-call form.
 * Return statements for each case are elided in this extraction. */
2063 callvirt_to_call (int opcode)
2066 case OP_CALL_MEMBASE:
2068 case OP_VOIDCALL_MEMBASE:
2070 case OP_FCALL_MEMBASE:
2072 case OP_RCALL_MEMBASE:
2074 case OP_VCALL_MEMBASE:
2076 case OP_LCALL_MEMBASE:
2079 g_assert_not_reached ();
/* Map an OP_*CALL_MEMBASE opcode to the register-indirect (OP_*CALL_REG)
 * form, used when the call target address is in a register. */
2086 callvirt_to_call_reg (int opcode)
2089 case OP_CALL_MEMBASE:
2091 case OP_VOIDCALL_MEMBASE:
2092 return OP_VOIDCALL_REG;
2093 case OP_FCALL_MEMBASE:
2094 return OP_FCALL_REG;
2095 case OP_RCALL_MEMBASE:
2096 return OP_RCALL_REG;
2097 case OP_VCALL_MEMBASE:
2098 return OP_VCALL_REG;
2099 case OP_LCALL_MEMBASE:
2100 return OP_LCALL_REG;
2102 g_assert_not_reached ();
2108 /* Either METHOD or IMT_ARG needs to be set */
2110 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* Materialize the IMT argument (either the given IMT register value or a
 * METHODCONST runtime constant) into a register and attach it to CALL.
 * The LLVM and non-LLVM paths differ in how the register is recorded. */
2114 if (COMPILE_LLVM (cfg)) {
2116 method_reg = alloc_preg (cfg);
2117 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2119 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2120 method_reg = ins->dreg;
2124 call->imt_arg_reg = method_reg;
2126 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2131 method_reg = alloc_preg (cfg);
2132 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2134 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2135 method_reg = ins->dreg;
2138 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo patch record from the given mempool, filling in
 * its type and target (ip/type field assignments elided here). */
2141 static MonoJumpInfo *
2142 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2144 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2148 ji->data.target = target;
/* Wrapper over mono_class_check_context_used for the JIT (guard elided). */
2154 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2157 return mono_class_check_context_used (klass);
/* Wrapper over mono_method_check_context_used for the JIT (guard elided). */
2163 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2166 return mono_method_check_context_used (method);
2172 * check_method_sharing:
2174 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2177 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2179 gboolean pass_vtable = FALSE;
2180 gboolean pass_mrgctx = FALSE;
/* vtable is passed for static/valuetype methods on generic classes when the
 * target method is sharable and has no method-level generic context. */
2182 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2183 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2184 gboolean sharable = FALSE;
2186 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2190 * Pass vtable iff target method might
2191 * be shared, which means that sharing
2192 * is enabled for its class and its
2193 * context is sharable (and it's not a
2196 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx is passed instead for methods with a method-level generic
 * context; the two are mutually exclusive (see assert). */
2200 if (mini_method_get_context (cmethod) &&
2201 mini_method_get_context (cmethod)->method_inst) {
2202 g_assert (!pass_vtable);
2204 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2207 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2212 if (out_pass_vtable)
2213 *out_pass_vtable = pass_vtable;
2214 if (out_pass_mrgctx)
2215 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for SIG and arrange its outgoing arguments.
 *   CALLI selects an indirect call, VIRTUAL_ a membase call, TAIL a
 *   tail call; RGCTX/UNBOX_TRAMPOLINE set the corresponding flags.
 *   Returns the (not yet inserted) call instruction.
 */
2218 inline static MonoCallInst *
2219 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2220 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target)
2224 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2232 mini_profiler_emit_tail_call (cfg, target);
2234 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2236 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2239 call->signature = sig;
2240 call->rgctx_reg = rgctx;
2241 sig_ret = mini_get_underlying_type (sig->ret);
2243 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype returns either reuse cfg->vret_addr or allocate a temp whose
 * address is produced by OP_OUTARG_VTRETADDR (see comment below). */
2246 if (mini_type_is_vtype (sig_ret)) {
2247 call->vret_var = cfg->vret_addr;
2248 //g_assert_not_reached ();
2250 } else if (mini_type_is_vtype (sig_ret)) {
2251 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2254 temp->backend.is_pinvoke = sig->pinvoke;
2257 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2258 * address of return value to increase optimization opportunities.
2259 * Before vtype decomposition, the dreg of the call ins itself represents the
2260 * fact the call modifies the return value. After decomposition, the call will
2261 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2262 * will be transformed into an LDADDR.
2264 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2265 loada->dreg = alloc_preg (cfg);
2266 loada->inst_p0 = temp;
2267 /* We reference the call too since call->dreg could change during optimization */
2268 loada->inst_p1 = call;
2269 MONO_ADD_INS (cfg->cbb, loada);
2271 call->inst.dreg = temp->dreg;
2273 call->vret_var = loada;
2274 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2275 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2277 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2278 if (COMPILE_SOFT_FLOAT (cfg)) {
2280 * If the call has a float argument, we would need to do an r8->r4 conversion using
2281 * an icall, but that cannot be done during the call sequence since it would clobber
2282 * the call registers + the stack. So we do it before emitting the call.
2284 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2286 MonoInst *in = call->args [i];
2288 if (i >= sig->hasthis)
2289 t = sig->params [i - sig->hasthis];
2291 t = &mono_defaults.int_class->byval_arg;
2292 t = mono_type_get_underlying_type (t);
2294 if (!t->byref && t->type == MONO_TYPE_R4) {
2295 MonoInst *iargs [1];
2299 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2301 /* The result will be in an int vreg */
2302 call->args [i] = conv;
2308 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (LLVM or native arch) lay out the outgoing args. */
2311 if (COMPILE_LLVM (cfg))
2312 mono_llvm_emit_call (cfg, call);
2314 mono_arch_emit_call (cfg, call);
2316 mono_arch_emit_call (cfg, call);
2319 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2320 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument register to CALL and record that this method
 * uses the rgctx register. */
2326 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2328 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2329 cfg->uses_rgctx_reg = TRUE;
2330 call->rgctx_reg = TRUE;
2332 call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG, optionally
 *   passing an IMT argument and/or an rgctx argument. For pinvoke wrappers
 *   with callconv checking enabled, the stack pointer is saved before and
 *   compared after the call, throwing ExecutionEngineException on mismatch.
 */
2337 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2342 gboolean check_sp = FALSE;
2344 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2345 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2347 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value early; it is attached to the call at the end. */
2352 rgctx_reg = mono_alloc_preg (cfg);
2353 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2357 if (!cfg->stack_inbalance_var)
2358 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2360 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2361 ins->dreg = cfg->stack_inbalance_var->dreg;
2362 MONO_ADD_INS (cfg->cbb, ins);
2365 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
2367 call->inst.sreg1 = addr->dreg;
2370 emit_imt_argument (cfg, call, NULL, imt_arg);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call SP check: compare saved SP against current SP. */
2377 sp_reg = mono_alloc_preg (cfg);
2379 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2381 MONO_ADD_INS (cfg->cbb, ins);
2383 /* Restore the stack so we don't crash when throwing the exception */
2384 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2385 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2386 MONO_ADD_INS (cfg->cbb, ins);
2388 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2389 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2393 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2395 return (MonoInst*)call;
/* Forward declaration; the definition appears later in this file. */
2399 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit IR for a (possibly virtual/tail/remoting) managed call to METHOD.
 *   Chooses between: remoting wrappers, llvm-only virtual dispatch, direct
 *   calls (non-virtual or sealed), delegate Invoke fast path, and vtable/IMT
 *   based virtual dispatch. Returns the call instruction.
 */
2402 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2403 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2405 #ifndef DISABLE_REMOTING
2406 gboolean might_be_remote = FALSE;
2408 gboolean virtual_ = this_ins != NULL;
2409 gboolean enable_for_aot = TRUE;
2412 MonoInst *call_target = NULL;
2414 gboolean need_unbox_trampoline;
2417 sig = mono_method_signature (method);
2419 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2420 g_assert_not_reached ();
2423 rgctx_reg = mono_alloc_preg (cfg);
2424 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2427 if (method->string_ctor) {
2428 /* Create the real signature */
2429 /* FIXME: Cache these */
2430 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2431 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2436 context_used = mini_method_check_context_used (cfg, method);
2438 #ifndef DISABLE_REMOTING
2439 might_be_remote = this_ins && sig->hasthis &&
2440 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2441 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2443 if (might_be_remote && context_used) {
2446 g_assert (cfg->gshared);
2448 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2450 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2454 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2455 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2457 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2459 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
2461 #ifndef DISABLE_REMOTING
2462 if (might_be_remote)
2463 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2466 call->method = method;
2467 call->inst.flags |= MONO_INST_HAS_METHOD;
2468 call->inst.inst_left = this_ins;
2469 call->tail_call = tail;
2472 int vtable_reg, slot_reg, this_reg;
2475 this_reg = this_ins->dreg;
/* Fast path: delegate Invoke calls go through delegate->invoke_impl. */
2477 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2478 MonoInst *dummy_use;
2480 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2482 /* Make a call to delegate->invoke_impl */
2483 call->inst.inst_basereg = this_reg;
2484 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2485 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2487 /* We must emit a dummy use here because the delegate trampoline will
2488 replace the 'this' argument with the delegate target making this activation
2489 no longer a root for the delegate.
2490 This is an issue for delegates that target collectible code such as dynamic
2491 methods of GC'able assemblies.
2493 For a test case look into #667921.
2495 FIXME: a dummy use is not the best way to do it as the local register allocator
2496 will put it on a caller save register and spil it around the call.
2497 Ideally, we would either put it on a callee save register or only do the store part.
2499 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2501 return (MonoInst*)call;
/* Static dispatch: the target is non-virtual, or virtual-but-final. */
2504 if ((!cfg->compile_aot || enable_for_aot) &&
2505 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2506 (MONO_METHOD_IS_FINAL (method) &&
2507 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2508 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2510 * the method is not virtual, we just need to ensure this is not null
2511 * and then we can call the method directly.
2513 #ifndef DISABLE_REMOTING
2514 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2516 * The check above ensures method is not gshared, this is needed since
2517 * gshared methods can't have wrappers.
2519 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2523 if (!method->string_ctor)
2524 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2526 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2527 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2529 * the method is virtual, but we can statically dispatch since either
2530 * it's class or the method itself are sealed.
2531 * But first we need to ensure it's not a null reference.
2533 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2535 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2536 } else if (call_target) {
2537 vtable_reg = alloc_preg (cfg);
2538 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2540 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2541 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!' here looks like it was meant to be '~' (bit clear);
 * as written it ANDs with the logical negation — confirm upstream. */
2542 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* Virtual dispatch through the vtable (interface calls use an IMT slot
 * at a negative offset from the vtable). */
2544 vtable_reg = alloc_preg (cfg);
2545 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2546 if (mono_class_is_interface (method->klass)) {
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
2550 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2552 slot_reg = vtable_reg;
2553 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2554 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2556 g_assert (mono_method_signature (method)->generic_param_count);
2557 emit_imt_argument (cfg, call, call->method, imt_arg);
2561 call->inst.sreg1 = slot_reg;
2562 call->inst.inst_offset = offset;
2563 call->is_virtual = TRUE;
2567 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2570 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2572 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Emit a call to METHOD with arguments ARGS and receiver THIS_INS.
 * Thin convenience wrapper around mono_emit_method_call_full () using the
 * method's own signature and no imt/rgctx arguments.
 */
2576 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2578 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): this view elides some lines of the body (FUNC is presumably
 * stored into the call's fptr in an elided line — confirm against full source).
 */
2582 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* Plain call: no virtual dispatch, no tail call, no imt/rgctx argument. */
2589 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
2592 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2594 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C implementation is FUNC, going
 * through the icall's managed wrapper.
 */
2598 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2600 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
/* NOTE(review): INFO is dereferenced without a visible NULL check — FUNC is
 * presumably always a registered icall; confirm callers guarantee this. */
2604 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2608 * mono_emit_abs_call:
2610 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2612 inline static MonoInst*
2613 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2614 MonoMethodSignature *sig, MonoInst **args)
2616 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2620 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the cfg-wide table mapping patch-info "addresses" back to
 * their MonoJumpInfo, keyed by pointer identity (NULL hash/equal funcs). */
2623 if (cfg->abs_patches == NULL)
2624 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2625 g_hash_table_insert (cfg->abs_patches, ji, ji);
2626 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so the backend resolves it instead of calling it. */
2627 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG extended with one extra trailing native-int
 * parameter, used to pass an rgctx/extra argument on indirect calls.
 * The copy is g_malloc ()ed and never freed (see FIXME below).
 */
2631 static MonoMethodSignature*
2632 sig_to_rgctx_sig (MonoMethodSignature *sig)
2634 // FIXME: memory allocation
2635 MonoMethodSignature *res;
2638 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2639 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2640 res->param_count = sig->param_count + 1;
2641 for (i = 0; i < sig->param_count; ++i)
2642 res->params [i] = sig->params [i];
/* The extra argument is typed as a native int (this_arg of the int class). */
2643 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2647 /* Make an indirect call to FSIG passing an additional argument */
2649 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2651 MonoMethodSignature *csig;
/* Small on-stack buffer to avoid a mempool allocation for short arg lists. */
2652 MonoInst *args_buf [16];
2654 int i, pindex, tmp_reg;
2656 /* Make a call with an rgctx/extra arg */
2657 if (fsig->param_count + 2 < 16)
2660 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* Copy the receiver (if any) and the normal parameters. */
2663 args [pindex ++] = orig_args [0];
2664 for (i = 0; i < fsig->param_count; ++i)
2665 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument held in ARG_REG as the last parameter. */
2666 tmp_reg = alloc_preg (cfg);
2667 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* Widen the signature with the extra native-int parameter to match. */
2668 csig = sig_to_rgctx_sig (fsig);
2669 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2672 /* Emit an indirect call to the function descriptor ADDR */
2674 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2676 int addr_reg, arg_reg;
2677 MonoInst *call_target;
/* Only valid in llvm-only mode, where function descriptors are used. */
2679 g_assert (cfg->llvm_only);
2682 * addr points to a <addr, arg> pair, load both of them, and
2683 * make a call to addr, passing arg as an extra arg.
/* Load the code address (first slot of the descriptor). */
2685 addr_reg = alloc_preg (cfg);
2686 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
/* Load the extra argument (second slot, one pointer past the first). */
2687 arg_reg = alloc_preg (cfg);
2688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2690 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls can be called directly, without their wrapper.
 * NOTE(review): the TRUE/FALSE return lines are elided in this view; the
 * visible conditions below are the disabling cases.
 */
2694 direct_icalls_enabled (MonoCompile *cfg)
2698 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2700 if (cfg->compile_llvm && !cfg->llvm_only)
/* Sequence points need the wrapper frame; direct icalls can also be disabled explicitly. */
2703 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall described by INFO, inlining its wrapper
 * when possible, otherwise calling through the wrapper.
 */
2709 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2712 * Call the jit icall without a wrapper if possible.
2713 * The wrapper is needed for the following reasons:
2714 * - to handle exceptions thrown using mono_raise_exceptions () from the
2715 * icall function. The EH code needs the lmf frame pushed by the
2716 * wrapper to be able to unwind back to managed code.
2717 * - to be able to do stack walks for asynchronously suspended
2718 * threads when debugging.
2720 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and cache the icall wrapper on INFO. */
2724 if (!info->wrapper_method) {
2725 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2726 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
2728 mono_memory_barrier ();
2732 * Inline the wrapper method, which is basically a call to the C icall, and
2733 * an exception check.
2735 costs = inline_method (cfg, info->wrapper_method, NULL,
2736 args, NULL, il_offset, TRUE);
2737 g_assert (costs > 0);
/* NOTE(review): only non-void icalls are expected here — confirm callers. */
2738 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: call through the wrapper. */
2742 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call in INS when the callee might return a
 * small integer without initialized upper bits (pinvoke, or LLVM).
 */
2747 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2749 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2750 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2754 * Native code might return non register sized integers
2755 * without initializing the upper bits.
/* Pick the sign/zero-extension matching the return type's load opcode. */
2757 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2758 case OP_LOADI1_MEMBASE:
2759 widen_op = OP_ICONV_TO_I1;
2761 case OP_LOADU1_MEMBASE:
2762 widen_op = OP_ICONV_TO_U1;
2764 case OP_LOADI2_MEMBASE:
2765 widen_op = OP_ICONV_TO_I2;
2767 case OP_LOADU2_MEMBASE:
2768 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening. */
2774 if (widen_op != -1) {
2775 int dreg = alloc_preg (cfg);
2778 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2779 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit IR which throws a MethodAccessException for CALLER accessing CALLEE.
 */
2790 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2792 MonoInst *args [16];
/* Both methods are passed as runtime method handles, rgctx-aware. */
2794 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2795 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2797 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * mini_get_memcpy_method:
 *
 *   Return the (cached) managed String.memcpy helper from corlib.
 * Aborts if the helper is missing (corlib too old).
 */
2801 mini_get_memcpy_method (void)
2803 static MonoMethod *memcpy_method = NULL;
2804 if (!memcpy_method) {
2805 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2807 g_error ("Old corlib found. Install a new one");
2809 return memcpy_method;
/*
 * mini_emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.
 * Picks, in order of preference: a backend card-table opcode, inline
 * card-table marking IR, or a call to the GC's write barrier method.
 * No-op when write barriers are disabled for this compile.
 */
2813 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2815 int card_table_shift_bits;
2816 gpointer card_table_mask;
2818 MonoInst *dummy_use;
2819 int nursery_shift_bits;
2820 size_t nursery_size;
2822 if (!cfg->gen_write_barriers)
2825 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2827 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2829 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fastest path: a single backend opcode, JIT-only (not AOT, not LLVM). */
2831 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2834 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2835 wbarrier->sreg1 = ptr->dreg;
2836 wbarrier->sreg2 = value->dreg;
2837 MONO_ADD_INS (cfg->cbb, wbarrier);
2838 } else if (card_table) {
2839 int offset_reg = alloc_preg (cfg);
2844 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2845 * collector case, so, for the serial collector, it might slightly slow down nursery
2846 * collections. We also expect that the host system and the target system have the same card
2847 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked. */
2850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2851 if (card_table_mask)
2852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2854 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2855 * IMM's larger than 32bits.
2857 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2858 card_reg = ins->dreg;
/* Mark the card: card_table [index] = 1. */
2860 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2861 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write barrier. */
2863 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2864 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
2867 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mini_get_memset_method:
 *
 *   Return the (cached) managed String.memset helper from corlib.
 * Aborts if the helper is missing (corlib too old).
 */
2871 mini_get_memset_method (void)
2873 static MonoMethod *memset_method = NULL;
2874 if (!memset_method) {
2875 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2877 g_error ("Old corlib found. Install a new one");
2879 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR which zero-initializes the valuetype pointed to by DEST.
 * Handles gsharedvt classes (runtime-determined size, via bzero), small
 * types (inline memset) and large types (call to the memset helper).
 */
2883 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2885 MonoInst *iargs [3];
2888 MonoMethod *memset_method;
2889 MonoInst *size_ins = NULL;
2890 MonoInst *bzero_ins = NULL;
2891 static MonoMethod *bzero_method;
2893 /* FIXME: Optimize this for the case when dest is an LDADDR */
2894 mono_class_init (klass);
/* gsharedvt: size and bzero routine are only known at runtime. */
2895 if (mini_is_gsharedvt_klass (klass)) {
2896 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2897 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2899 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
2900 g_assert (bzero_method);
2902 iargs [1] = size_ins;
2903 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
2907 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
2909 n = mono_class_value_size (klass, &align);
/* Small value: inline the memset. */
2911 if (n <= sizeof (gpointer) * 8) {
2912 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Large value: call the managed memset helper (dest, 0, n). */
2915 memset_method = mini_get_memset_method ();
2917 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2918 EMIT_NEW_ICONST (cfg, iargs [2], n);
2919 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2926 * Emit IR to return either the this pointer for instance method,
2927 * or the mrgctx for static methods.
2930 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2932 MonoInst *this_ins = NULL;
/* Only meaningful under generic sharing. */
2934 g_assert (cfg->gshared);
/* Instance methods on reference types: `this` carries the context. */
2936 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2937 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2938 !method->klass->valuetype)
2939 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Methods with a method-level generic context get an mrgctx argument. */
2941 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2942 MonoInst *mrgctx_loc, *mrgctx_var;
2944 g_assert (!this_ins);
2945 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2947 mrgctx_loc = mono_get_vtable_var (cfg);
2948 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2951 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
2952 MonoInst *mrgctx_loc, *mrgctx_var;
2954 /* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
2955 mrgctx_loc = mono_get_vtable_var (cfg);
2956 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2958 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static methods / valuetype methods: the vtable carries the context. */
2961 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2962 MonoInst *vtable_loc, *vtable_var;
2964 g_assert (!this_ins);
2966 vtable_loc = mono_get_vtable_var (cfg);
2967 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If an mrgctx was passed instead, load the vtable out of it. */
2969 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2970 MonoInst *mrgctx_var = vtable_var;
2973 vtable_reg = alloc_preg (cfg);
2974 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2975 vtable_var->type = STACK_PTR;
/* Otherwise load the vtable out of `this`. */
2983 vtable_reg = alloc_preg (cfg);
2984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor for METHOD, wrapping
 * PATCH_TYPE/PATCH_DATA, with slot kind INFO_TYPE. IN_MRGCTX selects the
 * method rgctx over the class rgctx.
 */
2989 static MonoJumpInfoRgctxEntry *
2990 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2992 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2993 res->method = method;
2994 res->in_mrgctx = in_mrgctx;
2995 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2996 res->data->type = patch_type;
2997 res->data->data.target = patch_data;
2998 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch the rgctx slot described by ENTRY from RGCTX:
 * walk the rgctx array chain, and fall back to the mono_fill_*_rgctx icalls
 * when any link or the slot itself is still NULL.
 * NOTE(review): this view elides several lines (branches between the two
 * halves below); the first half appears to be the non-constant-slot path.
 */
3003 static inline MonoInst*
3004 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3006 MonoInst *args [16];
3009 // FIXME: No fastpath since the slot is not a compile time constant
3011 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3012 if (entry->in_mrgctx)
3013 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3015 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3019 * FIXME: This can be called during decompose, which is a problem since it creates
3021 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3023 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3025 MonoBasicBlock *is_null_bb, *end_bb;
3026 MonoInst *res, *ins, *call;
3029 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the slot into (mrgctx?, index) and find its array depth. */
3031 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3032 index = MONO_RGCTX_SLOT_INDEX (slot);
3034 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3035 for (depth = 0; ; ++depth) {
3036 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3038 if (index < size - 1)
3043 NEW_BBLOCK (cfg, end_bb);
3044 NEW_BBLOCK (cfg, is_null_bb);
3047 rgctx_reg = rgctx->dreg;
3049 rgctx_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3052 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3053 NEW_BBLOCK (cfg, is_null_bb);
3055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk down the chain of rgctx arrays to the target depth. */
3059 for (i = 0; i < depth; ++i) {
3060 int array_reg = alloc_preg (cfg);
3062 /* load ptr to next array */
3063 if (mrgctx && i == 0)
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3067 rgctx_reg = array_reg;
3068 /* is the ptr null? */
3069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3070 /* if yes, jump to actual trampoline */
3071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot value itself. */
3075 val_reg = alloc_preg (cfg);
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3077 /* is the slot null? */
3078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3079 /* if yes, jump to actual trampoline */
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path: slot already filled, copy it to the result reg. */
3083 res_reg = alloc_preg (cfg);
3084 MONO_INST_NEW (cfg, ins, OP_MOVE);
3085 ins->dreg = res_reg;
3086 ins->sreg1 = val_reg;
3087 MONO_ADD_INS (cfg->cbb, ins);
3089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot via the appropriate icall. */
3092 MONO_START_BB (cfg, is_null_bb);
3094 EMIT_NEW_ICONST (cfg, args [1], index);
3096 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3098 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3099 MONO_INST_NEW (cfg, ins, OP_MOVE);
3100 ins->dreg = res_reg;
3101 ins->sreg1 = call->dreg;
3102 MONO_ADD_INS (cfg->cbb, ins);
3103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3105 MONO_START_BB (cfg, end_bb);
3114 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3117 static inline MonoInst*
3118 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Either inline the fetch, or go through the lazy-fetch trampoline. */
3121 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3123 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx data of kind RGCTX_TYPE for KLASS.
 */
3127 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3128 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3130 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3131 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3133 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the rgctx data of kind RGCTX_TYPE for signature SIG.
 */
3137 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3138 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3140 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3141 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3143 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the rgctx data of kind RGCTX_TYPE for the gsharedvt
 * call described by SIG/CMETHOD.
 */
3147 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3148 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3150 MonoJumpInfoGSharedVtCall *call_info;
3151 MonoJumpInfoRgctxEntry *entry;
3154 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3155 call_info->sig = sig;
3156 call_info->method = cmethod;
3158 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3159 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3161 return emit_rgctx_fetch (cfg, rgctx, entry);
3165 * emit_get_rgctx_virt_method:
3167 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3170 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3171 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3173 MonoJumpInfoVirtMethod *info;
3174 MonoJumpInfoRgctxEntry *entry;
/* Package the (receiver class, virtual method) pair as the patch data. */
3177 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3178 info->klass = klass;
3179 info->method = virt_method;
3181 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3182 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3184 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the gsharedvt info structure for CMETHOD from the rgctx.
 */
3188 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3189 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3191 MonoJumpInfoRgctxEntry *entry;
3194 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3195 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3197 return emit_rgctx_fetch (cfg, rgctx, entry);
3201 * emit_get_rgctx_method:
3203 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3204 * normal constants, else emit a load from the rgctx.
3207 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3208 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is fully known, emit a constant. */
3210 if (!context_used) {
3213 switch (rgctx_type) {
3214 case MONO_RGCTX_INFO_METHOD:
3215 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3217 case MONO_RGCTX_INFO_METHOD_RGCTX:
3218 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info kinds are not expected without a generic context. */
3221 g_assert_not_reached ();
/* Shared case: fetch from the rgctx. */
3224 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3225 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3227 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx data of kind RGCTX_TYPE for FIELD.
 */
3232 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3233 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3235 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3236 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3238 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt-info entry for (DATA, RGCTX_TYPE),
 * reusing an existing entry when possible and appending (growing the
 * mempool-backed entries array) otherwise.
 */
3242 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3244 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3245 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing entry; LOCAL_OFFSET entries are never shared. */
3250 for (i = 0; i < info->num_entries; ++i) {
3251 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3253 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). */
3257 if (info->num_entries == info->count_entries) {
3258 MonoRuntimeGenericContextInfoTemplate *new_entries;
3259 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3261 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3263 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3264 info->entries = new_entries;
3265 info->count_entries = new_count_entries;
/* Append the new entry. */
3268 idx = info->num_entries;
3269 template_ = &info->entries [idx];
3270 template_->info_type = rgctx_type;
3271 template_->data = data;
3273 info->num_entries ++;
3279 * emit_get_gsharedvt_info:
3281 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3284 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot for (DATA, RGCTX_TYPE) ... */
3289 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3290 /* Load info->entries [idx] */
3291 dreg = alloc_preg (cfg);
3292 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * mini_emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: load the gsharedvt info of kind RGCTX_TYPE for KLASS.
 */
3298 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3300 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3304 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the static constructor / class initializer of KLASS
 * if it has not run yet, taking the vtable either from the rgctx (shared
 * code) or as a constant.
 */
3307 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3309 MonoInst *vtable_arg;
3312 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable comes from the rgctx. */
3315 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3316 klass, MONO_RGCTX_INFO_VTABLE);
3318 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3322 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* JIT backends with a dedicated opcode keep the call hidden inside it. */
3325 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3329 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3330 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3332 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3333 ins->sreg1 = vtable_arg->dreg;
3334 MONO_ADD_INS (cfg->cbb, ins);
/* Otherwise: check vtable->initialized and call the init icall if needed. */
3337 MonoBasicBlock *inited_bb;
3338 MonoInst *args [16];
3340 inited_reg = alloc_ireg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3344 NEW_BBLOCK (cfg, inited_bb);
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3349 args [0] = vtable_arg;
3350 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3352 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support, only when
 * sequence points are enabled and METHOD is the method being compiled.
 */
3357 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3361 if (cfg->gen_seq_points && cfg->method == method) {
3362 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3364 ins->flags |= MONO_INST_NONEMPTY_STACK;
3365 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source and
 * destination classes of a cast into JIT TLS, so a failing cast can
 * produce a detailed message. Skips the recording when OBJ_REG is null.
 */
3370 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3372 if (mini_get_debug_options ()->better_cast_details) {
3373 int vtable_reg = alloc_preg (cfg);
3374 int klass_reg = alloc_preg (cfg);
3375 MonoBasicBlock *is_null_bb = NULL;
3377 int to_klass_reg, context_used;
/* Optional null check: skip recording for a null object. */
3380 NEW_BBLOCK (cfg, is_null_bb);
3382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3386 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): message has a stray "." after the newline — string left
 * unchanged here; worth fixing at the source. */
3388 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record the object's class as class_cast_from. */
3392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class as class_cast_to (rgctx-aware). */
3397 context_used = mini_class_check_context_used (cfg, klass);
3399 MonoInst *class_ins;
3401 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3402 to_klass_reg = class_ins->dreg;
3404 to_klass_reg = alloc_preg (cfg);
3405 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3410 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Clear the cast-details recording made by mini_save_cast_details ().
 */
3415 mini_reset_cast_details (MonoCompile *cfg)
3417 /* Reset the variables holding the cast details */
3418 if (mini_get_debug_options ()->better_cast_details) {
3419 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3420 /* It is enough to reset the from field */
3421 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3426 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ is exactly
 * an instance of ARRAY_CLASS. The comparison strategy depends on the
 * compilation mode (shared domain, generic sharing, AOT, plain JIT).
 */
3429 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3431 int vtable_reg = alloc_preg (cfg);
3434 context_used = mini_class_check_context_used (cfg, array_class);
3436 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3438 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared domain: compare MonoClass pointers via a runtime constant. */
3440 if (cfg->opt & MONO_OPT_SHARED) {
3441 int class_reg = alloc_preg (cfg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3445 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
/* Generic sharing: compare vtables, target vtable from the rgctx. */
3447 } else if (context_used) {
3448 MonoInst *vtable_ins;
3450 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3451 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* AOT: vtable constant via patch; plain JIT: immediate vtable pointer. */
3453 if (cfg->compile_aot) {
3457 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3459 vt_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3461 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3464 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3470 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3472 mini_reset_cast_details (cfg);
3476 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3477 * generic code is generated.
3480 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing goes through the managed Nullable<T>.Unbox helper. */
3482 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3485 MonoInst *rgctx, *addr;
3487 /* FIXME: What if the class is shared? We might not
3488 have to get the address of the method from the
/* Shared code: resolve the Unbox method's code address via the rgctx. */
3490 addr = emit_get_rgctx_method (cfg, context_used, method,
3491 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3492 if (cfg->llvm_only) {
3493 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3494 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3496 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3498 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, passing the vtable if required. */
3501 gboolean pass_vtable, pass_mrgctx;
3502 MonoInst *rgctx_arg = NULL;
3504 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3505 g_assert (!pass_mrgctx);
3508 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3511 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3514 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to valuetype
 * KLASS: type-check the box, then compute the address of the payload
 * (object + sizeof (MonoObject)).
 */
3519 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3523 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3524 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3525 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3526 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3528 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3529 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3532 /* FIXME: generics */
3533 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype. */
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare element classes via an rgctx-loaded class. */
3543 MonoInst *element_class;
3545 /* This assertion is from the unboxcast insn */
3546 g_assert (klass->rank == 0);
3548 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3549 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: class check against the known element class. */
3554 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3555 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3556 mini_reset_cast_details (cfg);
/* Result: address of the unboxed payload. */
3559 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3560 MONO_ADD_INS (cfg->cbb, add);
3561 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete
 * identity (valuetype, reference type, or nullable) is only known at
 * runtime. Branches on the runtime box-type and joins on a single
 * address register, then loads the value from it.
 */
3568 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3570 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3571 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3575 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime cast check via icall (obj, klass). */
3581 args [1] = klass_inst;
3584 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3586 NEW_BBLOCK (cfg, is_ref_bb);
3587 NEW_BBLOCK (cfg, is_nullable_bb);
3588 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of KLASS. */
3589 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3596 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3597 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Valuetype path: payload is right after the MonoObject header. */
3601 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3602 MONO_ADD_INS (cfg->cbb, addr);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3607 MONO_START_BB (cfg, is_ref_bb);
3609 /* Save the ref to a temporary */
3610 dreg = alloc_ireg (cfg);
3611 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3612 addr->dreg = addr_reg;
3613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3617 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the runtime-resolved Nullable unbox routine. */
3620 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3621 MonoInst *unbox_call;
3622 MonoMethodSignature *unbox_sig;
/* Build an ad-hoc signature: KLASS (object). */
3624 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3625 unbox_sig->ret = &klass->byval_arg;
3626 unbox_sig->param_count = 1;
3627 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3630 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3632 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3634 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3635 addr->dreg = addr_reg;
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3641 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from the address all paths agreed on. */
3644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 * Emit IR to allocate an instance of KLASS.  FOR_BOX selects the boxing
 * allocator variant; CONTEXT_USED != 0 means the class must be looked up
 * through the RGCTX at runtime.  Picks, in order: a GC managed allocator
 * (fast path, known size), a domain-shared icall (MONO_OPT_SHARED), an
 * index-based mscorlib helper for out-of-line AOT code, or the generic
 * ves_icall_object_new* icalls with a vtable argument.
 */
3650 * Returns NULL and set the cfg exception on error.
3653 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3655 MonoInst *iargs [2];
/* NOTE(review): this branch (elided guard) is the context_used/shared path */
3660 MonoRgctxInfoType rgctx_info;
3661 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-dependent instance size */
3662 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3664 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3666 if (cfg->opt & MONO_OPT_SHARED)
3667 rgctx_info = MONO_RGCTX_INFO_KLASS;
3669 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3670 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3672 if (cfg->opt & MONO_OPT_SHARED) {
3673 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3675 alloc_ftn = ves_icall_object_new;
3678 alloc_ftn = ves_icall_object_new_specific;
/* Fast path: call the GC's managed allocator directly with a constant size */
3681 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3682 if (known_instance_size) {
3683 int size = mono_class_instance_size (klass);
3684 if (size < sizeof (MonoObject))
3685 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3687 EMIT_NEW_ICONST (cfg, iargs [1], size);
3689 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3692 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generics paths below */
3695 if (cfg->opt & MONO_OPT_SHARED) {
3696 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3697 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3699 alloc_ftn = ves_icall_object_new;
3700 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3701 /* This happens often in argument checking code, eg. throw new FooException... */
3702 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3703 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3704 alloc_ftn = mono_helper_newobj_mscorlib;
3706 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3707 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: record a type-load failure on the cfg (returns NULL per contract) */
3710 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3711 cfg->exception_ptr = klass;
3715 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3717 if (managed_alloc) {
3718 int size = mono_class_instance_size (klass);
3719 if (size < sizeof (MonoObject))
3720 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3722 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3723 EMIT_NEW_ICONST (cfg, iargs [1], size);
3724 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3726 alloc_ftn = ves_icall_object_new_specific;
3727 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3730 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 * Emit IR to box VAL into an instance of KLASS.  Three major cases:
 * Nullable<T> (call the class's Box method, possibly through the RGCTX),
 * gsharedvt classes (runtime three-way dispatch on CLASS_BOX_TYPE, mirroring
 * handle_unbox_gsharedvt), and the plain case (handle_alloc + store of the
 * value past the object header).
 */
3734 * Returns NULL and set the cfg exception on error.
3737 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3739 MonoInst *alloc, *ins;
3741 if (mono_class_is_nullable (klass)) {
3742 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3745 if (cfg->llvm_only && cfg->gsharedvt) {
3746 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3747 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3748 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3750 /* FIXME: What if the class is shared? We might not
3751 have to get the method address from the RGCTX. */
3752 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3753 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3754 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3756 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-context-used nullable path: direct call, passing the vtable if required */
3759 gboolean pass_vtable, pass_mrgctx;
3760 MonoInst *rgctx_arg = NULL;
3762 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3763 g_assert (!pass_mrgctx);
3766 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3769 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3772 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3776 if (mini_is_gsharedvt_klass (klass)) {
3777 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3778 MonoInst *res, *is_ref, *src_var, *addr;
3781 dreg = alloc_ireg (cfg);
3783 NEW_BBLOCK (cfg, is_ref_bb);
3784 NEW_BBLOCK (cfg, is_nullable_bb);
3785 NEW_BBLOCK (cfg, end_bb);
/* Same runtime dispatch scheme as handle_unbox_gsharedvt */
3786 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3791 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and copy the value after the header */
3794 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3797 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3798 ins->opcode = OP_STOREV_MEMBASE;
3800 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3801 res->type = STACK_OBJ;
3803 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3806 MONO_START_BB (cfg, is_ref_bb);
3808 /* val is a vtype, so has to load the value manually */
3809 src_var = get_vreg_to_inst (cfg, val->dreg);
3811 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3812 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3817 MONO_START_BB (cfg, is_nullable_bb);
3820 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3821 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3823 MonoMethodSignature *box_sig;
3826 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3827 * construct that method at JIT time, so have to do things by hand.
3829 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3830 box_sig->ret = &mono_defaults.object_class->byval_arg;
3831 box_sig->param_count = 1;
3832 box_sig->params [0] = &klass->byval_arg;
/* NOTE(review): the llvm_only selection branch between these two calls is elided here */
3835 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
3837 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3838 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3839 res->type = STACK_OBJ;
3843 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3845 MONO_START_BB (cfg, end_bb);
/* Plain (non-nullable, non-gsharedvt) box */
3849 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3853 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls may be called directly.
 * Written once under the double-check below; readers need no lock. */
3858 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 * Whether CMETHOD's icall can be invoked without going through a wrapper.
 * Only safe for icalls that never raise exceptions (directly or indirectly),
 * so this uses a small hand-maintained whitelist plus Math.
 */
3861 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
3863 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3864 if (!direct_icalls_enabled (cfg))
3868 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
3869 * Whitelist a few icalls for now.
3871 if (!direct_icall_type_hash) {
3872 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
3874 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
3875 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
3876 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
3877 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-populated table before the global pointer becomes visible */
3878 mono_memory_barrier ();
3879 direct_icall_type_hash = h;
3882 if (cmethod->klass == mono_defaults.math_class)
3884 /* No locking needed */
3885 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 * Whether CMETHOD performs a managed stack walk at runtime (here:
 * System.Type/Object "GetType"), which constrains how it may be called/inlined.
 * NOTE(review): return statements are elided in this extract.
 */
3891 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3893 if (cmethod->klass == mono_defaults.systemtype_class) {
3894 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 * Intrinsic expansion of Enum.HasFlag: load the enum value from ENUM_THIS,
 * AND it with ENUM_FLAG, and compare the result back against ENUM_FLAG for
 * equality — i.e. (this & flag) == flag.  Uses 32- or 64-bit ops depending on
 * the enum's underlying type (selection logic partially elided in this extract).
 */
3900 static G_GNUC_UNUSED MonoInst*
3901 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
3903 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
3904 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
3907 switch (enum_type->type) {
3910 #if SIZEOF_REGISTER == 8
3922 MonoInst *load, *and_, *cmp, *ceq;
3923 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3924 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3925 int dest_reg = alloc_ireg (cfg);
3927 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3928 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3929 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3930 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3932 ceq->type = STACK_I4;
/* NOTE(review): decomposition presumably applies only when the backend lacks
 * these composite opcodes — the guard is elided here; confirm in full source */
3935 load = mono_decompose_opcode (cfg, load);
3936 and_ = mono_decompose_opcode (cfg, and_);
3937 cmp = mono_decompose_opcode (cfg, cmp);
3938 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 * Inline the work of mono_delegate_ctor: allocate the delegate object of
 * KLASS, set its target/method fields (with write barrier for the target),
 * optionally reserve a per-domain code slot that is filled when METHOD is
 * compiled, and install the invoke trampoline.  VIRTUAL_ selects the virtual
 * delegate trampoline; llvm-only mode calls init-delegate icalls instead.
 */
3946 * Returns NULL and set the cfg exception on error.
3948 static G_GNUC_UNUSED MonoInst*
3949 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
3953 gpointer trampoline;
3954 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out (elided) when no virtual invoke impl exists for this signature */
3958 if (virtual_ && !cfg->llvm_only) {
3959 MonoMethod *invoke = mono_get_delegate_invoke (klass);
3962 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
3966 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
3970 /* Inline the contents of mono_delegate_ctor */
3972 /* Set target field */
3973 /* Optimize away setting of NULL target */
3974 if (!MONO_INS_IS_PCONST_NULL (target)) {
3975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3976 if (cfg->gen_write_barriers) {
3977 dreg = alloc_preg (cfg);
3978 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3979 mini_emit_write_barrier (cfg, ptr, target);
3983 /* Set method field */
3984 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3988 * To avoid looking up the compiled code belonging to the target method
3989 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3990 * store it, and we fill it after the method has been compiled.
3992 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3993 MonoInst *code_slot_ins;
3996 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared case: allocate/look up the code slot under the domain lock */
3998 domain = mono_domain_get ();
3999 mono_domain_lock (domain);
4000 if (!domain_jit_info (domain)->method_code_hash)
4001 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4002 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4004 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4005 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4007 mono_domain_unlock (domain);
4009 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4011 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines, initialize the delegate through icalls */
4014 if (cfg->llvm_only) {
4015 MonoInst *args [16];
4020 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4021 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4024 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4030 if (cfg->compile_aot) {
4031 MonoDelegateClassMethodPair *del_tramp;
4033 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4034 del_tramp->klass = klass;
4035 del_tramp->method = context_used ? NULL : method;
4036 del_tramp->is_virtual = virtual_;
4037 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4040 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4042 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4043 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4046 /* Set invoke_impl field */
4048 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual (elided branch): read invoke_impl/method_ptr out of the tramp info */
4050 dreg = alloc_preg (cfg);
4051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4052 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4054 dreg = alloc_preg (cfg);
4055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4056 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4059 dreg = alloc_preg (cfg);
4060 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4061 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4063 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 * Emit a call to the rank-specific mono_array_new_va icall for a multi-dim
 * array allocation.  The icall is vararg, so the method is flagged
 * MONO_CFG_HAS_VARARGS and LLVM compilation is disabled for it.
 */
4069 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4071 MonoJitICallInfo *info;
4073 /* Need to register the icall so it gets an icall wrapper */
4074 info = mono_get_array_new_va_icall (rank);
4076 cfg->flags |= MONO_CFG_HAS_VARARGS;
4078 /* mono_array_new_va () needs a vararg calling convention */
4079 cfg->exception_message = g_strdup ("array-new");
4080 cfg->disable_llvm = TRUE;
4082 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4083 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4087 * handle_constrained_gsharedvt_call:
4089 * Handle constrained calls where the receiver is a gsharedvt type.
4090 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * Strategy: the receiver may be instantiated as a ref type or a vtype, which
 * would require two different call sequences.  Instead, for a limited set of
 * shapes (Object methods, interface methods, <=1 simple argument), pack the
 * arguments and dispatch through the mono_gsharedvt_constrained_call icall,
 * then unbox/load the result as needed.  Other shapes trigger GSHAREDVT_FAILURE.
 */
4093 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4094 gboolean *ref_emit_widen)
4096 MonoInst *ins = NULL;
4097 gboolean emit_widen = *ref_emit_widen;
4100 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4101 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4102 * pack the arguments into an array, and do the rest of the work in in an icall.
4104 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4105 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4106 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4107 MonoInst *args [16];
4110 * This case handles calls to
4111 * - object:ToString()/Equals()/GetHashCode(),
4112 * - System.IComparable<T>:CompareTo()
4113 * - System.IEquatable<T>:Equals ()
4114 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args: [0]=receiver (elided), [1]=method, [2]=constrained class, [3]=deref flag, [4]=arg buffer */
4118 if (mono_method_check_context_used (cmethod))
4119 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4121 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4122 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4124 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4125 if (fsig->hasthis && fsig->param_count) {
4126 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4127 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4128 ins->dreg = alloc_preg (cfg);
4129 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4130 MONO_ADD_INS (cfg->cbb, ins);
4133 if (mini_is_gsharedvt_type (fsig->params [0])) {
4134 int addr_reg, deref_arg_reg;
4136 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4137 deref_arg_reg = alloc_preg (cfg);
4138 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4139 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4141 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4142 addr_reg = ins->dreg;
4143 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4145 EMIT_NEW_ICONST (cfg, args [3], 0);
4146 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4149 EMIT_NEW_ICONST (cfg, args [3], 0);
4150 EMIT_NEW_ICONST (cfg, args [4], 0);
4152 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall boxes value-type results; undo that for the caller */
4155 if (mini_is_gsharedvt_type (fsig->ret)) {
4156 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4157 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4161 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4162 MONO_ADD_INS (cfg->cbb, add);
4164 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4165 MONO_ADD_INS (cfg->cbb, ins);
4166 /* ins represents the call result */
4169 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4172 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 * If the method has a GOT variable that hasn't been materialized yet, insert
 * an OP_LOAD_GOTADDR at the very start of the entry basic block and a dummy
 * use in the exit block so liveness keeps the variable alive for the whole
 * method.  No-op when there is no got_var or it was already allocated.
 */
4181 mono_emit_load_got_addr (MonoCompile *cfg)
4183 MonoInst *getaddr, *dummy_use;
4185 if (!cfg->got_var || cfg->got_var_allocated)
4188 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4189 getaddr->cil_code = cfg->header->code;
4190 getaddr->dreg = cfg->got_var->dreg;
4192 /* Add it to the start of the first bblock */
4193 if (cfg->bb_entry->code) {
4194 getaddr->next = cfg->bb_entry->code;
4195 cfg->bb_entry->code = getaddr;
4198 MONO_ADD_INS (cfg->bb_entry, getaddr);
4200 cfg->got_var_allocated = TRUE;
4203 * Add a dummy use to keep the got_var alive, since real uses might
4204 * only be generated by the back ends.
4205 * Add it to end_bblock, so the variable's lifetime covers the whole
4207 * It would be better to make the usage of the got var explicit in all
4208 * cases when the backend needs it (i.e. calls, throw etc.), so this
4209 * wouldn't be needed.
4211 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4212 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Code-size cap for inlining; initialized once from MONO_INLINELIMIT or
 * INLINE_LENGTH_LIMIT on first use. */
4215 static int inline_limit;
4216 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 * Decide whether METHOD may be inlined into the method being compiled.
 * Rejects on: inlining disabled, excessive inline depth, unreadable header,
 * NoInlining/synchronized/MarshalByRef, code size over the limit (unless
 * AggressiveInlining), classes whose cctor would have to run inside inlined
 * code, soft-float R4 signatures, the cfg's dont_inline list, and profiler
 * call instrumentation.  NOTE(review): the actual TRUE/FALSE returns are
 * elided in this extract.
 */
4219 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4221 MonoMethodHeaderSummary header;
4223 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4224 MonoMethodSignature *sig = mono_method_signature (method);
4228 if (cfg->disable_inline)
4233 if (cfg->inline_depth > 10)
4236 if (!mono_method_get_header_summary (method, &header))
4239 /*runtime, icall and pinvoke are checked by summary call*/
4240 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4241 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4242 (mono_class_is_marshalbyref (method->klass)) ||
4246 /* also consider num_locals? */
4247 /* Do the size check early to avoid creating vtables */
4248 if (!inline_limit_inited) {
4250 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4251 inline_limit = atoi (inlinelimit);
4252 g_free (inlinelimit);
4254 inline_limit = INLINE_LENGTH_LIMIT;
4255 inline_limit_inited = TRUE;
4257 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4261 * if we can initialize the class of the method right away, we do,
4262 * otherwise we don't allow inlining if the class needs initialization,
4263 * since it would mean inserting a call to mono_runtime_class_init()
4264 * inside the inlined code
4266 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4269 if (!(cfg->opt & MONO_OPT_SHARED)) {
4270 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4271 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4272 if (method->klass->has_cctor) {
4273 vtable = mono_class_vtable (cfg->domain, method->klass);
4276 if (!cfg->compile_aot) {
4278 if (!mono_runtime_class_init_full (vtable, &error)) {
4279 mono_error_cleanup (&error);
4284 } else if (mono_class_is_before_field_init (method->klass)) {
4285 if (cfg->run_cctors && method->klass->has_cctor) {
4286 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4287 if (!method->klass->runtime_info)
4288 /* No vtable created yet */
4290 vtable = mono_class_vtable (cfg->domain, method->klass);
4293 /* This makes so that inline cannot trigger */
4294 /* .cctors: too many apps depend on them */
4295 /* running with a specific order... */
4296 if (! vtable->initialized)
4299 if (!mono_runtime_class_init_full (vtable, &error)) {
4300 mono_error_cleanup (&error);
4304 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4305 if (!method->klass->runtime_info)
4306 /* No vtable created yet */
4308 vtable = mono_class_vtable (cfg->domain, method->klass);
4311 if (!vtable->initialized)
4316 * If we're compiling for shared code
4317 * the cctor will need to be run at aot method load time, for example,
4318 * or at the end of the compilation of the inlining method.
4320 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4324 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float backends can't inline methods with float32 in the signature */
4325 if (mono_arch_is_soft_float ()) {
4327 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4329 for (i = 0; i < sig->param_count; ++i)
4330 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4335 if (g_list_find (cfg->dont_inline, method))
4338 if (mono_profiler_get_call_instrumentation_flags (method))
/*
 * mini_field_access_needs_cctor_run:
 *
 * Whether a (static) field access on KLASS from METHOD requires emitting a
 * class-initialization check.  Not needed when the vtable is already
 * initialized (JIT only), when a BeforeFieldInit class is accessed from its
 * own cctor context, or when the access is from a non-static method of the
 * same class (init already ran before the call).
 * NOTE(review): return statements are elided in this extract.
 */
4345 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4347 if (!cfg->compile_aot) {
4349 if (vtable->initialized)
4353 if (mono_class_is_before_field_init (klass)) {
4354 if (cfg->method == method)
4358 if (!mono_class_needs_cctor_run (klass, method))
4361 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4362 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS; BCHECK requests a bounds check.
 * Handles 64-bit index sign-extension, an x86/amd64 LEA fast path for
 * power-of-two element sizes, and a runtime element size fetched from the
 * RGCTX for gsharedvt element classes.  Result has type STACK_MP.
 */
4369 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4373 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4376 if (mini_is_gsharedvt_variable_klass (klass)) {
4379 mono_class_init (klass);
4380 size = mono_class_array_element_size (klass);
4383 mult_reg = alloc_preg (cfg);
4384 array_reg = arr->dreg;
4385 index_reg = index->dreg;
4387 #if SIZEOF_REGISTER == 8
4388 /* The array reg is 64 bits but the index reg is only 32 */
4389 if (COMPILE_LLVM (cfg)) {
4391 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4392 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4393 * of OP_X86_LEA for llvm.
4395 index2_reg = index_reg;
4397 index2_reg = alloc_preg (cfg);
4398 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow an I8 index to I4 */
4401 if (index->type == STACK_I8) {
4402 index2_reg = alloc_preg (cfg);
4403 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4405 index2_reg = index_reg;
4410 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4412 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fold size scaling + vector offset into a single LEA for pow2 sizes */
4413 if (size == 1 || size == 2 || size == 4 || size == 8) {
4414 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4416 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4417 ins->klass = mono_class_get_element_class (klass);
4418 ins->type = STACK_MP;
4424 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: element size is only known at runtime, read it from the RGCTX */
4427 MonoInst *rgctx_ins;
4430 g_assert (cfg->gshared);
4431 context_used = mini_class_check_context_used (cfg, klass);
4432 g_assert (context_used);
4433 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4434 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4438 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4439 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4440 ins->klass = mono_class_get_element_class (klass);
4441 ins->type = STACK_MP;
4442 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR computing the element address for a rank-2 array with element class
 * KLASS: loads the bounds array, subtracts each dimension's lower bound,
 * range-checks both adjusted indices against the dimension lengths (unsigned
 * compare throws IndexOutOfRangeException), then computes
 * (idx1 * len2 + idx2) * size + vector_offset.  Result has type STACK_MP.
 */
4448 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4450 int bounds_reg = alloc_preg (cfg);
4451 int add_reg = alloc_ireg_mp (cfg);
4452 int mult_reg = alloc_preg (cfg);
4453 int mult2_reg = alloc_preg (cfg);
4454 int low1_reg = alloc_preg (cfg);
4455 int low2_reg = alloc_preg (cfg);
4456 int high1_reg = alloc_preg (cfg);
4457 int high2_reg = alloc_preg (cfg);
4458 int realidx1_reg = alloc_preg (cfg);
4459 int realidx2_reg = alloc_preg (cfg);
4460 int sum_reg = alloc_preg (cfg);
4461 int index1, index2, tmpreg;
4465 mono_class_init (klass);
4466 size = mono_class_array_element_size (klass);
4468 index1 = index_ins1->dreg;
4469 index2 = index_ins2->dreg;
4471 #if SIZEOF_REGISTER == 8
4472 /* The array reg is 64 bits but the index reg is only 32 */
4473 if (COMPILE_LLVM (cfg)) {
4476 tmpreg = alloc_preg (cfg);
4477 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4479 tmpreg = alloc_preg (cfg);
4480 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4484 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4488 /* range checking */
4489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4490 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4493 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4494 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4496 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4497 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4498 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry lives at bounds[1] */
4500 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4501 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4502 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4503 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4504 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4506 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* address = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector) */
4508 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4509 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4511 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4512 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4514 ins->type = STACK_MP;
4516 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 * Dispatch for Get/Set/Address on multi-dim arrays: rank 1 goes through
 * mini_emit_ldelema_1_ins, rank 2 through mini_emit_ldelema_2_ins when the
 * backend supports the needed multiply, otherwise a call to the
 * marshal-generated Address helper for RANK/ELEMENT_SIZE.  IS_SET drops the
 * trailing value parameter from the rank computation.
 */
4522 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4526 MonoMethod *addr_method;
4528 MonoClass *eclass = cmethod->klass->element_class;
4530 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4533 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4535 /* emit_ldelema_2 depends on OP_LMUL */
4536 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4537 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4540 if (mini_is_gsharedvt_variable_klass (eclass))
4543 element_size = mono_class_array_element_size (eclass);
4544 addr_method = mono_marshal_get_array_address (rank, element_size);
4545 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4550 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 * args: [0]=array, [1]=index, [2]=value address.  Computes the element
 * address (no bounds check — callers already did it), then copies between
 * *args[2] and the element, direction chosen by IS_SET.  Stores into the
 * array of a reference type get a write barrier.
 */
4552 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4554 MonoInst *addr, *store, *load;
4555 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4557 /* the bounds check is already done by the callers */
4558 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4560 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4561 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4562 if (mini_type_is_reference (&eklass->byval_arg))
4563 mini_emit_write_barrier (cfg, addr, load);
/* get path: element -> *args[2] */
4565 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4566 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS's byval type is a reference type (thin wrapper over
 * mini_type_is_reference). */
4573 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4575 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 * Emit IR for stelem: sp[0]=array, sp[1]=index, sp[2]=value.  Reference-type
 * stores with SAFETY_CHECKS go through the virtual stelemref helper (which
 * performs the array covariance check); gsharedvt element types use
 * OP_STOREV_MEMBASE through a computed address; a constant index folds the
 * offset at compile time; otherwise a generic address computation plus store
 * (with write barrier for reference elements).
 */
4579 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4581 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4582 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4583 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4584 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4585 MonoInst *iargs [3];
4588 mono_class_setup_vtable (obj_array);
4589 g_assert (helper->slot);
4591 if (sp [0]->type != STACK_OBJ)
4593 if (sp [2]->type != STACK_OBJ)
4600 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4604 if (mini_is_gsharedvt_variable_klass (klass)) {
4607 // FIXME-VT: OP_ICONST optimization
4608 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4609 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4610 ins->opcode = OP_STOREV_MEMBASE;
4611 } else if (sp [1]->opcode == OP_ICONST) {
4612 int array_reg = sp [0]->dreg;
4613 int index_reg = sp [1]->dreg;
/* Constant index: fold size*index + vector offset at compile time */
4614 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4616 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4617 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4620 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4621 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4623 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4624 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4625 if (generic_class_is_reference_type (cfg, klass))
4626 mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Implement Array.UnsafeStore/UnsafeLoad: element access with the
 *   safety checks disabled (emit_array_store is called with FALSE).
 *   NOTE(review): the is_set/else split is not visible in this listing;
 *   consult the full source before editing.
 */
4633 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* store: the element class comes from the value parameter */
4638 eklass = mono_class_from_mono_type (fsig->params [2]);
/* load: the element class comes from the return type */
4640 eklass = mono_class_from_mono_type (fsig->ret);
4643 return emit_array_store (cfg, eklass, args, FALSE);
4645 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4646 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic may be implemented
 *   as a plain register move from PARAM_KLASS to RETURN_KLASS: same
 *   valuetype-ness, no GC references, no struct/scalar mixing, no
 *   floats, and either identical value sizes or both values fitting in
 *   the same (<= 4 byte) register class.
 *   Fix in this edit: four occurrences of "&param_klass" had been
 *   corrupted to "¶m_klass" ("&para" mis-decoded as the pilcrow sign),
 *   which does not compile; restored to "&param_klass".
 *   NOTE(review): this listing is missing lines (the return statements
 *   and the `align` declaration are not visible); consult the full
 *   source before editing further.
 */
4652 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4655 int param_size, return_size;
/* strip enum wrappers etc. so the raw underlying types are compared */
4657 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
4658 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4660 if (cfg->verbose_level > 3)
4661 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4663 //Don't allow mixing reference types with value types
4664 if (param_klass->valuetype != return_klass->valuetype) {
4665 if (cfg->verbose_level > 3)
4666 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
4670 if (!param_klass->valuetype) {
4671 if (cfg->verbose_level > 3)
4672 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* types holding GC references can never be blindly reinterpreted */
4677 if (param_klass->has_references || return_klass->has_references)
4680 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
4681 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4682 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4683 if (cfg->verbose_level > 3)
4684 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* floats live in different registers; a plain integer move won't do */
4688 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4689 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4690 if (cfg->verbose_level > 3)
4691 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4695 param_size = mono_class_value_size (param_klass, &align);
4696 return_size = mono_class_value_size (return_klass, &align);
4698 //We can do it if sizes match
4699 if (param_size == return_size) {
4700 if (cfg->verbose_level > 3)
4701 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4705 //No simple way to handle struct if sizes don't match
4706 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
4707 if (cfg->verbose_level > 3)
4708 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4713 * Same reg size category.
4714 * A quick note on why we don't require widening here.
4715 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4717 * Since the source value comes from a function argument, the JIT will already have
4718 * the value in a VREG and performed any widening needed before (say, when loading from a field).
4720 if (param_size <= 4 && return_size <= 4) {
4721 if (cfg->verbose_level > 3)
4722 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *   Implement Array.UnsafeMov<S,R>: allow reinterpreting the argument as
 *   the return type when is_unsafe_mov_compatible says so, either for
 *   the values themselves or for rank-1 arrays of compatible element
 *   classes.  Bails out for gsharedvt return types.
 *   NOTE(review): the emit/return lines are missing from this listing.
 */
4730 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4732 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4733 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
4735 if (mini_is_gsharedvt_variable_type (fsig->ret))
4738 //Valuetypes that are semantically equivalent or numbers than can be widened to
4739 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
4742 //Arrays of valuetypes that are semantically equivalent
4743 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic-expansion hook for constructor calls: when the arch has
 *   SIMD intrinsics and MONO_OPT_SIMD is enabled, try the SIMD
 *   expansion first, then fall back to the native-types intrinsics.
 */
4750 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4752 #ifdef MONO_ARCH_SIMD_INTRINSICS
4753 MonoInst *ins = NULL;
4755 if (cfg->opt & MONO_OPT_SIMD) {
4756 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4762 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER instruction of the given kind
 *   (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
4766 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4768 MonoInst *ins = NULL;
4769 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4770 MONO_ADD_INS (cfg->cbb, ins);
4771 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics only the LLVM backend implements.  System.Math
 *   Sin/Cos/Sqrt/Abs(double) become a unary opcode producing an R8;
 *   with MONO_OPT_CMOV, Min/Max on I4/U4/I8/U8 become the
 *   OP_{I,L}M{IN,AX}[_UN] opcodes.
 *   NOTE(review): the opcode assignments for Sin/Cos/Sqrt/Abs and the
 *   signed I4/I8 Min/Max cases sit on lines missing from this listing.
 */
4777 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4779 MonoInst *ins = NULL;
4782 /* The LLVM backend supports these intrinsics */
4783 if (cmethod->klass == mono_defaults.math_class) {
4784 if (strcmp (cmethod->name, "Sin") == 0) {
4786 } else if (strcmp (cmethod->name, "Cos") == 0) {
4788 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4790 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: result is an R8 in a freshly allocated vreg */
4794 if (opcode && fsig->param_count == 1) {
4795 MONO_INST_NEW (cfg, ins, opcode);
4796 ins->type = STACK_R8;
4797 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4798 ins->sreg1 = args [0]->dreg;
4799 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max intrinsics only when conditional moves are enabled */
4803 if (cfg->opt & MONO_OPT_CMOV) {
4804 if (strcmp (cmethod->name, "Min") == 0) {
4805 if (fsig->params [0]->type == MONO_TYPE_I4)
4807 if (fsig->params [0]->type == MONO_TYPE_U4)
4808 opcode = OP_IMIN_UN;
4809 else if (fsig->params [0]->type == MONO_TYPE_I8)
4811 else if (fsig->params [0]->type == MONO_TYPE_U8)
4812 opcode = OP_LMIN_UN;
4813 } else if (strcmp (cmethod->name, "Max") == 0) {
4814 if (fsig->params [0]->type == MONO_TYPE_I4)
4816 if (fsig->params [0]->type == MONO_TYPE_U4)
4817 opcode = OP_IMAX_UN;
4818 else if (fsig->params [0]->type == MONO_TYPE_I8)
4820 else if (fsig->params [0]->type == MONO_TYPE_U8)
4821 opcode = OP_LMAX_UN;
/* binary min/max: stack type follows the operand width (I4 vs I8) */
4825 if (opcode && fsig->param_count == 2) {
4826 MONO_INST_NEW (cfg, ins, opcode);
4827 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4828 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4829 ins->sreg1 = args [0]->dreg;
4830 ins->sreg2 = args [1]->dreg;
4831 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsic expansion that is safe to use under generic sharing:
 *   the System.Array UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
4839 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4841 if (cmethod->klass == mono_defaults.array_class) {
4842 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4843 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4844 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4845 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4846 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
4847 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mono_type_is_native_blittable:
 *   Whether values of T have the same representation in managed and
 *   native memory.  References and primitive scalars are special-cased
 *   first (their result lines are not visible in this listing); other
 *   types must be blittable and have equal native and managed sizes.
 */
4855 mono_type_is_native_blittable (MonoType *t)
4857 if (MONO_TYPE_IS_REFERENCE (t))
4860 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
4863 MonoClass *klass = mono_class_from_mono_type (t);
4865 //MonoClass::blitable depends on mono_class_setup_fields being done.
4866 mono_class_setup_fields (klass);
4867 if (!klass->blittable)
4870 // If the native marshal size is different we can't convert PtrToStructure to a type load
4871 if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
4879 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4881 MonoInst *ins = NULL;
4882 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
4884 if (cmethod->klass == mono_defaults.string_class) {
4885 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
4886 int dreg = alloc_ireg (cfg);
4887 int index_reg = alloc_preg (cfg);
4888 int add_reg = alloc_preg (cfg);
4890 #if SIZEOF_REGISTER == 8
4891 if (COMPILE_LLVM (cfg)) {
4892 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
4894 /* The array reg is 64 bits but the index reg is only 32 */
4895 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4898 index_reg = args [1]->dreg;
4900 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4902 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4903 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
4904 add_reg = ins->dreg;
4905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4908 int mult_reg = alloc_preg (cfg);
4909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4910 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4911 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4912 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
4914 type_from_op (cfg, ins, NULL, NULL);
4916 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
4917 int dreg = alloc_ireg (cfg);
4918 /* Decompose later to allow more optimizations */
4919 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4920 ins->type = STACK_I4;
4921 ins->flags |= MONO_INST_FAULT;
4922 cfg->cbb->has_array_access = TRUE;
4923 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4928 } else if (cmethod->klass == mono_defaults.object_class) {
4929 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
4930 int dreg = alloc_ireg_ref (cfg);
4931 int vt_reg = alloc_preg (cfg);
4932 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4933 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
4934 type_from_op (cfg, ins, NULL, NULL);
4937 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
4938 int dreg = alloc_ireg (cfg);
4939 int t1 = alloc_ireg (cfg);
4941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4942 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4943 ins->type = STACK_I4;
4946 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
4947 MONO_INST_NEW (cfg, ins, OP_NOP);
4948 MONO_ADD_INS (cfg->cbb, ins);
4952 } else if (cmethod->klass == mono_defaults.array_class) {
4953 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4954 return emit_array_generic_access (cfg, fsig, args, FALSE);
4955 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4956 return emit_array_generic_access (cfg, fsig, args, TRUE);
4958 #ifndef MONO_BIG_ARRAYS
4960 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4963 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
4964 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
4965 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4966 int dreg = alloc_ireg (cfg);
4967 int bounds_reg = alloc_ireg_mp (cfg);
4968 MonoBasicBlock *end_bb, *szarray_bb;
4969 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4971 NEW_BBLOCK (cfg, end_bb);
4972 NEW_BBLOCK (cfg, szarray_bb);
4974 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4975 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4978 /* Non-szarray case */
4980 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4981 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4983 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4984 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4986 MONO_START_BB (cfg, szarray_bb);
4989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4990 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
4992 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4993 MONO_START_BB (cfg, end_bb);
4995 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4996 ins->type = STACK_I4;
5002 if (cmethod->name [0] != 'g')
5005 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5006 int dreg = alloc_ireg (cfg);
5007 int vtable_reg = alloc_preg (cfg);
5008 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5009 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5010 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5011 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5012 type_from_op (cfg, ins, NULL, NULL);
5015 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5016 int dreg = alloc_ireg (cfg);
5018 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5019 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5020 type_from_op (cfg, ins, NULL, NULL);
5025 } else if (cmethod->klass == runtime_helpers_class) {
5026 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5027 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5029 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5030 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5032 g_assert (ctx->method_inst);
5033 g_assert (ctx->method_inst->type_argc == 1);
5034 MonoType *arg_type = ctx->method_inst->type_argv [0];
5040 /* Resolve the argument class as possible so we can handle common cases fast */
5041 t = mini_get_underlying_type (arg_type);
5042 klass = mono_class_from_mono_type (t);
5043 mono_class_init (klass);
5044 if (MONO_TYPE_IS_REFERENCE (t))
5045 EMIT_NEW_ICONST (cfg, ins, 1);
5046 else if (MONO_TYPE_IS_PRIMITIVE (t))
5047 EMIT_NEW_ICONST (cfg, ins, 0);
5048 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5049 EMIT_NEW_ICONST (cfg, ins, 1);
5050 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5051 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5053 g_assert (cfg->gshared);
5055 /* Have to use the original argument class here */
5056 MonoClass *arg_class = mono_class_from_mono_type (arg_type);
5057 int context_used = mini_class_check_context_used (cfg, arg_class);
5059 /* This returns 1 or 2 */
5060 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5061 int dreg = alloc_ireg (cfg);
5062 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5068 } else if (cmethod->klass == mono_defaults.monitor_class) {
5069 gboolean is_enter = FALSE;
5070 gboolean is_v4 = FALSE;
5072 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5076 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5081 * To make async stack traces work, icalls which can block should have a wrapper.
5082 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5084 MonoBasicBlock *end_bb;
5086 NEW_BBLOCK (cfg, end_bb);
5088 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5090 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5091 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5092 MONO_START_BB (cfg, end_bb);
5095 } else if (cmethod->klass == mono_defaults.thread_class) {
5096 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5097 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5098 MONO_ADD_INS (cfg->cbb, ins);
5100 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5101 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5102 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5104 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5106 if (fsig->params [0]->type == MONO_TYPE_I1)
5107 opcode = OP_LOADI1_MEMBASE;
5108 else if (fsig->params [0]->type == MONO_TYPE_U1)
5109 opcode = OP_LOADU1_MEMBASE;
5110 else if (fsig->params [0]->type == MONO_TYPE_I2)
5111 opcode = OP_LOADI2_MEMBASE;
5112 else if (fsig->params [0]->type == MONO_TYPE_U2)
5113 opcode = OP_LOADU2_MEMBASE;
5114 else if (fsig->params [0]->type == MONO_TYPE_I4)
5115 opcode = OP_LOADI4_MEMBASE;
5116 else if (fsig->params [0]->type == MONO_TYPE_U4)
5117 opcode = OP_LOADU4_MEMBASE;
5118 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5119 opcode = OP_LOADI8_MEMBASE;
5120 else if (fsig->params [0]->type == MONO_TYPE_R4)
5121 opcode = OP_LOADR4_MEMBASE;
5122 else if (fsig->params [0]->type == MONO_TYPE_R8)
5123 opcode = OP_LOADR8_MEMBASE;
5124 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5125 opcode = OP_LOAD_MEMBASE;
5128 MONO_INST_NEW (cfg, ins, opcode);
5129 ins->inst_basereg = args [0]->dreg;
5130 ins->inst_offset = 0;
5131 MONO_ADD_INS (cfg->cbb, ins);
5133 switch (fsig->params [0]->type) {
5140 ins->dreg = mono_alloc_ireg (cfg);
5141 ins->type = STACK_I4;
5145 ins->dreg = mono_alloc_lreg (cfg);
5146 ins->type = STACK_I8;
5150 ins->dreg = mono_alloc_ireg (cfg);
5151 #if SIZEOF_REGISTER == 8
5152 ins->type = STACK_I8;
5154 ins->type = STACK_I4;
5159 ins->dreg = mono_alloc_freg (cfg);
5160 ins->type = STACK_R8;
5163 g_assert (mini_type_is_reference (fsig->params [0]));
5164 ins->dreg = mono_alloc_ireg_ref (cfg);
5165 ins->type = STACK_OBJ;
5169 if (opcode == OP_LOADI8_MEMBASE)
5170 ins = mono_decompose_opcode (cfg, ins);
5172 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5176 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5178 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5180 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5181 opcode = OP_STOREI1_MEMBASE_REG;
5182 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5183 opcode = OP_STOREI2_MEMBASE_REG;
5184 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5185 opcode = OP_STOREI4_MEMBASE_REG;
5186 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5187 opcode = OP_STOREI8_MEMBASE_REG;
5188 else if (fsig->params [0]->type == MONO_TYPE_R4)
5189 opcode = OP_STORER4_MEMBASE_REG;
5190 else if (fsig->params [0]->type == MONO_TYPE_R8)
5191 opcode = OP_STORER8_MEMBASE_REG;
5192 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5193 opcode = OP_STORE_MEMBASE_REG;
5196 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5198 MONO_INST_NEW (cfg, ins, opcode);
5199 ins->sreg1 = args [1]->dreg;
5200 ins->inst_destbasereg = args [0]->dreg;
5201 ins->inst_offset = 0;
5202 MONO_ADD_INS (cfg->cbb, ins);
5204 if (opcode == OP_STOREI8_MEMBASE_REG)
5205 ins = mono_decompose_opcode (cfg, ins);
5210 } else if (cmethod->klass->image == mono_defaults.corlib &&
5211 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5212 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5215 #if SIZEOF_REGISTER == 8
5216 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5217 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5218 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5219 ins->dreg = mono_alloc_preg (cfg);
5220 ins->sreg1 = args [0]->dreg;
5221 ins->type = STACK_I8;
5222 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5223 MONO_ADD_INS (cfg->cbb, ins);
5227 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5229 /* 64 bit reads are already atomic */
5230 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5231 load_ins->dreg = mono_alloc_preg (cfg);
5232 load_ins->inst_basereg = args [0]->dreg;
5233 load_ins->inst_offset = 0;
5234 load_ins->type = STACK_I8;
5235 MONO_ADD_INS (cfg->cbb, load_ins);
5237 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5244 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5245 MonoInst *ins_iconst;
5248 if (fsig->params [0]->type == MONO_TYPE_I4) {
5249 opcode = OP_ATOMIC_ADD_I4;
5250 cfg->has_atomic_add_i4 = TRUE;
5252 #if SIZEOF_REGISTER == 8
5253 else if (fsig->params [0]->type == MONO_TYPE_I8)
5254 opcode = OP_ATOMIC_ADD_I8;
5257 if (!mono_arch_opcode_supported (opcode))
5259 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5260 ins_iconst->inst_c0 = 1;
5261 ins_iconst->dreg = mono_alloc_ireg (cfg);
5262 MONO_ADD_INS (cfg->cbb, ins_iconst);
5264 MONO_INST_NEW (cfg, ins, opcode);
5265 ins->dreg = mono_alloc_ireg (cfg);
5266 ins->inst_basereg = args [0]->dreg;
5267 ins->inst_offset = 0;
5268 ins->sreg2 = ins_iconst->dreg;
5269 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5270 MONO_ADD_INS (cfg->cbb, ins);
5272 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5273 MonoInst *ins_iconst;
5276 if (fsig->params [0]->type == MONO_TYPE_I4) {
5277 opcode = OP_ATOMIC_ADD_I4;
5278 cfg->has_atomic_add_i4 = TRUE;
5280 #if SIZEOF_REGISTER == 8
5281 else if (fsig->params [0]->type == MONO_TYPE_I8)
5282 opcode = OP_ATOMIC_ADD_I8;
5285 if (!mono_arch_opcode_supported (opcode))
5287 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5288 ins_iconst->inst_c0 = -1;
5289 ins_iconst->dreg = mono_alloc_ireg (cfg);
5290 MONO_ADD_INS (cfg->cbb, ins_iconst);
5292 MONO_INST_NEW (cfg, ins, opcode);
5293 ins->dreg = mono_alloc_ireg (cfg);
5294 ins->inst_basereg = args [0]->dreg;
5295 ins->inst_offset = 0;
5296 ins->sreg2 = ins_iconst->dreg;
5297 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5298 MONO_ADD_INS (cfg->cbb, ins);
5300 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5303 if (fsig->params [0]->type == MONO_TYPE_I4) {
5304 opcode = OP_ATOMIC_ADD_I4;
5305 cfg->has_atomic_add_i4 = TRUE;
5307 #if SIZEOF_REGISTER == 8
5308 else if (fsig->params [0]->type == MONO_TYPE_I8)
5309 opcode = OP_ATOMIC_ADD_I8;
5312 if (!mono_arch_opcode_supported (opcode))
5314 MONO_INST_NEW (cfg, ins, opcode);
5315 ins->dreg = mono_alloc_ireg (cfg);
5316 ins->inst_basereg = args [0]->dreg;
5317 ins->inst_offset = 0;
5318 ins->sreg2 = args [1]->dreg;
5319 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5320 MONO_ADD_INS (cfg->cbb, ins);
5323 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5324 MonoInst *f2i = NULL, *i2f;
5325 guint32 opcode, f2i_opcode, i2f_opcode;
5326 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5327 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5329 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5330 fsig->params [0]->type == MONO_TYPE_R4) {
5331 opcode = OP_ATOMIC_EXCHANGE_I4;
5332 f2i_opcode = OP_MOVE_F_TO_I4;
5333 i2f_opcode = OP_MOVE_I4_TO_F;
5334 cfg->has_atomic_exchange_i4 = TRUE;
5336 #if SIZEOF_REGISTER == 8
5338 fsig->params [0]->type == MONO_TYPE_I8 ||
5339 fsig->params [0]->type == MONO_TYPE_R8 ||
5340 fsig->params [0]->type == MONO_TYPE_I) {
5341 opcode = OP_ATOMIC_EXCHANGE_I8;
5342 f2i_opcode = OP_MOVE_F_TO_I8;
5343 i2f_opcode = OP_MOVE_I8_TO_F;
5346 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5347 opcode = OP_ATOMIC_EXCHANGE_I4;
5348 cfg->has_atomic_exchange_i4 = TRUE;
5354 if (!mono_arch_opcode_supported (opcode))
5358 /* TODO: Decompose these opcodes instead of bailing here. */
5359 if (COMPILE_SOFT_FLOAT (cfg))
5362 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5363 f2i->dreg = mono_alloc_ireg (cfg);
5364 f2i->sreg1 = args [1]->dreg;
5365 if (f2i_opcode == OP_MOVE_F_TO_I4)
5366 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5367 MONO_ADD_INS (cfg->cbb, f2i);
5370 MONO_INST_NEW (cfg, ins, opcode);
5371 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5372 ins->inst_basereg = args [0]->dreg;
5373 ins->inst_offset = 0;
5374 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5375 MONO_ADD_INS (cfg->cbb, ins);
5377 switch (fsig->params [0]->type) {
5379 ins->type = STACK_I4;
5382 ins->type = STACK_I8;
5385 #if SIZEOF_REGISTER == 8
5386 ins->type = STACK_I8;
5388 ins->type = STACK_I4;
5393 ins->type = STACK_R8;
5396 g_assert (mini_type_is_reference (fsig->params [0]));
5397 ins->type = STACK_OBJ;
5402 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5403 i2f->dreg = mono_alloc_freg (cfg);
5404 i2f->sreg1 = ins->dreg;
5405 i2f->type = STACK_R8;
5406 if (i2f_opcode == OP_MOVE_I4_TO_F)
5407 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5408 MONO_ADD_INS (cfg->cbb, i2f);
5413 if (cfg->gen_write_barriers && is_ref)
5414 mini_emit_write_barrier (cfg, args [0], args [1]);
5416 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5417 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5418 guint32 opcode, f2i_opcode, i2f_opcode;
5419 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5420 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5422 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5423 fsig->params [1]->type == MONO_TYPE_R4) {
5424 opcode = OP_ATOMIC_CAS_I4;
5425 f2i_opcode = OP_MOVE_F_TO_I4;
5426 i2f_opcode = OP_MOVE_I4_TO_F;
5427 cfg->has_atomic_cas_i4 = TRUE;
5429 #if SIZEOF_REGISTER == 8
5431 fsig->params [1]->type == MONO_TYPE_I8 ||
5432 fsig->params [1]->type == MONO_TYPE_R8 ||
5433 fsig->params [1]->type == MONO_TYPE_I) {
5434 opcode = OP_ATOMIC_CAS_I8;
5435 f2i_opcode = OP_MOVE_F_TO_I8;
5436 i2f_opcode = OP_MOVE_I8_TO_F;
5439 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5440 opcode = OP_ATOMIC_CAS_I4;
5441 cfg->has_atomic_cas_i4 = TRUE;
5447 if (!mono_arch_opcode_supported (opcode))
5451 /* TODO: Decompose these opcodes instead of bailing here. */
5452 if (COMPILE_SOFT_FLOAT (cfg))
5455 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5456 f2i_new->dreg = mono_alloc_ireg (cfg);
5457 f2i_new->sreg1 = args [1]->dreg;
5458 if (f2i_opcode == OP_MOVE_F_TO_I4)
5459 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5460 MONO_ADD_INS (cfg->cbb, f2i_new);
5462 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5463 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5464 f2i_cmp->sreg1 = args [2]->dreg;
5465 if (f2i_opcode == OP_MOVE_F_TO_I4)
5466 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5467 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5470 MONO_INST_NEW (cfg, ins, opcode);
5471 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5472 ins->sreg1 = args [0]->dreg;
5473 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5474 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5475 MONO_ADD_INS (cfg->cbb, ins);
5477 switch (fsig->params [1]->type) {
5479 ins->type = STACK_I4;
5482 ins->type = STACK_I8;
5485 #if SIZEOF_REGISTER == 8
5486 ins->type = STACK_I8;
5488 ins->type = STACK_I4;
5492 ins->type = cfg->r4_stack_type;
5495 ins->type = STACK_R8;
5498 g_assert (mini_type_is_reference (fsig->params [1]));
5499 ins->type = STACK_OBJ;
5504 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5505 i2f->dreg = mono_alloc_freg (cfg);
5506 i2f->sreg1 = ins->dreg;
5507 i2f->type = STACK_R8;
5508 if (i2f_opcode == OP_MOVE_I4_TO_F)
5509 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5510 MONO_ADD_INS (cfg->cbb, i2f);
5515 if (cfg->gen_write_barriers && is_ref)
5516 mini_emit_write_barrier (cfg, args [0], args [1]);
5518 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5519 fsig->params [1]->type == MONO_TYPE_I4) {
5520 MonoInst *cmp, *ceq;
5522 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5525 /* int32 r = CAS (location, value, comparand); */
5526 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5527 ins->dreg = alloc_ireg (cfg);
5528 ins->sreg1 = args [0]->dreg;
5529 ins->sreg2 = args [1]->dreg;
5530 ins->sreg3 = args [2]->dreg;
5531 ins->type = STACK_I4;
5532 MONO_ADD_INS (cfg->cbb, ins);
5534 /* bool result = r == comparand; */
5535 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5536 cmp->sreg1 = ins->dreg;
5537 cmp->sreg2 = args [2]->dreg;
5538 cmp->type = STACK_I4;
5539 MONO_ADD_INS (cfg->cbb, cmp);
5541 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5542 ceq->dreg = alloc_ireg (cfg);
5543 ceq->type = STACK_I4;
5544 MONO_ADD_INS (cfg->cbb, ceq);
5546 /* *success = result; */
5547 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5549 cfg->has_atomic_cas_i4 = TRUE;
5551 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5552 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5556 } else if (cmethod->klass->image == mono_defaults.corlib &&
5557 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5558 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5561 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5563 MonoType *t = fsig->params [0];
5565 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5567 g_assert (t->byref);
5568 /* t is a byref type, so the reference check is more complicated */
5569 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5570 if (t->type == MONO_TYPE_I1)
5571 opcode = OP_ATOMIC_LOAD_I1;
5572 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5573 opcode = OP_ATOMIC_LOAD_U1;
5574 else if (t->type == MONO_TYPE_I2)
5575 opcode = OP_ATOMIC_LOAD_I2;
5576 else if (t->type == MONO_TYPE_U2)
5577 opcode = OP_ATOMIC_LOAD_U2;
5578 else if (t->type == MONO_TYPE_I4)
5579 opcode = OP_ATOMIC_LOAD_I4;
5580 else if (t->type == MONO_TYPE_U4)
5581 opcode = OP_ATOMIC_LOAD_U4;
5582 else if (t->type == MONO_TYPE_R4)
5583 opcode = OP_ATOMIC_LOAD_R4;
5584 else if (t->type == MONO_TYPE_R8)
5585 opcode = OP_ATOMIC_LOAD_R8;
5586 #if SIZEOF_REGISTER == 8
5587 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5588 opcode = OP_ATOMIC_LOAD_I8;
5589 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5590 opcode = OP_ATOMIC_LOAD_U8;
5592 else if (t->type == MONO_TYPE_I)
5593 opcode = OP_ATOMIC_LOAD_I4;
5594 else if (is_ref || t->type == MONO_TYPE_U)
5595 opcode = OP_ATOMIC_LOAD_U4;
5599 if (!mono_arch_opcode_supported (opcode))
5602 MONO_INST_NEW (cfg, ins, opcode);
5603 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5604 ins->sreg1 = args [0]->dreg;
5605 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5606 MONO_ADD_INS (cfg->cbb, ins);
5609 case MONO_TYPE_BOOLEAN:
5616 ins->type = STACK_I4;
5620 ins->type = STACK_I8;
5624 #if SIZEOF_REGISTER == 8
5625 ins->type = STACK_I8;
5627 ins->type = STACK_I4;
5631 ins->type = cfg->r4_stack_type;
5634 ins->type = STACK_R8;
5638 ins->type = STACK_OBJ;
5644 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5646 MonoType *t = fsig->params [0];
5649 g_assert (t->byref);
5650 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5651 if (t->type == MONO_TYPE_I1)
5652 opcode = OP_ATOMIC_STORE_I1;
5653 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5654 opcode = OP_ATOMIC_STORE_U1;
5655 else if (t->type == MONO_TYPE_I2)
5656 opcode = OP_ATOMIC_STORE_I2;
5657 else if (t->type == MONO_TYPE_U2)
5658 opcode = OP_ATOMIC_STORE_U2;
5659 else if (t->type == MONO_TYPE_I4)
5660 opcode = OP_ATOMIC_STORE_I4;
5661 else if (t->type == MONO_TYPE_U4)
5662 opcode = OP_ATOMIC_STORE_U4;
5663 else if (t->type == MONO_TYPE_R4)
5664 opcode = OP_ATOMIC_STORE_R4;
5665 else if (t->type == MONO_TYPE_R8)
5666 opcode = OP_ATOMIC_STORE_R8;
5667 #if SIZEOF_REGISTER == 8
5668 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5669 opcode = OP_ATOMIC_STORE_I8;
5670 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5671 opcode = OP_ATOMIC_STORE_U8;
5673 else if (t->type == MONO_TYPE_I)
5674 opcode = OP_ATOMIC_STORE_I4;
5675 else if (is_ref || t->type == MONO_TYPE_U)
5676 opcode = OP_ATOMIC_STORE_U4;
5680 if (!mono_arch_opcode_supported (opcode))
5683 MONO_INST_NEW (cfg, ins, opcode);
5684 ins->dreg = args [0]->dreg;
5685 ins->sreg1 = args [1]->dreg;
5686 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5687 MONO_ADD_INS (cfg->cbb, ins);
5689 if (cfg->gen_write_barriers && is_ref)
5690 mini_emit_write_barrier (cfg, args [0], args [1]);
5696 } else if (cmethod->klass->image == mono_defaults.corlib &&
5697 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5698 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5699 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5700 if (mini_should_insert_breakpoint (cfg->method)) {
5701 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5703 MONO_INST_NEW (cfg, ins, OP_NOP);
5704 MONO_ADD_INS (cfg->cbb, ins);
5708 } else if (cmethod->klass->image == mono_defaults.corlib &&
5709 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5710 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5711 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5713 EMIT_NEW_ICONST (cfg, ins, 1);
5715 EMIT_NEW_ICONST (cfg, ins, 0);
5718 } else if (cmethod->klass->image == mono_defaults.corlib &&
5719 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5720 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5721 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5722 /* No stack walks are currently available, so implement this as an intrinsic */
5723 MonoInst *assembly_ins;
5725 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5726 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5729 } else if (cmethod->klass->image == mono_defaults.corlib &&
5730 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5731 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5732 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5733 /* No stack walks are currently available, so implement this as an intrinsic */
5734 MonoInst *method_ins;
5735 MonoMethod *declaring = cfg->method;
5737 /* This returns the declaring generic method */
5738 if (declaring->is_inflated)
5739 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5740 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5741 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5742 cfg->no_inline = TRUE;
5743 if (cfg->method != cfg->current_method)
5744 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5747 } else if (cmethod->klass == mono_defaults.math_class) {
5749 * There is general branchless code for Min/Max, but it does not work for
5751 * http://everything2.com/?node_id=1051618
5753 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5754 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5755 MONO_INST_NEW (cfg, ins, OP_PCEQ);
5756 ins->dreg = alloc_preg (cfg);
5757 ins->type = STACK_I4;
5758 MONO_ADD_INS (cfg->cbb, ins);
5760 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5761 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5762 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5763 !strcmp (cmethod->klass->name, "Selector")) ||
5764 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5765 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5766 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
5767 !strcmp (cmethod->klass->name, "Selector"))
5769 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
5770 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
5771 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
5774 MonoJumpInfoToken *ji;
5777 if (args [0]->opcode == OP_GOT_ENTRY) {
5778 pi = (MonoInst *)args [0]->inst_p1;
5779 g_assert (pi->opcode == OP_PATCH_INFO);
5780 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5781 ji = (MonoJumpInfoToken *)pi->inst_p0;
5783 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5784 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
5787 NULLIFY_INS (args [0]);
5789 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
5790 return_val_if_nok (&cfg->error, NULL);
5792 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5793 ins->dreg = mono_alloc_ireg (cfg);
5796 MONO_ADD_INS (cfg->cbb, ins);
5799 } else if (cmethod->klass->image == mono_defaults.corlib &&
5800 (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
5801 (strcmp (cmethod->klass->name, "Marshal") == 0)) {
5802 //Convert Marshal.PtrToStructure<T> of blittable T to direct loads
5803 if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
5804 cmethod->is_inflated &&
5805 fsig->param_count == 1 &&
5806 !mini_method_check_context_used (cfg, cmethod)) {
5808 MonoGenericContext *method_context = mono_method_get_context (cmethod);
5809 MonoType *arg0 = method_context->method_inst->type_argv [0];
5810 if (mono_type_is_native_blittable (arg0))
5811 return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
5815 #ifdef MONO_ARCH_SIMD_INTRINSICS
5816 if (cfg->opt & MONO_OPT_SIMD) {
5817 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5823 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5827 if (COMPILE_LLVM (cfg)) {
5828 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5833 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5837 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect certain well-known calls to specialized implementations.
 *   The only case visible here replaces String.InternalAllocateStr with a
 *   direct call to the managed GC allocator when one is available.
 *   NOTE(review): parts of this function are elided in this view; the
 *   fall-through/return path for non-redirected methods is not shown.
 */
5840 inline static MonoInst*
5841 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5842 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
5844 if (method->klass == mono_defaults.string_class) {
5845 /* managed string allocation support */
5846 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
5847 MonoInst *iargs [2];
5848 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5849 MonoMethod *managed_alloc = NULL;
5851 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross compilers can't determine the managed allocator to use here. */
5852 #ifndef MONO_CROSS_COMPILE
5853 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5857 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5858 iargs [1] = args [0];
5859 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   During inlining, create an OP_LOCAL variable for every argument of SIG
 *   (including the implicit 'this') and store the corresponding stack slot
 *   from SP into it, so the inlined body can address its arguments.
 */
5866 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5868 MonoInst *store, *temp;
5871 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static type comes from the stack slot, not the signature. */
5872 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5875 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5876 * would be different than the MonoInst's used to represent arguments, and
5877 * the ldelema implementation can't deal with that.
5878 * Solution: When ldelema is used on an inline argument, create a var for
5879 * it, emit ldelema on that var, and emit the saving code below in
5880 * inline_method () if needed.
5882 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5883 cfg->args [i] = temp;
5884 /* This uses cfg->args [i] which is set by the preceding line */
5885 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5886 store->cil_code = sp [0]->cil_code;
5891 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5892 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5894 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts
 *   with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 *   The env var is read once and cached in a function-local static.
 */
5896 check_inline_called_method_name_limit (MonoMethod *called_method)
5899 static const char *limit = NULL;
5901 if (limit == NULL) {
5902 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5904 if (limit_string != NULL)
5905 limit = limit_string;
/* Empty limit means "no restriction" (handled in elided lines). */
5910 if (limit [0] != '\0') {
5911 char *called_method_name = mono_method_full_name (called_method, TRUE);
5913 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5914 g_free (called_method_name);
5916 //return (strncmp_result <= 0);
5917 return (strncmp_result == 0);
5924 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining into callers whose full name starts
 *   with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 *   Mirrors check_inline_called_method_name_limit () for the caller side.
 */
5926 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5929 static const char *limit = NULL;
5931 if (limit == NULL) {
5932 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5933 if (limit_string != NULL) {
5934 limit = limit_string;
/* Empty limit means "no restriction" (handled in elided lines). */
5940 if (limit [0] != '\0') {
5941 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5943 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5944 g_free (caller_method_name);
5946 //return (strncmp_result <= 0);
5947 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes the vreg DREG to the zero value appropriate
 *   for RTYPE: NULL for pointers/references, 0 for integer types,
 *   0.0 for R4/R8 (loaded from static constants), and VZERO for valuetypes
 *   (including generic instances and type variables constrained to VTs).
 */
5955 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage so OP_R4CONST/OP_R8CONST can point at the constant. */
5957 static double r8_0 = 0.0;
5958 static float r4_0 = 0.0;
5962 rtype = mini_get_underlying_type (rtype);
5966 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5967 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5968 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5969 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5970 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5971 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* Native r4 mode: keep the value in single precision. */
5972 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5973 ins->type = STACK_R4;
5974 ins->inst_p0 = (void*)&r4_0;
5976 MONO_ADD_INS (cfg->cbb, ins);
5977 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5978 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5979 ins->type = STACK_R8;
5980 ins->inst_p0 = (void*)&r8_0;
5982 MONO_ADD_INS (cfg->cbb, ins);
5983 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5984 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5985 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5986 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
5987 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (references etc.) defaults to a NULL pointer const. */
5989 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder inits that keep
 *   the IR valid without generating real code; the type dispatch mirrors
 *   emit_init_rvar () case for case. Falls back to a real init for types
 *   that have no dummy opcode.
 */
5994 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5998 rtype = mini_get_underlying_type (rtype);
6002 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6003 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6004 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6005 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6006 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6007 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6008 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6009 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6010 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6011 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6012 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6013 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6014 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero-init instead. */
6017 emit_init_rvar (cfg, dreg, rtype);
6021 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE. Under soft-float a fresh
 *   dreg is initialized and then stored to the local; otherwise the local's
 *   own dreg is initialized directly (real init, or dummy when !init).
 */
6023 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6025 MonoInst *var = cfg->locals [local];
6026 if (COMPILE_SOFT_FLOAT (cfg)) {
6028 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6029 emit_init_rvar (cfg, reg, type);
6030 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6033 emit_init_rvar (cfg, var->dreg, type);
6035 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below; same contract. */
6040 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6042 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6048 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *   Inline CMETHOD into the current compile. Saves the parts of CFG state
 *   that mono_method_to_ir () will clobber, converts the callee between
 *   fresh start/end bblocks, then either commits (linking/merging the new
 *   blocks and loading the return value) or rolls back on failure.
 */
6051 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6052 guchar *ip, guint real_offset, gboolean inline_always)
6055 MonoInst *ins, *rvar = NULL;
6056 MonoMethodHeader *cheader;
6057 MonoBasicBlock *ebblock, *sbblock;
6059 MonoMethod *prev_inlined_method;
6060 MonoInst **prev_locals, **prev_args;
6061 MonoType **prev_arg_types;
6062 guint prev_real_offset;
6063 GHashTable *prev_cbb_hash;
6064 MonoBasicBlock **prev_cil_offset_to_bb;
6065 MonoBasicBlock *prev_cbb;
6066 const unsigned char *prev_ip;
6067 unsigned char *prev_cil_start;
6068 guint32 prev_cil_offset_to_bb_len;
6069 MonoMethod *prev_current_method;
6070 MonoGenericContext *prev_generic_context;
6071 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6073 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging inliner decisions. */
6075 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6076 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6079 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6080 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6085 fsig = mono_method_signature (cmethod);
6087 if (cfg->verbose_level > 2)
6088 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6090 if (!cmethod->inline_info) {
6091 cfg->stat_inlineable_methods++;
6092 cmethod->inline_info = 1;
6095 /* allocate local variables */
6096 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failure: propagate the error only when inlining is mandatory. */
6098 if (inline_always) {
6099 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6100 mono_error_move (&cfg->error, &error);
6102 mono_error_cleanup (&error);
6107 /*Must verify before creating locals as it can cause the JIT to assert.*/
6108 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6109 mono_metadata_free_mh (cheader);
6113 /* allocate space to store the return value */
6114 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6115 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6118 prev_locals = cfg->locals;
6119 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6120 for (i = 0; i < cheader->num_locals; ++i)
6121 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6123 /* allocate start and end blocks */
6124 /* This is needed so if the inline is aborted, we can clean up */
6125 NEW_BBLOCK (cfg, sbblock);
6126 sbblock->real_offset = real_offset;
6128 NEW_BBLOCK (cfg, ebblock);
6129 ebblock->block_num = cfg->num_bblocks++;
6130 ebblock->real_offset = real_offset;
/* Save every piece of cfg state that mono_method_to_ir () overwrites. */
6132 prev_args = cfg->args;
6133 prev_arg_types = cfg->arg_types;
6134 prev_inlined_method = cfg->inlined_method;
6135 cfg->inlined_method = cmethod;
6136 cfg->ret_var_set = FALSE;
6137 cfg->inline_depth ++;
6138 prev_real_offset = cfg->real_offset;
6139 prev_cbb_hash = cfg->cbb_hash;
6140 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6141 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6142 prev_cil_start = cfg->cil_start;
6144 prev_cbb = cfg->cbb;
6145 prev_current_method = cfg->current_method;
6146 prev_generic_context = cfg->generic_context;
6147 prev_ret_var_set = cfg->ret_var_set;
6148 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method needs the virtual-call null check. */
6150 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6153 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6155 ret_var_set = cfg->ret_var_set;
/* Restore the saved cfg state regardless of success. */
6157 cfg->inlined_method = prev_inlined_method;
6158 cfg->real_offset = prev_real_offset;
6159 cfg->cbb_hash = prev_cbb_hash;
6160 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6161 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6162 cfg->cil_start = prev_cil_start;
6164 cfg->locals = prev_locals;
6165 cfg->args = prev_args;
6166 cfg->arg_types = prev_arg_types;
6167 cfg->current_method = prev_current_method;
6168 cfg->generic_context = prev_generic_context;
6169 cfg->ret_var_set = prev_ret_var_set;
6170 cfg->disable_inline = prev_disable_inline;
6171 cfg->inline_depth --;
/* Accept if the body was cheap (< 60), forced, or marked AggressiveInlining. */
6173 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6174 if (cfg->verbose_level > 2)
6175 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6177 cfg->stat_inlined_methods++;
6179 /* always add some code to avoid block split failures */
6180 MONO_INST_NEW (cfg, ins, OP_NOP);
6181 MONO_ADD_INS (prev_cbb, ins);
6183 prev_cbb->next_bb = sbblock;
6184 link_bblock (cfg, prev_cbb, sbblock);
6187 * Get rid of the begin and end bblocks if possible to aid local
6190 if (prev_cbb->out_count == 1)
6191 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6193 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6194 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6196 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6197 MonoBasicBlock *prev = ebblock->in_bb [0];
6199 if (prev->next_bb == ebblock) {
6200 mono_merge_basic_blocks (cfg, prev, ebblock);
6202 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6203 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6204 cfg->cbb = prev_cbb;
6207 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6212 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Init rvar on predecessor paths ending in OP_NOT_REACHED (e.g. throw). */
6218 for (i = 0; i < ebblock->in_count; ++i) {
6219 bb = ebblock->in_bb [i];
6221 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6224 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6234 * If the inlined method contains only a throw, then the ret var is not
6235 * set, so set it to a dummy value.
6238 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6240 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6243 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected/aborted: clear the exception and roll back. */
6246 if (cfg->verbose_level > 2)
6247 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6248 cfg->exception_type = MONO_EXCEPTION_NONE;
6250 /* This gets rid of the newly added bblocks */
6251 cfg->cbb = prev_cbb;
6253 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6258 * Some of these comments may well be out-of-date.
6259 * Design decisions: we do a single pass over the IL code (and we do bblock
6260 * splitting/merging in the few cases when it's required: a back jump to an IL
6261 * address that was not already seen as bblock starting point).
6262 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6263 * Complex operations are decomposed in simpler ones right away. We need to let the
6264 * arch-specific code peek and poke inside this process somehow (except when the
6265 * optimizations can take advantage of the full semantic info of coarse opcodes).
6266 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6267 * MonoInst->opcode initially is the IL opcode or some simplification of that
6268 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6269 * opcode with value bigger than OP_LAST.
6270 * At this point the IR can be handed over to an interpreter, a dumb code generator
6271 * or to the optimizing code generator that will translate it to SSA form.
6273 * Profiling directed optimizations.
6274 * We may compile by default with few or no optimizations and instrument the code
6275 * or the user may indicate what methods to optimize the most either in a config file
6276 * or through repeated runs where the compiler applies offline the optimizations to
6277 * each method and then decides if it was worth it.
6280 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6281 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6282 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6283 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6284 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6285 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6286 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6287 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6289 /* offset from br.s -> br like opcodes */
6290 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE if IL address IP falls inside basic block BB, i.e. the
 *   cil_offset_to_bb table maps it to BB or to no block at all.
 */
6293 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6295 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6297 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the IL stream [START, END) and create a basic block (GET_BBLOCK)
 *   at every branch target and at the instruction following each branch or
 *   switch. Also marks blocks containing CEE_THROW as out-of-line so they
 *   can be moved to cold code.
 */
6301 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6303 unsigned char *ip = start;
6304 unsigned char *target;
6307 MonoBasicBlock *bblock;
6308 const MonoOpcode *opcode;
6311 cli_addr = ip - start;
6312 i = mono_opcode_value ((const guint8 **)&ip, end);
6315 opcode = &mono_opcodes [i];
/* Advance past the operand; only branch-like operands create blocks. */
6316 switch (opcode->argument) {
6317 case MonoInlineNone:
6320 case MonoInlineString:
6321 case MonoInlineType:
6322 case MonoInlineField:
6323 case MonoInlineMethod:
6326 case MonoShortInlineR:
6333 case MonoShortInlineVar:
6334 case MonoShortInlineI:
6337 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the following instruction. */
6338 target = start + cli_addr + 2 + (signed char)ip [1];
6339 GET_BBLOCK (cfg, bblock, target);
6342 GET_BBLOCK (cfg, bblock, ip);
6344 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the following instruction. */
6345 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6346 GET_BBLOCK (cfg, bblock, target);
6349 GET_BBLOCK (cfg, bblock, ip);
6351 case MonoInlineSwitch: {
6352 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
6355 cli_addr += 5 + 4 * n;
6356 target = start + cli_addr;
6357 GET_BBLOCK (cfg, bblock, target);
6359 for (j = 0; j < n; ++j) {
6360 target = start + cli_addr + (gint32)read32 (ip);
6361 GET_BBLOCK (cfg, bblock, target);
6371 g_assert_not_reached ();
6374 if (i == CEE_THROW) {
6375 unsigned char *bb_start = ip - 1;
6377 /* Find the start of the bblock containing the throw */
6379 while ((bb_start >= start) && !bblock) {
6380 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6384 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M. Wrapper methods
 *   resolve tokens via their wrapper data (then inflate against CONTEXT);
 *   normal methods go through the metadata loader. May return a method
 *   with open (unbound) generic type variables.
 */
6394 static inline MonoMethod *
6395 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6401 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6402 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6404 method = mono_class_inflate_generic_method_checked (method, context, error);
6407 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open () but, when not compiling gshared
 *   code, rejects methods whose class is still an open constructed type
 *   (sets a bad-image error on the cfg). CFG may be NULL; errors are then
 *   collected in a local MonoError and discarded.
 */
6413 static inline MonoMethod *
6414 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6417 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6419 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6420 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6424 if (!method && !cfg)
6425 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature for METHOD (wrapper data for
 *   wrappers, metadata parse otherwise) and inflate it against CONTEXT.
 *   Returns NULL on error with ERROR set.
 */
6430 static inline MonoMethodSignature*
6431 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6433 MonoMethodSignature *fsig;
6436 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6437 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6439 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6440 return_val_if_nok (error, NULL);
6443 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *   Return (and lazily cache in a function-local static) the
 *   SecurityManager.ThrowException(1 arg) helper method.
 *   NOTE(review): no locking is visible around the cached lookup here.
 */
6449 throw_exception (void)
6451 static MonoMethod *method = NULL;
6454 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6455 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a managed call to SecurityManager.ThrowException (EX), i.e. code
 *   that throws the given pre-built exception object at runtime.
 */
6462 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6464 MonoMethod *thrower = throw_exception ();
6467 EMIT_NEW_PCONST (cfg, args [0], ex);
6468 mono_emit_method_call (cfg, thrower, args, NULL);
6472 * Return the original method is a wrapper is specified. We can only access
6473 * the custom attributes from the original method.
/* Maps a wrapper back to the method carrying the security attributes;
 * non-wrappers and native-to-managed wrappers pass through (see below). */
6476 get_original_method (MonoMethod *method)
6478 if (method->wrapper_type == MONO_WRAPPER_NONE)
6481 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6482 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6485 /* in other cases we need to find the original method */
6486 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the resulting security exception at runtime.
 */
6490 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6492 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6493 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6495 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the resulting security exception at runtime.
 */
6499 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6501 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6502 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6504 emit_throw_exception (cfg, ex);
6508 * Check that the IL instructions at ip are the array initialization
6509 * sequence and return the pointer to the data and the size.
/* Recognizes the dup/ldtoken/call RuntimeHelpers.InitializeArray pattern
 * emitted after newarr and, if the element type is supported, returns the
 * raw RVA-mapped field data so the call can be replaced by a direct copy. */
6512 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6515 * newarr[System.Int32]
6517 * ldtoken field valuetype ...
6518 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Byte pattern: CEE_DUP, CEE_LDTOKEN <field>, 0x4, CEE_CALL <method>. */
6520 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6522 guint32 token = read32 (ip + 7);
6523 guint32 field_token = read32 (ip + 2);
6524 guint32 field_index = field_token & 0xffffff;
6526 const char *data_ptr;
6528 MonoMethod *cmethod;
6529 MonoClass *dummy_class;
6530 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6534 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6538 *out_field_token = field_token;
6540 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Must be exactly corlib's RuntimeHelpers.InitializeArray. */
6543 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6545 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6549 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6550 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Data must fit inside the field's declared type. */
6567 if (size > mono_type_size (field->type, &dummy_align))
6570 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6571 if (!image_is_dynamic (method->klass->image)) {
6572 field_index = read32 (ip + 2) & 0xffffff;
6573 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6574 data_ptr = mono_image_rva_map (method->klass->image, rva);
6575 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6576 /* for aot code we do the lookup on load */
6577 if (aot && data_ptr)
6578 return (const char *)GUINT_TO_POINTER (rva);
6580 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6582 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 *   IP in METHOD, disassembling the offending instruction when the method
 *   header is readable. The header is queued for later freeing.
 */
6590 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6593 char *method_fname = mono_method_full_name (method, TRUE);
6595 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6598 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6599 mono_error_cleanup (&error);
6600 } else if (header->code_size == 0)
6601 method_code = g_strdup ("method body is empty.");
6603 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6604 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6605 g_free (method_fname);
6606 g_free (method_code);
6607 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * mono_type_to_stloc_coerce:
 *   Return the OP_ICONV_* truncation opcode a stloc to TYPE requires
 *   (small integer locals must be narrowed), or fall through for types
 *   that need no coercion. Enums recurse on their base type.
 */
6611 mono_type_to_stloc_coerce (MonoType *type)
6616 type = mini_get_underlying_type (type);
6618 switch (type->type) {
6620 return OP_ICONV_TO_I1;
6622 return OP_ICONV_TO_U1;
6624 return OP_ICONV_TO_I2;
6626 return OP_ICONV_TO_U2;
6632 case MONO_TYPE_FNPTR:
6633 case MONO_TYPE_CLASS:
6634 case MONO_TYPE_STRING:
6635 case MONO_TYPE_OBJECT:
6636 case MONO_TYPE_SZARRAY:
6637 case MONO_TYPE_ARRAY:
6642 case MONO_TYPE_TYPEDBYREF:
6643 case MONO_TYPE_GENERICINST:
6645 case MONO_TYPE_VALUETYPE:
6646 if (type->data.klass->enumtype) {
/* Coerce enums according to their underlying integer type. */
6647 type = mono_class_enum_basetype (type->data.klass);
6652 case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32
6655 g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
/*
 * emit_stloc_ir:
 *   Emit the store of stack slot *SP into local N, inserting a narrowing
 *   coercion when the local's type requires one (see
 *   mono_type_to_stloc_coerce). Also optimizes away reg-reg moves of
 *   freshly-emitted constants by retargeting their dreg.
 */
6661 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6664 guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
/* Skip the coercion when the value on the stack was just coerced to
 * the same width. */
6667 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
6668 if (cfg->verbose_level > 2)
6669 printf ("Found existing coercing is enough for stloc\n");
6671 MONO_INST_NEW (cfg, ins, coerce_op);
6672 ins->dreg = alloc_ireg (cfg);
6673 ins->sreg1 = sp [0]->dreg;
6674 ins->type = STACK_I4;
6675 ins->klass = mono_class_from_mono_type (header->locals [n]);
6676 MONO_ADD_INS (cfg->cbb, ins);
6677 *sp = mono_decompose_opcode (cfg, ins);
6682 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6683 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6684 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6685 /* Optimize reg-reg moves away */
6687 * Can't optimize other opcodes, since sp[0] might point to
6688 * the last ins of a decomposed opcode.
6690 sp [0]->dreg = (cfg)->locals [n]->dreg;
6692 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_starg_ir:
 *   Emit the store of stack slot *SP into argument N, inserting the same
 *   narrowing coercion logic as emit_stloc_ir () but keyed off
 *   cfg->arg_types instead of the method header's locals.
 */
6697 emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
6700 guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
6703 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
6704 if (cfg->verbose_level > 2)
6705 printf ("Found existing coercing is enough for starg\n");
6707 MONO_INST_NEW (cfg, ins, coerce_op);
6708 ins->dreg = alloc_ireg (cfg);
6709 ins->sreg1 = sp [0]->dreg;
6710 ins->type = STACK_I4;
6711 ins->klass = mono_class_from_mono_type (cfg->arg_types [n]);
6712 MONO_ADD_INS (cfg->cbb, ins);
6713 *sp = mono_decompose_opcode (cfg, ins);
6717 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6721 * ldloca inhibits many optimizations so try to get rid of it in common
/* Peephole: when ldloca(.s) is immediately followed by initobj in the same
 * bblock, initialize the local directly and skip the address-taking.
 * Returns the advanced ip on success (per the visible code path). */
6724 static inline unsigned char *
6725 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6735 local = read16 (ip + 2);
6739 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6740 /* From the INITOBJ case */
6741 token = read32 (ip + 2);
6742 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6743 CHECK_TYPELOAD (klass);
6744 type = mini_get_underlying_type (&klass->byval_arg);
/* Direct zero-init of the local replaces ldloca + initobj. */
6745 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *   Emit IR for a virtual/interface call in llvm-only mode, where vtable and
 *   IMT slots hold function descriptors (addr + arg pairs) rather than raw
 *   code addresses or trampolines.  Dispatches to one of four strategies:
 *     1. plain virtual call        (no generics, no interface, no gsharedvt)
 *     2. simple interface call     (through an initialized IMT slot)
 *     3. generic-virtual / variant-iface / special-array-iface call
 *        (IMT thunk with a runtime slow path)
 *     4. fully dynamic resolution via a JIT icall (gsharedvt fallback)
 *   NOTE(review): some lines (declarations of `slot`/`offset`, some braces)
 *   are elided in this view of the file.
 */
6753 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6755 MonoInst *icall_args [16];
6756 MonoInst *call_target, *ins, *vtable_ins;
6757 int arg_reg, this_reg, vtable_reg;
6758 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6759 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6760 gboolean variant_iface = FALSE;
6763 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
6766 * In llvm-only mode, vtables contain function descriptors instead of
6767 * method addresses/trampolines.
/* Explicit null check on `this` (sp [0]); llvm-only code cannot rely on a
 * hardware fault for this. */
6769 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods dispatch through an IMT slot, class virtuals through a
 * vtable index (the selecting conditional is elided in this view). */
6772 slot = mono_method_get_imt_slot (cmethod);
6774 slot = mono_method_get_vtable_index (cmethod);
6776 this_reg = sp [0]->dreg;
/* Variant interfaces (covariant/contravariant generic params) cannot use
 * the simple interface-call path below. */
6778 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6779 variant_iface = TRUE;
/* --- Case 1: plain virtual call through the vtable. --- */
6781 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6783 * The simplest case, a normal virtual call.
6785 int slot_reg = alloc_preg (cfg);
6786 int addr_reg = alloc_preg (cfg);
6787 int arg_reg = alloc_preg (cfg);
6788 MonoBasicBlock *non_null_bb;
6790 vtable_reg = alloc_preg (cfg);
6791 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6792 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6794 /* Load the vtable slot, which contains a function descriptor. */
6795 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6797 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-NULL). The LIKELY hint tells
 * the backend to favor that branch. */
6799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6800 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
6801 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: ask the runtime to initialize the slot. */
6804 // FIXME: Make the wrapper use the preserveall cconv
6805 // FIXME: Use one icall per slot for small slot numbers ?
6806 icall_args [0] = vtable_ins;
6807 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6808 /* Make the icall return the vtable slot value to save some code space */
6809 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
6810 ins->dreg = slot_reg;
6811 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
6814 MONO_START_BB (cfg, non_null_bb);
6815 /* Load the address + arg from the vtable slot */
6816 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
6819 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* --- Case 2: simple (non-variant, non-generic) interface call. --- */
6822 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
6824 * A simple interface call
6826 * We make a call through an imt slot to obtain the function descriptor we need to call.
6827 * The imt slot contains a function descriptor for a runtime function + arg.
6829 int slot_reg = alloc_preg (cfg);
6830 int addr_reg = alloc_preg (cfg);
6831 int arg_reg = alloc_preg (cfg);
6832 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6834 vtable_reg = alloc_preg (cfg);
6835 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets from the vtable pointer. */
6836 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6839 * The slot is already initialized when the vtable is created so there is no need
/* ... for a NULL check here (continuation elided in this view). */
6843 /* Load the imt slot, which contains a function descriptor. */
6844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6846 /* Load the address + arg of the imt thunk from the imt slot */
6847 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6848 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6850 * IMT thunks in llvm-only mode are C functions which take an info argument
6851 * plus the imt method and return the ftndesc to call.
6853 icall_args [0] = thunk_arg_ins;
6854 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6855 cmethod, MONO_RGCTX_INFO_METHOD);
6856 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6858 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* --- Case 3: generic virtual / variant iface / special array iface. --- */
6861 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
6863 * This is similar to the interface case, the vtable slot points to an imt thunk which is
6864 * dynamically extended as more instantiations are discovered.
6865 * This handles generic virtual methods both on classes and interfaces.
6867 int slot_reg = alloc_preg (cfg);
6868 int addr_reg = alloc_preg (cfg);
6869 int arg_reg = alloc_preg (cfg);
6870 int ftndesc_reg = alloc_preg (cfg);
6871 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6872 MonoBasicBlock *slowpath_bb, *end_bb;
6874 NEW_BBLOCK (cfg, slowpath_bb);
6875 NEW_BBLOCK (cfg, end_bb);
6877 vtable_reg = alloc_preg (cfg);
6878 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slot for interfaces, vtable slot for classes (selecting
 * conditional elided in this view). */
6880 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6882 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6884 /* Load the slot, which contains a function descriptor. */
6885 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6887 /* These slots are not initialized, so fall back to the slow path until they are initialized */
6888 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
6889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6893 /* Same as with iface calls */
6894 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6895 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6896 icall_args [0] = thunk_arg_ins;
6897 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6898 cmethod, MONO_RGCTX_INFO_METHOD);
6899 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6900 ftndesc_ins->dreg = ftndesc_reg;
6902 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
6903 * they don't know about yet. Fall back to the slowpath in that case.
6905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
6906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve through the runtime.  Both icalls write into
 * ftndesc_reg so the phi at end_bb sees a single vreg. */
6911 MONO_START_BB (cfg, slowpath_bb);
6912 icall_args [0] = vtable_ins;
6913 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6914 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6915 cmethod, MONO_RGCTX_INFO_METHOD);
6917 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
6919 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
6920 ftndesc_ins->dreg = ftndesc_reg;
6921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6924 MONO_START_BB (cfg, end_bb);
6925 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* --- Case 4: gsharedvt fallback — resolve the target via an icall. --- */
6929 * Non-optimized cases
6931 icall_args [0] = sp [0];
6932 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6934 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6935 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter slot: the resolver icall fills in the extra call arg. */
6937 arg_reg = alloc_preg (cfg);
6938 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
6939 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
6941 g_assert (is_gsharedvt);
6943 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
6945 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
6948 * Pass the extra argument even if the callee doesn't receive it, most
6949 * calling conventions allow this.
6951 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *   Report whether KLASS is System.Exception or derives from it, by walking
 *   the parent chain.  NOTE(review): the loop structure and return
 *   statements are elided in this view of the file.
 */
6955 is_exception_class (MonoClass *klass)
6958 if (klass == mono_defaults.exception_class)
6960 klass = klass->parent;
6966 * is_jit_optimizer_disabled:
6968 * Determine whether M's assembly has a DebuggableAttribute with the
6969 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly; the memory barriers below order the
 * cached value's write before the `inited` flag write so concurrent
 * readers never see the flag without the value. */
6972 is_jit_optimizer_disabled (MonoMethod *m)
6975 MonoAssembly *ass = m->klass->image->assembly;
6976 MonoCustomAttrInfo* attrs;
6979 gboolean val = FALSE;
/* Fast path: already computed for this assembly. */
6982 if (ass->jit_optimizer_disabled_inited)
6983 return ass->jit_optimizer_disabled;
6985 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute type not available: cache FALSE and bail out
 * (the early return is elided in this view). */
6989 ass->jit_optimizer_disabled = FALSE;
6990 mono_memory_barrier ();
6991 ass->jit_optimizer_disabled_inited = TRUE;
6995 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
6996 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan the assembly-level attributes for DebuggableAttribute. */
6998 for (i = 0; i < attrs->num_attrs; ++i) {
6999 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7001 MonoMethodSignature *sig;
7003 if (!attr->ctor || attr->ctor->klass != klass)
7005 /* Decode the attribute. See reflection.c */
7006 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3). */
7007 g_assert (read16 (p) == 0x0001);
7010 // FIXME: Support named parameters
7011 sig = mono_method_signature (attr->ctor);
/* Only the Debuggable(bool, bool) ctor overload is decoded here; the
 * second bool is IsJITOptimizerDisabled (decoding elided in this view). */
7012 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7014 /* Two boolean arguments */
7018 mono_custom_attrs_free (attrs);
/* Publish the computed value, then the inited flag (barrier in between). */
7021 ass->jit_optimizer_disabled = val;
7022 mono_memory_barrier ();
7023 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a call from METHOD to CMETHOD (signature FSIG, made with
 *   CALL_OPCODE) may be compiled as a tail call.  Starts from the
 *   architecture's verdict and then vetoes on conditions where the callee
 *   could observe or outlive the caller's stack frame.
 */
7029 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7031 gboolean supported_tail_call;
/* Architecture-specific check (register/stack argument compatibility). */
7034 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7036 for (i = 0; i < fsig->param_count; ++i) {
7037 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7038 /* These can point to the current method's stack */
7039 supported_tail_call = FALSE;
/* A valuetype `this` is passed by reference into the ctor/method. */
7041 if (fsig->hasthis && cmethod->klass->valuetype)
7042 /* this might point to the current method's stack */
7043 supported_tail_call = FALSE;
7044 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7045 supported_tail_call = FALSE;
/* Methods that save an LMF need their frame intact for unwinding. */
7046 if (cfg->method->save_lmf)
7047 supported_tail_call = FALSE;
7048 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7049 supported_tail_call = FALSE;
/* Only plain `call` (not callvirt/calli) is eligible here. */
7050 if (call_opcode != CEE_CALL)
7051 supported_tail_call = FALSE;
7053 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs by
 * disabling the optimization past a configurable count. */
7055 if (supported_tail_call) {
7056 if (!mono_debug_count ())
7057 supported_tail_call = FALSE;
7061 return supported_tail_call;
/*
 * handle_ctor_call:
 *   Emit IR for the constructor invocation part of a CIL `newobj`.
 *   SP points at the already-pushed ctor arguments (sp [0] = the new
 *   object / valuetype address).  Picks between an intrinsic, inlining,
 *   a gsharedvt indirect call, a shared-generic indirect call, or a
 *   plain direct call.  NOTE(review): several lines (else branches,
 *   braces, declarations) are elided in this view of the file.
 */
7067 * Handle calls made to ctors from NEWOBJ opcodes.
7070 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7071 MonoInst **sp, guint8 *ip, int *inline_costs)
7073 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an rgctx/vtable argument so the
 * callee can recover its generic context. */
7075 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7076 mono_method_is_generic_sharable (cmethod, TRUE)) {
7077 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* Called for its side effect: ensure the vtable is created/loadable. */
7078 mono_class_vtable (cfg->domain, cmethod->klass);
7079 CHECK_TYPELOAD (cmethod->klass);
7081 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7082 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7085 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7086 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared case: embed the concrete vtable as a constant. */
7088 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7090 CHECK_TYPELOAD (cmethod->klass);
7091 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7096 /* Avoid virtual calls to ctors if possible */
/* MarshalByRef objects may be remoting proxies, so the ctor must go
 * through the virtual-call machinery with the real `this`. */
7097 if (mono_class_is_marshalbyref (cmethod->klass))
7098 callvirt_this_arg = sp [0];
/* Try a JIT intrinsic for the ctor first. */
7100 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7101 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7102 CHECK_CFG_EXCEPTION;
/* Next preference: inline the ctor body (not for Exception subclasses,
 * whose ctors capture stack traces). */
7103 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7104 mono_method_check_inlining (cfg, cmethod) &&
7105 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7108 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 = size of the CIL call instruction being replaced. */
7109 cfg->real_offset += 5;
7111 *inline_costs += costs - 5;
7113 INLINE_FAILURE ("inline failure");
7114 // FIXME-VT: Clean this up
7115 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7116 GSHAREDVT_FAILURE(*ip);
7117 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call indirectly through an out trampoline. */
7119 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7122 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7124 if (cfg->llvm_only) {
7125 // FIXME: Avoid initializing vtable_arg
7126 emit_llvmonly_calli (cfg, fsig, sp, addr);
7128 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* Shared-generic context where a direct patched call is impossible:
 * fetch the code address from the rgctx and call indirectly. */
7130 } else if (context_used &&
7131 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7132 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7133 MonoInst *cmethod_addr;
7135 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7137 if (cfg->llvm_only) {
7138 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7139 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7140 emit_llvmonly_calli (cfg, fsig, sp, addr);
7142 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7143 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7145 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: plain direct call to the ctor. */
7148 INLINE_FAILURE ("ctor call");
7149 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7150 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *   Emit IR to set VAL as the return value of the current method
 *   (cfg->method).  Valuetype returns that need CEE_STOBJ semantics are
 *   stored through the hidden return-value address when one exists;
 *   scalar returns go through mono_arch_emit_setret.
 *   NOTE(review): some lines (else keywords, declarations) are elided in
 *   this view of the file.
 */
7157 emit_setret (MonoCompile *cfg, MonoInst *val)
7159 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* CEE_STOBJ here means "returned as a value that must be copied", i.e. a
 * struct too large for registers. */
7162 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7165 if (!cfg->vret_addr) {
/* No hidden return-address argument: store into the return variable. */
7166 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Hidden vret argument: copy the struct through its address. */
7168 EMIT_NEW_RETLOADA (cfg, ret_addr);
7170 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7171 ins->klass = mono_class_from_mono_type (ret_type);
7174 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets return R4 through an icall that converts the
 * in-memory representation to the ABI's expected form. */
7175 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7176 MonoInst *iargs [1];
7180 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7181 mono_arch_emit_setret (cfg, cfg->method, conv);
7183 mono_arch_emit_setret (cfg, cfg->method, val);
7186 mono_arch_emit_setret (cfg, cfg->method, val);
7192 * mono_method_to_ir:
7194 * Translate the .net IL into linear IR.
7196 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7197 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7198 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7199 * @inline_args: if not NULL, contains the arguments to the inline call
7200 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7201 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7203 * This method is used to turn ECMA IL into Mono's internal Linear IR
7204 * reprensetation. It is used both for entire methods, as well as
7205 * inlining existing methods. In the former case, the @start_bblock,
7206 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7207 * inline_offset is set to zero.
7209 * Returns: the inline cost, or -1 if there was an error processing this method.
7212 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7213 MonoInst *return_var, MonoInst **inline_args,
7214 guint inline_offset, gboolean is_virtual_call)
7217 MonoInst *ins, **sp, **stack_start;
7218 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7219 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7220 MonoMethod *cmethod, *method_definition;
7221 MonoInst **arg_array;
7222 MonoMethodHeader *header;
7224 guint32 token, ins_flag;
7226 MonoClass *constrained_class = NULL;
7227 unsigned char *ip, *end, *target, *err_pos;
7228 MonoMethodSignature *sig;
7229 MonoGenericContext *generic_context = NULL;
7230 MonoGenericContainer *generic_container = NULL;
7231 MonoType **param_types;
7232 int i, n, start_new_bblock, dreg;
7233 int num_calls = 0, inline_costs = 0;
7234 int breakpoint_id = 0;
7236 GSList *class_inits = NULL;
7237 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7239 gboolean init_locals, seq_points, skip_dead_blocks;
7240 gboolean sym_seq_points = FALSE;
7241 MonoDebugMethodInfo *minfo;
7242 MonoBitSet *seq_point_locs = NULL;
7243 MonoBitSet *seq_point_set_locs = NULL;
7245 cfg->disable_inline = is_jit_optimizer_disabled (method);
7247 /* serialization and xdomain stuff may need access to private fields and methods */
7248 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7249 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7250 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7251 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7252 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7253 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7255 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7256 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7257 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7258 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7259 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7261 image = method->klass->image;
7262 header = mono_method_get_header_checked (method, &cfg->error);
7264 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7265 goto exception_exit;
7267 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7270 generic_container = mono_method_get_generic_container (method);
7271 sig = mono_method_signature (method);
7272 num_args = sig->hasthis + sig->param_count;
7273 ip = (unsigned char*)header->code;
7274 cfg->cil_start = ip;
7275 end = ip + header->code_size;
7276 cfg->stat_cil_code_size += header->code_size;
7278 seq_points = cfg->gen_seq_points && cfg->method == method;
7280 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7281 /* We could hit a seq point before attaching to the JIT (#8338) */
7285 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7286 minfo = mono_debug_lookup_method (method);
7288 MonoSymSeqPoint *sps;
7289 int i, n_il_offsets;
7291 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7292 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7293 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7294 sym_seq_points = TRUE;
7295 for (i = 0; i < n_il_offsets; ++i) {
7296 if (sps [i].il_offset < header->code_size)
7297 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7301 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7303 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7305 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7306 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7308 mono_debug_free_method_async_debug_info (asyncMethod);
7310 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7311 /* Methods without line number info like auto-generated property accessors */
7312 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7313 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7314 sym_seq_points = TRUE;
7319 * Methods without init_locals set could cause asserts in various passes
7320 * (#497220). To work around this, we emit dummy initialization opcodes
7321 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7322 * on some platforms.
7324 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7325 init_locals = header->init_locals;
7329 method_definition = method;
7330 while (method_definition->is_inflated) {
7331 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7332 method_definition = imethod->declaring;
7335 /* SkipVerification is not allowed if core-clr is enabled */
7336 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7338 dont_verify_stloc = TRUE;
7341 if (sig->is_inflated)
7342 generic_context = mono_method_get_context (method);
7343 else if (generic_container)
7344 generic_context = &generic_container->context;
7345 cfg->generic_context = generic_context;
7348 g_assert (!sig->has_type_parameters);
7350 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7351 g_assert (method->is_inflated);
7352 g_assert (mono_method_get_context (method)->method_inst);
7354 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7355 g_assert (sig->generic_param_count);
7357 if (cfg->method == method) {
7358 cfg->real_offset = 0;
7360 cfg->real_offset = inline_offset;
7363 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7364 cfg->cil_offset_to_bb_len = header->code_size;
7366 cfg->current_method = method;
7368 if (cfg->verbose_level > 2)
7369 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7371 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7373 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7374 for (n = 0; n < sig->param_count; ++n)
7375 param_types [n + sig->hasthis] = sig->params [n];
7376 cfg->arg_types = param_types;
7378 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7379 if (cfg->method == method) {
7381 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7384 NEW_BBLOCK (cfg, start_bblock);
7385 cfg->bb_entry = start_bblock;
7386 start_bblock->cil_code = NULL;
7387 start_bblock->cil_length = 0;
7390 NEW_BBLOCK (cfg, end_bblock);
7391 cfg->bb_exit = end_bblock;
7392 end_bblock->cil_code = NULL;
7393 end_bblock->cil_length = 0;
7394 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7395 g_assert (cfg->num_bblocks == 2);
7397 arg_array = cfg->args;
7399 if (header->num_clauses) {
7400 cfg->spvars = g_hash_table_new (NULL, NULL);
7401 cfg->exvars = g_hash_table_new (NULL, NULL);
7403 /* handle exception clauses */
7404 for (i = 0; i < header->num_clauses; ++i) {
7405 MonoBasicBlock *try_bb;
7406 MonoExceptionClause *clause = &header->clauses [i];
7407 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7409 try_bb->real_offset = clause->try_offset;
7410 try_bb->try_start = TRUE;
7411 try_bb->region = ((i + 1) << 8) | clause->flags;
7412 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7413 tblock->real_offset = clause->handler_offset;
7414 tblock->flags |= BB_EXCEPTION_HANDLER;
7416 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
7417 mono_create_exvar_for_offset (cfg, clause->handler_offset);
7419 * Linking the try block with the EH block hinders inlining as we won't be able to
7420 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7422 if (COMPILE_LLVM (cfg))
7423 link_bblock (cfg, try_bb, tblock);
7425 if (*(ip + clause->handler_offset) == CEE_POP)
7426 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7428 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7429 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7430 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7431 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7432 MONO_ADD_INS (tblock, ins);
7434 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7435 /* finally clauses already have a seq point */
7436 /* seq points for filter clauses are emitted below */
7437 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7438 MONO_ADD_INS (tblock, ins);
7441 /* todo: is a fault block unsafe to optimize? */
7442 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7443 tblock->flags |= BB_EXCEPTION_UNSAFE;
7446 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7448 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7450 /* catch and filter blocks get the exception object on the stack */
7451 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7452 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7454 /* mostly like handle_stack_args (), but just sets the input args */
7455 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7456 tblock->in_scount = 1;
7457 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7458 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7462 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7463 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7464 if (!cfg->compile_llvm) {
7465 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7466 ins->dreg = tblock->in_stack [0]->dreg;
7467 MONO_ADD_INS (tblock, ins);
7470 MonoInst *dummy_use;
7473 * Add a dummy use for the exvar so its liveness info will be
7476 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7479 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7480 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7481 MONO_ADD_INS (tblock, ins);
7484 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7485 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7486 tblock->flags |= BB_EXCEPTION_HANDLER;
7487 tblock->real_offset = clause->data.filter_offset;
7488 tblock->in_scount = 1;
7489 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7490 /* The filter block shares the exvar with the handler block */
7491 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7492 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7493 MONO_ADD_INS (tblock, ins);
7497 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7498 clause->data.catch_class &&
7500 mono_class_check_context_used (clause->data.catch_class)) {
7502 * In shared generic code with catch
7503 * clauses containing type variables
7504 * the exception handling code has to
7505 * be able to get to the rgctx.
7506 * Therefore we have to make sure that
7507 * the vtable/mrgctx argument (for
7508 * static or generic methods) or the
7509 * "this" argument (for non-static
7510 * methods) are live.
7512 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7513 mini_method_get_context (method)->method_inst ||
7514 method->klass->valuetype) {
7515 mono_get_vtable_var (cfg);
7517 MonoInst *dummy_use;
7519 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7524 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7525 cfg->cbb = start_bblock;
7526 cfg->args = arg_array;
7527 mono_save_args (cfg, sig, inline_args);
7530 /* FIRST CODE BLOCK */
7531 NEW_BBLOCK (cfg, tblock);
7532 tblock->cil_code = ip;
7536 ADD_BBLOCK (cfg, tblock);
7538 if (cfg->method == method) {
7539 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7540 if (breakpoint_id) {
7541 MONO_INST_NEW (cfg, ins, OP_BREAK);
7542 MONO_ADD_INS (cfg->cbb, ins);
7546 /* we use a separate basic block for the initialization code */
7547 NEW_BBLOCK (cfg, init_localsbb);
7548 if (cfg->method == method)
7549 cfg->bb_init = init_localsbb;
7550 init_localsbb->real_offset = cfg->real_offset;
7551 start_bblock->next_bb = init_localsbb;
7552 init_localsbb->next_bb = cfg->cbb;
7553 link_bblock (cfg, start_bblock, init_localsbb);
7554 link_bblock (cfg, init_localsbb, cfg->cbb);
7556 cfg->cbb = init_localsbb;
7558 if (cfg->gsharedvt && cfg->method == method) {
7559 MonoGSharedVtMethodInfo *info;
7560 MonoInst *var, *locals_var;
7563 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7564 info->method = cfg->method;
7565 info->count_entries = 16;
7566 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7567 cfg->gsharedvt_info = info;
7569 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7570 /* prevent it from being register allocated */
7571 //var->flags |= MONO_INST_VOLATILE;
7572 cfg->gsharedvt_info_var = var;
7574 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7575 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7577 /* Allocate locals */
7578 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7579 /* prevent it from being register allocated */
7580 //locals_var->flags |= MONO_INST_VOLATILE;
7581 cfg->gsharedvt_locals_var = locals_var;
7583 dreg = alloc_ireg (cfg);
7584 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7586 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7587 ins->dreg = locals_var->dreg;
7589 MONO_ADD_INS (cfg->cbb, ins);
7590 cfg->gsharedvt_locals_var_ins = ins;
7592 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7595 ins->flags |= MONO_INST_INIT;
7599 if (mono_security_core_clr_enabled ()) {
7600 /* check if this is native code, e.g. an icall or a p/invoke */
7601 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7602 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7604 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7605 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7607 /* if this ia a native call then it can only be JITted from platform code */
7608 if ((icall || pinvk) && method->klass && method->klass->image) {
7609 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7610 MonoException *ex = icall ? mono_get_exception_security () :
7611 mono_get_exception_method_access ();
7612 emit_throw_exception (cfg, ex);
7619 CHECK_CFG_EXCEPTION;
7621 if (header->code_size == 0)
7624 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7629 if (cfg->method == method)
7630 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7632 for (n = 0; n < header->num_locals; ++n) {
7633 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7638 /* We force the vtable variable here for all shared methods
7639 for the possibility that they might show up in a stack
7640 trace where their exact instantiation is needed. */
7641 if (cfg->gshared && method == cfg->method) {
7642 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7643 mini_method_get_context (method)->method_inst ||
7644 method->klass->valuetype) {
7645 mono_get_vtable_var (cfg);
7647 /* FIXME: Is there a better way to do this?
7648 We need the variable live for the duration
7649 of the whole method. */
7650 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7654 /* add a check for this != NULL to inlined methods */
7655 if (is_virtual_call) {
7658 NEW_ARGLOAD (cfg, arg_ins, 0);
7659 MONO_ADD_INS (cfg->cbb, arg_ins);
7660 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7663 skip_dead_blocks = !dont_verify;
7664 if (skip_dead_blocks) {
7665 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7670 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7671 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7674 start_new_bblock = 0;
7676 if (cfg->method == method)
7677 cfg->real_offset = ip - header->code;
7679 cfg->real_offset = inline_offset;
7684 if (start_new_bblock) {
7685 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7686 if (start_new_bblock == 2) {
7687 g_assert (ip == tblock->cil_code);
7689 GET_BBLOCK (cfg, tblock, ip);
7691 cfg->cbb->next_bb = tblock;
7693 start_new_bblock = 0;
7694 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7695 if (cfg->verbose_level > 3)
7696 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7697 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7701 g_slist_free (class_inits);
7704 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7705 link_bblock (cfg, cfg->cbb, tblock);
7706 if (sp != stack_start) {
7707 handle_stack_args (cfg, stack_start, sp - stack_start);
7709 CHECK_UNVERIFIABLE (cfg);
7711 cfg->cbb->next_bb = tblock;
7713 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7714 if (cfg->verbose_level > 3)
7715 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7716 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7719 g_slist_free (class_inits);
7724 if (skip_dead_blocks) {
7725 int ip_offset = ip - header->code;
7727 if (ip_offset == bb->end)
7731 int op_size = mono_opcode_size (ip, end);
7732 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7734 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7736 if (ip_offset + op_size == bb->end) {
7737 MONO_INST_NEW (cfg, ins, OP_NOP);
7738 MONO_ADD_INS (cfg->cbb, ins);
7739 start_new_bblock = 1;
7747 * Sequence points are points where the debugger can place a breakpoint.
7748 * Currently, we generate these automatically at points where the IL
7751 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7753 * Make methods interruptable at the beginning, and at the targets of
7754 * backward branches.
7755 * Also, do this at the start of every bblock in methods with clauses too,
7756 * to be able to handle instructions with imprecise control flow like
7758 * Backward branches are handled at the end of method-to-ir ().
7760 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7761 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7763 /* Avoid sequence points on empty IL like .volatile */
7764 // FIXME: Enable this
7765 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7766 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7767 if ((sp != stack_start) && !sym_seq_point)
7768 ins->flags |= MONO_INST_NONEMPTY_STACK;
7769 MONO_ADD_INS (cfg->cbb, ins);
7772 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7775 cfg->cbb->real_offset = cfg->real_offset;
7777 if ((cfg->method == method) && cfg->coverage_info) {
7778 guint32 cil_offset = ip - header->code;
7779 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
7780 cfg->coverage_info->data [cil_offset].cil_code = ip;
7782 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
7783 MonoInst *one_ins, *load_ins;
7785 EMIT_NEW_PCONST (cfg, load_ins, counter);
7786 EMIT_NEW_ICONST (cfg, one_ins, 1);
7787 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
7788 ins->dreg = mono_alloc_ireg (cfg);
7789 ins->inst_basereg = load_ins->dreg;
7790 ins->inst_offset = 0;
7791 ins->sreg2 = one_ins->dreg;
7792 ins->type = STACK_I4;
7793 MONO_ADD_INS (cfg->cbb, ins);
7795 EMIT_NEW_PCONST (cfg, ins, counter);
7796 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7800 if (cfg->verbose_level > 3)
7801 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7805 if (seq_points && !sym_seq_points && sp != stack_start) {
7807 * The C# compiler uses these nops to notify the JIT that it should
7808 * insert seq points.
7810 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7811 MONO_ADD_INS (cfg->cbb, ins);
7813 if (cfg->keep_cil_nops)
7814 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7816 MONO_INST_NEW (cfg, ins, OP_NOP);
7818 MONO_ADD_INS (cfg->cbb, ins);
7821 if (mini_should_insert_breakpoint (cfg->method)) {
7822 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7824 MONO_INST_NEW (cfg, ins, OP_NOP);
7827 MONO_ADD_INS (cfg->cbb, ins);
7833 CHECK_STACK_OVF (1);
7834 n = (*ip)-CEE_LDARG_0;
7836 EMIT_NEW_ARGLOAD (cfg, ins, n);
7844 CHECK_STACK_OVF (1);
7845 n = (*ip)-CEE_LDLOC_0;
7847 EMIT_NEW_LOCLOAD (cfg, ins, n);
7856 n = (*ip)-CEE_STLOC_0;
7859 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7861 emit_stloc_ir (cfg, sp, header, n);
7868 CHECK_STACK_OVF (1);
7871 EMIT_NEW_ARGLOAD (cfg, ins, n);
7877 CHECK_STACK_OVF (1);
7880 NEW_ARGLOADA (cfg, ins, n);
7881 MONO_ADD_INS (cfg->cbb, ins);
7891 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7893 emit_starg_ir (cfg, sp, n);
7898 CHECK_STACK_OVF (1);
7901 if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
7902 /* Avoid loading a struct just to load one of its fields */
7903 EMIT_NEW_LOCLOADA (cfg, ins, n);
7905 EMIT_NEW_LOCLOAD (cfg, ins, n);
7910 case CEE_LDLOCA_S: {
7911 unsigned char *tmp_ip;
7913 CHECK_STACK_OVF (1);
7914 CHECK_LOCAL (ip [1]);
7916 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7922 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7931 CHECK_LOCAL (ip [1]);
7932 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7934 emit_stloc_ir (cfg, sp, header, ip [1]);
7939 CHECK_STACK_OVF (1);
7940 EMIT_NEW_PCONST (cfg, ins, NULL);
7941 ins->type = STACK_OBJ;
7946 CHECK_STACK_OVF (1);
7947 EMIT_NEW_ICONST (cfg, ins, -1);
7960 CHECK_STACK_OVF (1);
7961 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7967 CHECK_STACK_OVF (1);
7969 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7975 CHECK_STACK_OVF (1);
7976 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7982 CHECK_STACK_OVF (1);
7983 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7984 ins->type = STACK_I8;
7985 ins->dreg = alloc_dreg (cfg, STACK_I8);
7987 ins->inst_l = (gint64)read64 (ip);
7988 MONO_ADD_INS (cfg->cbb, ins);
7994 gboolean use_aotconst = FALSE;
7996 #ifdef TARGET_POWERPC
7997 /* FIXME: Clean this up */
7998 if (cfg->compile_aot)
7999 use_aotconst = TRUE;
8002 /* FIXME: we should really allocate this only late in the compilation process */
8003 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8005 CHECK_STACK_OVF (1);
8011 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8013 dreg = alloc_freg (cfg);
8014 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8015 ins->type = cfg->r4_stack_type;
8017 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8018 ins->type = cfg->r4_stack_type;
8019 ins->dreg = alloc_dreg (cfg, STACK_R8);
8021 MONO_ADD_INS (cfg->cbb, ins);
8031 gboolean use_aotconst = FALSE;
8033 #ifdef TARGET_POWERPC
8034 /* FIXME: Clean this up */
8035 if (cfg->compile_aot)
8036 use_aotconst = TRUE;
8039 /* FIXME: we should really allocate this only late in the compilation process */
8040 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8042 CHECK_STACK_OVF (1);
8048 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8050 dreg = alloc_freg (cfg);
8051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8052 ins->type = STACK_R8;
8054 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8055 ins->type = STACK_R8;
8056 ins->dreg = alloc_dreg (cfg, STACK_R8);
8058 MONO_ADD_INS (cfg->cbb, ins);
8067 MonoInst *temp, *store;
8069 CHECK_STACK_OVF (1);
8073 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8074 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8076 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8079 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8092 if (sp [0]->type == STACK_R8)
8093 /* we need to pop the value from the x86 FP stack */
8094 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8099 MonoMethodSignature *fsig;
8102 INLINE_FAILURE ("jmp");
8103 GSHAREDVT_FAILURE (*ip);
8106 if (stack_start != sp)
8108 token = read32 (ip + 1);
8109 /* FIXME: check the signature matches */
8110 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8113 if (cfg->gshared && mono_method_check_context_used (cmethod))
8114 GENERIC_SHARING_FAILURE (CEE_JMP);
8116 mini_profiler_emit_tail_call (cfg, cmethod);
8118 fsig = mono_method_signature (cmethod);
8119 n = fsig->param_count + fsig->hasthis;
8120 if (cfg->llvm_only) {
8123 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8124 for (i = 0; i < n; ++i)
8125 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8126 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8128 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8129 * have to emit a normal return since llvm expects it.
8132 emit_setret (cfg, ins);
8133 MONO_INST_NEW (cfg, ins, OP_BR);
8134 ins->inst_target_bb = end_bblock;
8135 MONO_ADD_INS (cfg->cbb, ins);
8136 link_bblock (cfg, cfg->cbb, end_bblock);
8139 } else if (cfg->backend->have_op_tail_call) {
8140 /* Handle tail calls similarly to calls */
8143 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8144 call->method = cmethod;
8145 call->tail_call = TRUE;
8146 call->signature = mono_method_signature (cmethod);
8147 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8148 call->inst.inst_p0 = cmethod;
8149 for (i = 0; i < n; ++i)
8150 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8152 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8153 call->vret_var = cfg->vret_addr;
8155 mono_arch_emit_call (cfg, call);
8156 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8157 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8159 for (i = 0; i < num_args; ++i)
8160 /* Prevent arguments from being optimized away */
8161 arg_array [i]->flags |= MONO_INST_VOLATILE;
8163 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8164 ins = (MonoInst*)call;
8165 ins->inst_p0 = cmethod;
8166 MONO_ADD_INS (cfg->cbb, ins);
8170 start_new_bblock = 1;
8175 MonoMethodSignature *fsig;
8178 token = read32 (ip + 1);
8182 //GSHAREDVT_FAILURE (*ip);
8187 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8190 if (method->dynamic && fsig->pinvoke) {
8194 * This is a call through a function pointer using a pinvoke
8195 * signature. Have to create a wrapper and call that instead.
8196 * FIXME: This is very slow, need to create a wrapper at JIT time
8197 * instead based on the signature.
8199 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8200 EMIT_NEW_PCONST (cfg, args [1], fsig);
8202 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8205 n = fsig->param_count + fsig->hasthis;
8209 //g_assert (!virtual_ || fsig->hasthis);
8213 inline_costs += 10 * num_calls++;
8216 * Making generic calls out of gsharedvt methods.
8217 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8218 * patching gshared method addresses into a gsharedvt method.
8220 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8222 * We pass the address to the gsharedvt trampoline in the rgctx reg
8224 MonoInst *callee = addr;
8226 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8228 GSHAREDVT_FAILURE (*ip);
8232 GSHAREDVT_FAILURE (*ip);
8234 addr = emit_get_rgctx_sig (cfg, context_used,
8235 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8236 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8240 /* Prevent inlining of methods with indirect calls */
8241 INLINE_FAILURE ("indirect call");
8243 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8244 MonoJumpInfoType info_type;
8248 * Instead of emitting an indirect call, emit a direct call
8249 * with the contents of the aotconst as the patch info.
8251 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8252 info_type = (MonoJumpInfoType)addr->inst_c1;
8253 info_data = addr->inst_p0;
8255 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8256 info_data = addr->inst_right->inst_left;
8259 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8260 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8263 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8264 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8269 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8273 /* End of call, INS should contain the result of the call, if any */
8275 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8277 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8280 CHECK_CFG_EXCEPTION;
8284 constrained_class = NULL;
8288 case CEE_CALLVIRT: {
8289 MonoInst *addr = NULL;
8290 MonoMethodSignature *fsig = NULL;
8292 int virtual_ = *ip == CEE_CALLVIRT;
8293 gboolean pass_imt_from_rgctx = FALSE;
8294 MonoInst *imt_arg = NULL;
8295 MonoInst *keep_this_alive = NULL;
8296 gboolean pass_vtable = FALSE;
8297 gboolean pass_mrgctx = FALSE;
8298 MonoInst *vtable_arg = NULL;
8299 gboolean check_this = FALSE;
8300 gboolean supported_tail_call = FALSE;
8301 gboolean tail_call = FALSE;
8302 gboolean need_seq_point = FALSE;
8303 guint32 call_opcode = *ip;
8304 gboolean emit_widen = TRUE;
8305 gboolean push_res = TRUE;
8306 gboolean skip_ret = FALSE;
8307 gboolean delegate_invoke = FALSE;
8308 gboolean direct_icall = FALSE;
8309 gboolean constrained_partial_call = FALSE;
8310 MonoMethod *cil_method;
8313 token = read32 (ip + 1);
8317 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8320 cil_method = cmethod;
8322 if (constrained_class) {
8323 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8324 if (!mini_is_gsharedvt_klass (constrained_class)) {
8325 g_assert (!cmethod->klass->valuetype);
8326 if (!mini_type_is_reference (&constrained_class->byval_arg))
8327 constrained_partial_call = TRUE;
8331 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8332 if (cfg->verbose_level > 2)
8333 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8334 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8335 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8337 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8341 if (cfg->verbose_level > 2)
8342 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8344 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8346 * This is needed since get_method_constrained can't find
8347 * the method in klass representing a type var.
8348 * The type var is guaranteed to be a reference type in this
8351 if (!mini_is_gsharedvt_klass (constrained_class))
8352 g_assert (!cmethod->klass->valuetype);
8354 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8359 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8360 /* Use the corresponding method from the base type to avoid boxing */
8361 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8362 g_assert (base_type);
8363 constrained_class = mono_class_from_mono_type (base_type);
8364 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8369 if (!dont_verify && !cfg->skip_visibility) {
8370 MonoMethod *target_method = cil_method;
8371 if (method->is_inflated) {
8372 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8375 if (!mono_method_can_access_method (method_definition, target_method) &&
8376 !mono_method_can_access_method (method, cil_method))
8377 emit_method_access_failure (cfg, method, cil_method);
8380 if (mono_security_core_clr_enabled ())
8381 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8383 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8384 /* MS.NET seems to silently convert this to a callvirt */
8389 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8390 * converts to a callvirt.
8392 * tests/bug-515884.il is an example of this behavior
8394 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8395 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8396 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8400 if (!cmethod->klass->inited)
8401 if (!mono_class_init (cmethod->klass))
8402 TYPE_LOAD_ERROR (cmethod->klass);
8404 fsig = mono_method_signature (cmethod);
8407 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8408 mini_class_is_system_array (cmethod->klass)) {
8409 array_rank = cmethod->klass->rank;
8410 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8411 direct_icall = TRUE;
8412 } else if (fsig->pinvoke) {
8413 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8414 fsig = mono_method_signature (wrapper);
8415 } else if (constrained_class) {
8417 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8421 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8422 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8424 /* See code below */
8425 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8426 MonoBasicBlock *tbb;
8428 GET_BBLOCK (cfg, tbb, ip + 5);
8429 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8431 * We want to extend the try block to cover the call, but we can't do it if the
8432 * call is made directly since it's followed by an exception check.
8434 direct_icall = FALSE;
8438 mono_save_token_info (cfg, image, token, cil_method);
8440 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8441 need_seq_point = TRUE;
8443 /* Don't support calls made using type arguments for now */
8445 if (cfg->gsharedvt) {
8446 if (mini_is_gsharedvt_signature (fsig))
8447 GSHAREDVT_FAILURE (*ip);
8451 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8452 g_assert_not_reached ();
8454 n = fsig->param_count + fsig->hasthis;
8456 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8460 g_assert (!mono_method_check_context_used (cmethod));
8464 //g_assert (!virtual_ || fsig->hasthis);
8468 if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
8469 cfg->cbb->out_of_line = TRUE;
8472 * We have the `constrained.' prefix opcode.
8474 if (constrained_class) {
8475 if (mini_is_gsharedvt_klass (constrained_class)) {
8476 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8477 /* The 'Own method' case below */
8478 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8479 /* 'The type parameter is instantiated as a reference type' case below. */
8481 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8482 CHECK_CFG_EXCEPTION;
8488 if (constrained_partial_call) {
8489 gboolean need_box = TRUE;
8492 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8493 * called method is not known at compile time either. The called method could end up being
8494 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8495 * to box the receiver.
8496 * A simple solution would be to box always and make a normal virtual call, but that would
8497 * be bad performance wise.
8499 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8501 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8506 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8507 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8508 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8509 ins->klass = constrained_class;
8510 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8511 CHECK_CFG_EXCEPTION;
8512 } else if (need_box) {
8514 MonoBasicBlock *is_ref_bb, *end_bb;
8515 MonoInst *nonbox_call;
8518 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8520 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8521 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8523 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8525 NEW_BBLOCK (cfg, is_ref_bb);
8526 NEW_BBLOCK (cfg, end_bb);
8528 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8533 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8538 MONO_START_BB (cfg, is_ref_bb);
8539 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8540 ins->klass = constrained_class;
8541 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8542 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8546 MONO_START_BB (cfg, end_bb);
8549 nonbox_call->dreg = ins->dreg;
8552 g_assert (mono_class_is_interface (cmethod->klass));
8553 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8554 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8557 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8559 * The type parameter is instantiated as a valuetype,
8560 * but that type doesn't override the method we're
8561 * calling, so we need to box `this'.
8563 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8564 ins->klass = constrained_class;
8565 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8566 CHECK_CFG_EXCEPTION;
8567 } else if (!constrained_class->valuetype) {
8568 int dreg = alloc_ireg_ref (cfg);
8571 * The type parameter is instantiated as a reference
8572 * type. We have a managed pointer on the stack, so
8573 * we need to dereference it here.
8575 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8576 ins->type = STACK_OBJ;
8579 if (cmethod->klass->valuetype) {
8582 /* Interface method */
8585 mono_class_setup_vtable (constrained_class);
8586 CHECK_TYPELOAD (constrained_class);
8587 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8589 TYPE_LOAD_ERROR (constrained_class);
8590 slot = mono_method_get_vtable_slot (cmethod);
8592 TYPE_LOAD_ERROR (cmethod->klass);
8593 cmethod = constrained_class->vtable [ioffset + slot];
8595 if (cmethod->klass == mono_defaults.enum_class) {
8596 /* Enum implements some interfaces, so treat this as the first case */
8597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8598 ins->klass = constrained_class;
8599 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8600 CHECK_CFG_EXCEPTION;
8605 constrained_class = NULL;
8608 if (check_call_signature (cfg, fsig, sp))
8611 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8612 delegate_invoke = TRUE;
8614 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8615 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8616 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8624 * If the callee is a shared method, then its static cctor
8625 * might not get called after the call was patched.
8627 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8628 emit_class_init (cfg, cmethod->klass);
8629 CHECK_TYPELOAD (cmethod->klass);
8632 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8635 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8637 context_used = mini_method_check_context_used (cfg, cmethod);
8639 if (context_used && mono_class_is_interface (cmethod->klass)) {
8640 /* Generic method interface
8641 calls are resolved via a
8642 helper function and don't
8644 if (!cmethod_context || !cmethod_context->method_inst)
8645 pass_imt_from_rgctx = TRUE;
8649 * If a shared method calls another
8650 * shared method then the caller must
8651 * have a generic sharing context
8652 * because the magic trampoline
8653 * requires it. FIXME: We shouldn't
8654 * have to force the vtable/mrgctx
8655 * variable here. Instead there
8656 * should be a flag in the cfg to
8657 * request a generic sharing context.
8660 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8661 mono_get_vtable_var (cfg);
8666 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8668 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8670 CHECK_TYPELOAD (cmethod->klass);
8671 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8676 g_assert (!vtable_arg);
8678 if (!cfg->compile_aot) {
8680 * emit_get_rgctx_method () calls mono_class_vtable () so check
8681 * for type load errors before.
8683 mono_class_setup_vtable (cmethod->klass);
8684 CHECK_TYPELOAD (cmethod->klass);
8687 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8689 /* !marshalbyref is needed to properly handle generic methods + remoting */
8690 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8691 MONO_METHOD_IS_FINAL (cmethod)) &&
8692 !mono_class_is_marshalbyref (cmethod->klass)) {
8699 if (pass_imt_from_rgctx) {
8700 g_assert (!pass_vtable);
8702 imt_arg = emit_get_rgctx_method (cfg, context_used,
8703 cmethod, MONO_RGCTX_INFO_METHOD);
8707 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8709 /* Calling virtual generic methods */
8710 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8711 !(MONO_METHOD_IS_FINAL (cmethod) &&
8712 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8713 fsig->generic_param_count &&
8714 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8716 MonoInst *this_temp, *this_arg_temp, *store;
8717 MonoInst *iargs [4];
8719 g_assert (fsig->is_inflated);
8721 /* Prevent inlining of methods that contain indirect calls */
8722 INLINE_FAILURE ("virtual generic call");
8724 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8725 GSHAREDVT_FAILURE (*ip);
8727 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8728 g_assert (!imt_arg);
8730 g_assert (cmethod->is_inflated);
8731 imt_arg = emit_get_rgctx_method (cfg, context_used,
8732 cmethod, MONO_RGCTX_INFO_METHOD);
8733 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8735 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8736 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8737 MONO_ADD_INS (cfg->cbb, store);
8739 /* FIXME: This should be a managed pointer */
8740 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8742 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8743 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8744 cmethod, MONO_RGCTX_INFO_METHOD);
8745 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8746 addr = mono_emit_jit_icall (cfg,
8747 mono_helper_compile_generic_method, iargs);
8749 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8751 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8758 * Implement a workaround for the inherent races involved in locking:
8764 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8765 * try block, the Exit () won't be executed, see:
8766 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8767 * To work around this, we extend such try blocks to include the last x bytes
8768 * of the Monitor.Enter () call.
8770 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8771 MonoBasicBlock *tbb;
8773 GET_BBLOCK (cfg, tbb, ip + 5);
8775 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8776 * from Monitor.Enter like ArgumentNullException.
8778 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8779 /* Mark this bblock as needing to be extended */
8780 tbb->extend_try_block = TRUE;
8784 /* Conversion to a JIT intrinsic */
8785 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8786 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8787 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8795 if ((cfg->opt & MONO_OPT_INLINE) &&
8796 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8797 mono_method_check_inlining (cfg, cmethod)) {
8799 gboolean always = FALSE;
8801 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8802 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8803 /* Prevent inlining of methods that call wrappers */
8804 INLINE_FAILURE ("wrapper call");
8805 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8809 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8811 cfg->real_offset += 5;
8813 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8814 /* *sp is already set by inline_method */
8819 inline_costs += costs;
8825 /* Tail recursion elimination */
8826 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8827 gboolean has_vtargs = FALSE;
8830 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8831 INLINE_FAILURE ("tail call");
8833 /* keep it simple */
8834 for (i = fsig->param_count - 1; i >= 0; i--) {
8835 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8840 if (need_seq_point) {
8841 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8842 need_seq_point = FALSE;
8844 for (i = 0; i < n; ++i)
8845 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8846 MONO_INST_NEW (cfg, ins, OP_BR);
8847 MONO_ADD_INS (cfg->cbb, ins);
8848 tblock = start_bblock->out_bb [0];
8849 link_bblock (cfg, cfg->cbb, tblock);
8850 ins->inst_target_bb = tblock;
8851 start_new_bblock = 1;
8853 /* skip the CEE_RET, too */
8854 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
8861 inline_costs += 10 * num_calls++;
8864 * Synchronized wrappers.
8865 * It's hard to determine where to replace a method with its synchronized
8866 * wrapper without causing an infinite recursion. The current solution is
8867 * to add the synchronized wrapper in the trampolines, and to
8868 * change the called method to a dummy wrapper, and resolve that wrapper
8869 * to the real method in mono_jit_compile_method ().
8871 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8872 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8873 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8874 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8878 * Making generic calls out of gsharedvt methods.
8879 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8880 * patching gshared method addresses into a gsharedvt method.
8882 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
8883 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
8884 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
8885 MonoRgctxInfoType info_type;
8888 //if (mono_class_is_interface (cmethod->klass))
8889 //GSHAREDVT_FAILURE (*ip);
8890 // disable for possible remoting calls
8891 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8892 GSHAREDVT_FAILURE (*ip);
8893 if (fsig->generic_param_count) {
8894 /* virtual generic call */
8895 g_assert (!imt_arg);
8896 /* Same as the virtual generic case above */
8897 imt_arg = emit_get_rgctx_method (cfg, context_used,
8898 cmethod, MONO_RGCTX_INFO_METHOD);
8899 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8901 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
8902 /* This can happen when we call a fully instantiated iface method */
8903 imt_arg = emit_get_rgctx_method (cfg, context_used,
8904 cmethod, MONO_RGCTX_INFO_METHOD);
8909 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8910 keep_this_alive = sp [0];
8912 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8913 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8915 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8916 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8918 if (cfg->llvm_only) {
8919 // FIXME: Avoid initializing vtable_arg
8920 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8922 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8927 /* Generic sharing */
8930 * Use this if the callee is gsharedvt sharable too, since
8931 * at runtime we might find an instantiation so the call cannot
8932 * be patched (the 'no_patch' code path in mini-trampolines.c).
8934 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8935 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8936 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8937 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
8938 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8939 INLINE_FAILURE ("gshared");
8941 g_assert (cfg->gshared && cmethod);
8945 * We are compiling a call to a
8946 * generic method from shared code,
8947 * which means that we have to look up
8948 * the method in the rgctx and do an
8952 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8954 if (cfg->llvm_only) {
8955 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
8956 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
8958 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8959 // FIXME: Avoid initializing imt_arg/vtable_arg
8960 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8962 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8963 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8968 /* Direct calls to icalls */
8970 MonoMethod *wrapper;
8973 /* Inline the wrapper */
8974 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8976 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
8977 g_assert (costs > 0);
8978 cfg->real_offset += 5;
8980 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8981 /* *sp is already set by inline_method */
8986 inline_costs += costs;
8995 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8996 MonoInst *val = sp [fsig->param_count];
8998 if (val->type == STACK_OBJ) {
8999 MonoInst *iargs [2];
9004 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9007 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9008 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9009 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9010 mini_emit_write_barrier (cfg, addr, val);
9011 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9012 GSHAREDVT_FAILURE (*ip);
9013 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9014 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9016 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9017 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9018 if (!cmethod->klass->element_class->valuetype && !readonly)
9019 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9020 CHECK_TYPELOAD (cmethod->klass);
9023 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9026 g_assert_not_reached ();
9033 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9037 /* Tail prefix / tail call optimization */
9039 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9040 /* FIXME: runtime generic context pointer for jumps? */
9041 /* FIXME: handle this for generic sharing eventually */
9042 if ((ins_flag & MONO_INST_TAILCALL) &&
9043 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9044 supported_tail_call = TRUE;
9046 if (supported_tail_call) {
9049 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9050 INLINE_FAILURE ("tail call");
9052 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9054 if (cfg->backend->have_op_tail_call) {
9055 /* Handle tail calls similarly to normal calls */
9058 mini_profiler_emit_tail_call (cfg, cmethod);
9060 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9061 call->tail_call = TRUE;
9062 call->method = cmethod;
9063 call->signature = mono_method_signature (cmethod);
9066 * We implement tail calls by storing the actual arguments into the
9067 * argument variables, then emitting a CEE_JMP.
9069 for (i = 0; i < n; ++i) {
9070 /* Prevent argument from being register allocated */
9071 arg_array [i]->flags |= MONO_INST_VOLATILE;
9072 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9074 ins = (MonoInst*)call;
9075 ins->inst_p0 = cmethod;
9076 ins->inst_p1 = arg_array [0];
9077 MONO_ADD_INS (cfg->cbb, ins);
9078 link_bblock (cfg, cfg->cbb, end_bblock);
9079 start_new_bblock = 1;
9081 // FIXME: Eliminate unreachable epilogs
9084 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9085 * only reachable from this call.
9087 GET_BBLOCK (cfg, tblock, ip + 5);
9088 if (tblock == cfg->cbb || tblock->in_count == 0)
9097 * Virtual calls in llvm-only mode.
9099 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9100 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9105 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9106 INLINE_FAILURE ("call");
9107 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9108 imt_arg, vtable_arg);
9110 if (tail_call && !cfg->llvm_only) {
9111 link_bblock (cfg, cfg->cbb, end_bblock);
9112 start_new_bblock = 1;
9114 // FIXME: Eliminate unreachable epilogs
9117 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9118 * only reachable from this call.
9120 GET_BBLOCK (cfg, tblock, ip + 5);
9121 if (tblock == cfg->cbb || tblock->in_count == 0)
9128 /* End of call, INS should contain the result of the call, if any */
9130 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9133 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9138 if (keep_this_alive) {
9139 MonoInst *dummy_use;
9141 /* See mono_emit_method_call_full () */
9142 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9145 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9147 * Clang can convert these calls to tail calls which screw up the stack
9148 * walk. This happens even when the -fno-optimize-sibling-calls
9149 * option is passed to clang.
9150 * Work around this by emitting a dummy call.
9152 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9155 CHECK_CFG_EXCEPTION;
9159 g_assert (*ip == CEE_RET);
9163 constrained_class = NULL;
9165 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9169 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
9171 if (cfg->method != method) {
9172 /* return from inlined method */
9174 * If in_count == 0, that means the ret is unreachable due to
9175 * being preceded by a throw. In that case, inline_method () will
9176 * handle setting the return value
9177 * (test case: test_0_inline_throw ()).
9179 if (return_var && cfg->cbb->in_count) {
9180 MonoType *ret_type = mono_method_signature (method)->ret;
9186 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9189 //g_assert (returnvar != -1);
9190 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9191 cfg->ret_var_set = TRUE;
9194 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9198 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9200 if (seq_points && !sym_seq_points) {
9202 * Place a seq point here too even though the IL stack is not
9203 * empty, so a step over on
9206 * will work correctly.
9208 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9209 MONO_ADD_INS (cfg->cbb, ins);
9212 g_assert (!return_var);
9216 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9219 emit_setret (cfg, *sp);
9222 if (sp != stack_start)
9224 MONO_INST_NEW (cfg, ins, OP_BR);
9226 ins->inst_target_bb = end_bblock;
9227 MONO_ADD_INS (cfg->cbb, ins);
9228 link_bblock (cfg, cfg->cbb, end_bblock);
9229 start_new_bblock = 1;
9233 MONO_INST_NEW (cfg, ins, OP_BR);
9235 target = ip + 1 + (signed char)(*ip);
9237 GET_BBLOCK (cfg, tblock, target);
9238 link_bblock (cfg, cfg->cbb, tblock);
9239 ins->inst_target_bb = tblock;
9240 if (sp != stack_start) {
9241 handle_stack_args (cfg, stack_start, sp - stack_start);
9243 CHECK_UNVERIFIABLE (cfg);
9245 MONO_ADD_INS (cfg->cbb, ins);
9246 start_new_bblock = 1;
9247 inline_costs += BRANCH_COST;
9261 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9263 target = ip + 1 + *(signed char*)ip;
9269 inline_costs += BRANCH_COST;
9273 MONO_INST_NEW (cfg, ins, OP_BR);
9276 target = ip + 4 + (gint32)read32(ip);
9278 GET_BBLOCK (cfg, tblock, target);
9279 link_bblock (cfg, cfg->cbb, tblock);
9280 ins->inst_target_bb = tblock;
9281 if (sp != stack_start) {
9282 handle_stack_args (cfg, stack_start, sp - stack_start);
9284 CHECK_UNVERIFIABLE (cfg);
9287 MONO_ADD_INS (cfg->cbb, ins);
9289 start_new_bblock = 1;
9290 inline_costs += BRANCH_COST;
9297 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9298 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9299 guint32 opsize = is_short ? 1 : 4;
9301 CHECK_OPSIZE (opsize);
9303 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9306 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9311 GET_BBLOCK (cfg, tblock, target);
9312 link_bblock (cfg, cfg->cbb, tblock);
9313 GET_BBLOCK (cfg, tblock, ip);
9314 link_bblock (cfg, cfg->cbb, tblock);
9316 if (sp != stack_start) {
9317 handle_stack_args (cfg, stack_start, sp - stack_start);
9318 CHECK_UNVERIFIABLE (cfg);
9321 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9322 cmp->sreg1 = sp [0]->dreg;
9323 type_from_op (cfg, cmp, sp [0], NULL);
9326 #if SIZEOF_REGISTER == 4
9327 if (cmp->opcode == OP_LCOMPARE_IMM) {
9328 /* Convert it to OP_LCOMPARE */
9329 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9330 ins->type = STACK_I8;
9331 ins->dreg = alloc_dreg (cfg, STACK_I8);
9333 MONO_ADD_INS (cfg->cbb, ins);
9334 cmp->opcode = OP_LCOMPARE;
9335 cmp->sreg2 = ins->dreg;
9338 MONO_ADD_INS (cfg->cbb, cmp);
9340 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9341 type_from_op (cfg, ins, sp [0], NULL);
9342 MONO_ADD_INS (cfg->cbb, ins);
9343 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9344 GET_BBLOCK (cfg, tblock, target);
9345 ins->inst_true_bb = tblock;
9346 GET_BBLOCK (cfg, tblock, ip);
9347 ins->inst_false_bb = tblock;
9348 start_new_bblock = 2;
9351 inline_costs += BRANCH_COST;
9366 MONO_INST_NEW (cfg, ins, *ip);
9368 target = ip + 4 + (gint32)read32(ip);
9374 inline_costs += BRANCH_COST;
9378 MonoBasicBlock **targets;
9379 MonoBasicBlock *default_bblock;
9380 MonoJumpInfoBBTable *table;
9381 int offset_reg = alloc_preg (cfg);
9382 int target_reg = alloc_preg (cfg);
9383 int table_reg = alloc_preg (cfg);
9384 int sum_reg = alloc_preg (cfg);
9385 gboolean use_op_switch;
9389 n = read32 (ip + 1);
9392 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9396 CHECK_OPSIZE (n * sizeof (guint32));
9397 target = ip + n * sizeof (guint32);
9399 GET_BBLOCK (cfg, default_bblock, target);
9400 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9402 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9403 for (i = 0; i < n; ++i) {
9404 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9405 targets [i] = tblock;
9406 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9410 if (sp != stack_start) {
9412 * Link the current bb with the targets as well, so handle_stack_args
9413 * will set their in_stack correctly.
9415 link_bblock (cfg, cfg->cbb, default_bblock);
9416 for (i = 0; i < n; ++i)
9417 link_bblock (cfg, cfg->cbb, targets [i]);
9419 handle_stack_args (cfg, stack_start, sp - stack_start);
9421 CHECK_UNVERIFIABLE (cfg);
9423 /* Undo the links */
9424 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9425 for (i = 0; i < n; ++i)
9426 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9430 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9432 for (i = 0; i < n; ++i)
9433 link_bblock (cfg, cfg->cbb, targets [i]);
9435 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9436 table->table = targets;
9437 table->table_size = n;
9439 use_op_switch = FALSE;
9441 /* ARM implements SWITCH statements differently */
9442 /* FIXME: Make it use the generic implementation */
9443 if (!cfg->compile_aot)
9444 use_op_switch = TRUE;
9447 if (COMPILE_LLVM (cfg))
9448 use_op_switch = TRUE;
9450 cfg->cbb->has_jump_table = 1;
9452 if (use_op_switch) {
9453 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9454 ins->sreg1 = src1->dreg;
9455 ins->inst_p0 = table;
9456 ins->inst_many_bb = targets;
9457 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9458 MONO_ADD_INS (cfg->cbb, ins);
9460 if (sizeof (gpointer) == 8)
9461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9465 #if SIZEOF_REGISTER == 8
9466 /* The upper word might not be zero, and we add it to a 64 bit address later */
9467 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9470 if (cfg->compile_aot) {
9471 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9473 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9474 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9475 ins->inst_p0 = table;
9476 ins->dreg = table_reg;
9477 MONO_ADD_INS (cfg->cbb, ins);
9480 /* FIXME: Use load_memindex */
9481 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9483 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9485 start_new_bblock = 1;
9486 inline_costs += (BRANCH_COST * 2);
9503 ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
9519 if (ins_flag & MONO_INST_VOLATILE) {
9520 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9521 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9524 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9525 ins->flags |= ins_flag;
9528 MONO_ADD_INS (cfg->cbb, ins);
9530 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9531 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9540 MONO_INST_NEW (cfg, ins, (*ip));
9542 ins->sreg1 = sp [0]->dreg;
9543 ins->sreg2 = sp [1]->dreg;
9544 type_from_op (cfg, ins, sp [0], sp [1]);
9546 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9548 /* Use the immediate opcodes if possible */
9549 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9550 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9551 if (imm_opcode != -1) {
9552 ins->opcode = imm_opcode;
9553 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9556 NULLIFY_INS (sp [1]);
9560 MONO_ADD_INS ((cfg)->cbb, (ins));
9562 *sp++ = mono_decompose_opcode (cfg, ins);
9579 MONO_INST_NEW (cfg, ins, (*ip));
9581 ins->sreg1 = sp [0]->dreg;
9582 ins->sreg2 = sp [1]->dreg;
9583 type_from_op (cfg, ins, sp [0], sp [1]);
9585 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9586 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9588 /* FIXME: Pass opcode to is_inst_imm */
9590 /* Use the immediate opcodes if possible */
9591 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9592 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9593 if (imm_opcode != -1) {
9594 ins->opcode = imm_opcode;
9595 if (sp [1]->opcode == OP_I8CONST) {
9596 #if SIZEOF_REGISTER == 8
9597 ins->inst_imm = sp [1]->inst_l;
9599 ins->inst_ls_word = sp [1]->inst_ls_word;
9600 ins->inst_ms_word = sp [1]->inst_ms_word;
9604 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9607 /* Might be followed by an instruction added by add_widen_op */
9608 if (sp [1]->next == NULL)
9609 NULLIFY_INS (sp [1]);
9612 MONO_ADD_INS ((cfg)->cbb, (ins));
9614 *sp++ = mono_decompose_opcode (cfg, ins);
9627 case CEE_CONV_OVF_I8:
9628 case CEE_CONV_OVF_U8:
9632 /* Special case this earlier so we have long constants in the IR */
9633 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9634 int data = sp [-1]->inst_c0;
9635 sp [-1]->opcode = OP_I8CONST;
9636 sp [-1]->type = STACK_I8;
9637 #if SIZEOF_REGISTER == 8
9638 if ((*ip) == CEE_CONV_U8)
9639 sp [-1]->inst_c0 = (guint32)data;
9641 sp [-1]->inst_c0 = data;
9643 sp [-1]->inst_ls_word = data;
9644 if ((*ip) == CEE_CONV_U8)
9645 sp [-1]->inst_ms_word = 0;
9647 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9649 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9656 case CEE_CONV_OVF_I4:
9657 case CEE_CONV_OVF_I1:
9658 case CEE_CONV_OVF_I2:
9659 case CEE_CONV_OVF_I:
9660 case CEE_CONV_OVF_U:
9663 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9664 ADD_UNOP (CEE_CONV_OVF_I8);
9671 case CEE_CONV_OVF_U1:
9672 case CEE_CONV_OVF_U2:
9673 case CEE_CONV_OVF_U4:
9676 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9677 ADD_UNOP (CEE_CONV_OVF_U8);
9684 case CEE_CONV_OVF_I1_UN:
9685 case CEE_CONV_OVF_I2_UN:
9686 case CEE_CONV_OVF_I4_UN:
9687 case CEE_CONV_OVF_I8_UN:
9688 case CEE_CONV_OVF_U1_UN:
9689 case CEE_CONV_OVF_U2_UN:
9690 case CEE_CONV_OVF_U4_UN:
9691 case CEE_CONV_OVF_U8_UN:
9692 case CEE_CONV_OVF_I_UN:
9693 case CEE_CONV_OVF_U_UN:
9700 CHECK_CFG_EXCEPTION;
9704 case CEE_ADD_OVF_UN:
9706 case CEE_MUL_OVF_UN:
9708 case CEE_SUB_OVF_UN:
9714 GSHAREDVT_FAILURE (*ip);
9717 token = read32 (ip + 1);
9718 klass = mini_get_class (method, token, generic_context);
9719 CHECK_TYPELOAD (klass);
9721 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9732 token = read32 (ip + 1);
9733 klass = mini_get_class (method, token, generic_context);
9734 CHECK_TYPELOAD (klass);
9736 /* Optimize the common ldobj+stloc combination */
9746 loc_index = ip [5] - CEE_STLOC_0;
9753 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9754 CHECK_LOCAL (loc_index);
9756 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9757 ins->dreg = cfg->locals [loc_index]->dreg;
9758 ins->flags |= ins_flag;
9761 if (ins_flag & MONO_INST_VOLATILE) {
9762 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9763 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9769 /* Optimize the ldobj+stobj combination */
9770 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) {
9775 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9782 ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
9791 CHECK_STACK_OVF (1);
9793 n = read32 (ip + 1);
9795 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9796 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9797 ins->type = STACK_OBJ;
9800 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9801 MonoInst *iargs [1];
9802 char *str = (char *)mono_method_get_wrapper_data (method, n);
9804 if (cfg->compile_aot)
9805 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9807 EMIT_NEW_PCONST (cfg, iargs [0], str);
9808 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9810 if (cfg->opt & MONO_OPT_SHARED) {
9811 MonoInst *iargs [3];
9813 if (cfg->compile_aot) {
9814 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9816 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9817 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9818 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9819 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
9820 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9823 if (cfg->cbb->out_of_line) {
9824 MonoInst *iargs [2];
9826 if (image == mono_defaults.corlib) {
9828 * Avoid relocations in AOT and save some space by using a
9829 * version of helper_ldstr specialized to mscorlib.
9831 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9832 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9834 /* Avoid creating the string object */
9835 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9836 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9837 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9841 if (cfg->compile_aot) {
9842 NEW_LDSTRCONST (cfg, ins, image, n);
9844 MONO_ADD_INS (cfg->cbb, ins);
9847 NEW_PCONST (cfg, ins, NULL);
9848 ins->type = STACK_OBJ;
9849 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9853 OUT_OF_MEMORY_FAILURE;
9856 MONO_ADD_INS (cfg->cbb, ins);
9865 MonoInst *iargs [2];
9866 MonoMethodSignature *fsig;
9869 MonoInst *vtable_arg = NULL;
9872 token = read32 (ip + 1);
9873 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9876 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9879 mono_save_token_info (cfg, image, token, cmethod);
9881 if (!mono_class_init (cmethod->klass))
9882 TYPE_LOAD_ERROR (cmethod->klass);
9884 context_used = mini_method_check_context_used (cfg, cmethod);
9886 if (!dont_verify && !cfg->skip_visibility) {
9887 MonoMethod *cil_method = cmethod;
9888 MonoMethod *target_method = cil_method;
9890 if (method->is_inflated) {
9891 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9895 if (!mono_method_can_access_method (method_definition, target_method) &&
9896 !mono_method_can_access_method (method, cil_method))
9897 emit_method_access_failure (cfg, method, cil_method);
9900 if (mono_security_core_clr_enabled ())
9901 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
9903 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9904 emit_class_init (cfg, cmethod->klass);
9905 CHECK_TYPELOAD (cmethod->klass);
9909 if (cfg->gsharedvt) {
9910 if (mini_is_gsharedvt_variable_signature (sig))
9911 GSHAREDVT_FAILURE (*ip);
9915 n = fsig->param_count;
9919 * Generate smaller code for the common newobj <exception> instruction in
9920 * argument checking code.
9922 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9923 is_exception_class (cmethod->klass) && n <= 2 &&
9924 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9925 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9926 MonoInst *iargs [3];
9930 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9933 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9937 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9942 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9945 g_assert_not_reached ();
9953 /* move the args to allow room for 'this' in the first position */
9959 /* check_call_signature () requires sp[0] to be set */
9960 this_ins.type = STACK_OBJ;
9962 if (check_call_signature (cfg, fsig, sp))
9967 if (mini_class_is_system_array (cmethod->klass)) {
9968 *sp = emit_get_rgctx_method (cfg, context_used,
9969 cmethod, MONO_RGCTX_INFO_METHOD);
9971 /* Avoid varargs in the common case */
9972 if (fsig->param_count == 1)
9973 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9974 else if (fsig->param_count == 2)
9975 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9976 else if (fsig->param_count == 3)
9977 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9978 else if (fsig->param_count == 4)
9979 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9981 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9982 } else if (cmethod->string_ctor) {
9983 g_assert (!context_used);
9984 g_assert (!vtable_arg);
9985 /* we simply pass a null pointer */
9986 EMIT_NEW_PCONST (cfg, *sp, NULL);
9987 /* now call the string ctor */
9988 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9990 if (cmethod->klass->valuetype) {
9991 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9992 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9993 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9998 * The code generated by mini_emit_virtual_call () expects
9999 * iargs [0] to be a boxed instance, but luckily the vcall
10000 * will be transformed into a normal call there.
10002 } else if (context_used) {
10003 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10006 MonoVTable *vtable = NULL;
10008 if (!cfg->compile_aot)
10009 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10010 CHECK_TYPELOAD (cmethod->klass);
10013 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10014 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10015 * As a workaround, we call class cctors before allocating objects.
10017 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10018 emit_class_init (cfg, cmethod->klass);
10019 if (cfg->verbose_level > 2)
10020 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10021 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10024 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10027 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10030 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10032 /* Now call the actual ctor */
10033 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10034 CHECK_CFG_EXCEPTION;
10037 if (alloc == NULL) {
10039 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10040 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10048 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10049 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10052 case CEE_CASTCLASS:
10057 token = read32 (ip + 1);
10058 klass = mini_get_class (method, token, generic_context);
10059 CHECK_TYPELOAD (klass);
10060 if (sp [0]->type != STACK_OBJ)
10063 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10064 ins->dreg = alloc_preg (cfg);
10065 ins->sreg1 = (*sp)->dreg;
10066 ins->klass = klass;
10067 ins->type = STACK_OBJ;
10068 MONO_ADD_INS (cfg->cbb, ins);
10070 CHECK_CFG_EXCEPTION;
10074 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10077 case CEE_UNBOX_ANY: {
10078 MonoInst *res, *addr;
10083 token = read32 (ip + 1);
10084 klass = mini_get_class (method, token, generic_context);
10085 CHECK_TYPELOAD (klass);
10087 mono_save_token_info (cfg, image, token, klass);
10089 context_used = mini_class_check_context_used (cfg, klass);
10091 if (mini_is_gsharedvt_klass (klass)) {
10092 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10094 } else if (generic_class_is_reference_type (cfg, klass)) {
10095 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10096 EMIT_NEW_PCONST (cfg, res, NULL);
10097 res->type = STACK_OBJ;
10099 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10100 res->dreg = alloc_preg (cfg);
10101 res->sreg1 = (*sp)->dreg;
10102 res->klass = klass;
10103 res->type = STACK_OBJ;
10104 MONO_ADD_INS (cfg->cbb, res);
10105 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10107 } else if (mono_class_is_nullable (klass)) {
10108 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10110 addr = handle_unbox (cfg, klass, sp, context_used);
10112 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10123 MonoClass *enum_class;
10124 MonoMethod *has_flag;
10130 token = read32 (ip + 1);
10131 klass = mini_get_class (method, token, generic_context);
10132 CHECK_TYPELOAD (klass);
10134 mono_save_token_info (cfg, image, token, klass);
10136 context_used = mini_class_check_context_used (cfg, klass);
10138 if (generic_class_is_reference_type (cfg, klass)) {
10144 if (klass == mono_defaults.void_class)
10146 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10148 /* frequent check in generic code: box (struct), brtrue */
10153 * <push int/long ptr>
10156 * constrained. MyFlags
10157 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10159 * If we find this sequence and the operand types on box and constrained
10160 * are equal, we can emit a specialized instruction sequence instead of
10161 * the very slow HasFlag () call.
10163 if ((cfg->opt & MONO_OPT_INTRINS) &&
10164 /* Cheap checks first. */
10165 ip + 5 + 6 + 5 < end &&
10166 ip [5] == CEE_PREFIX1 &&
10167 ip [6] == CEE_CONSTRAINED_ &&
10168 ip [11] == CEE_CALLVIRT &&
10169 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10170 mono_class_is_enum (klass) &&
10171 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10172 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10173 has_flag->klass == mono_defaults.enum_class &&
10174 !strcmp (has_flag->name, "HasFlag") &&
10175 has_flag->signature->hasthis &&
10176 has_flag->signature->param_count == 1) {
10177 CHECK_TYPELOAD (enum_class);
10179 if (enum_class == klass) {
10180 MonoInst *enum_this, *enum_flag;
10185 enum_this = sp [0];
10186 enum_flag = sp [1];
10188 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10193 // FIXME: LLVM can't handle the inconsistent bb linking
10194 if (!mono_class_is_nullable (klass) &&
10195 !mini_is_gsharedvt_klass (klass) &&
10196 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10197 (ip [5] == CEE_BRTRUE ||
10198 ip [5] == CEE_BRTRUE_S ||
10199 ip [5] == CEE_BRFALSE ||
10200 ip [5] == CEE_BRFALSE_S)) {
10201 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10203 MonoBasicBlock *true_bb, *false_bb;
10207 if (cfg->verbose_level > 3) {
10208 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10209 printf ("<box+brtrue opt>\n");
10214 case CEE_BRFALSE_S:
10217 target = ip + 1 + (signed char)(*ip);
10224 target = ip + 4 + (gint)(read32 (ip));
10228 g_assert_not_reached ();
10232 * We need to link both bblocks, since it is needed for handling stack
10233 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10234 * Branching to only one of them would lead to inconsistencies, so
10235 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10237 GET_BBLOCK (cfg, true_bb, target);
10238 GET_BBLOCK (cfg, false_bb, ip);
10240 mono_link_bblock (cfg, cfg->cbb, true_bb);
10241 mono_link_bblock (cfg, cfg->cbb, false_bb);
10243 if (sp != stack_start) {
10244 handle_stack_args (cfg, stack_start, sp - stack_start);
10246 CHECK_UNVERIFIABLE (cfg);
10249 if (COMPILE_LLVM (cfg)) {
10250 dreg = alloc_ireg (cfg);
10251 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10254 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10256 /* The JIT can't eliminate the iconst+compare */
10257 MONO_INST_NEW (cfg, ins, OP_BR);
10258 ins->inst_target_bb = is_true ? true_bb : false_bb;
10259 MONO_ADD_INS (cfg->cbb, ins);
10262 start_new_bblock = 1;
10266 *sp++ = handle_box (cfg, val, klass, context_used);
10268 CHECK_CFG_EXCEPTION;
10277 token = read32 (ip + 1);
10278 klass = mini_get_class (method, token, generic_context);
10279 CHECK_TYPELOAD (klass);
10281 mono_save_token_info (cfg, image, token, klass);
10283 context_used = mini_class_check_context_used (cfg, klass);
10285 if (mono_class_is_nullable (klass)) {
10288 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10289 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10293 ins = handle_unbox (cfg, klass, sp, context_used);
10306 MonoClassField *field;
10307 #ifndef DISABLE_REMOTING
10311 gboolean is_instance;
10313 gpointer addr = NULL;
10314 gboolean is_special_static;
10316 MonoInst *store_val = NULL;
10317 MonoInst *thread_ins;
10320 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10322 if (op == CEE_STFLD) {
10325 store_val = sp [1];
10330 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10332 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10335 if (op == CEE_STSFLD) {
10338 store_val = sp [0];
10343 token = read32 (ip + 1);
10344 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10345 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10346 klass = field->parent;
10349 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10352 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10353 FIELD_ACCESS_FAILURE (method, field);
10354 mono_class_init (klass);
10356 /* if the class is Critical then transparent code cannot access its fields */
10357 if (!is_instance && mono_security_core_clr_enabled ())
10358 ensure_method_is_allowed_to_access_field (cfg, method, field);
10360 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10361 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10362 if (mono_security_core_clr_enabled ())
10363 ensure_method_is_allowed_to_access_field (cfg, method, field);
10366 ftype = mono_field_get_type (field);
10369 * LDFLD etc. is usable on static fields as well, so convert those cases to
10372 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10384 g_assert_not_reached ();
10386 is_instance = FALSE;
10389 context_used = mini_class_check_context_used (cfg, klass);
10391 /* INSTANCE CASE */
10393 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10394 if (op == CEE_STFLD) {
10395 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10397 #ifndef DISABLE_REMOTING
10398 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10399 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10400 MonoInst *iargs [5];
10402 GSHAREDVT_FAILURE (op);
10404 iargs [0] = sp [0];
10405 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10406 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10407 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10409 iargs [4] = sp [1];
10411 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10412 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10413 iargs, ip, cfg->real_offset, TRUE);
10414 CHECK_CFG_EXCEPTION;
10415 g_assert (costs > 0);
10417 cfg->real_offset += 5;
10419 inline_costs += costs;
10421 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10426 MonoInst *store, *wbarrier_ptr_ins = NULL;
10428 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10430 if (ins_flag & MONO_INST_VOLATILE) {
10431 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10432 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10435 if (mini_is_gsharedvt_klass (klass)) {
10436 MonoInst *offset_ins;
10438 context_used = mini_class_check_context_used (cfg, klass);
10440 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10441 /* The value is offset by 1 */
10442 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10443 dreg = alloc_ireg_mp (cfg);
10444 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10445 wbarrier_ptr_ins = ins;
10446 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
10447 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10449 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10451 if (sp [0]->opcode != OP_LDADDR)
10452 store->flags |= MONO_INST_FAULT;
10454 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10455 if (mini_is_gsharedvt_klass (klass)) {
10456 g_assert (wbarrier_ptr_ins);
10457 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10459 /* insert call to write barrier */
10463 dreg = alloc_ireg_mp (cfg);
10464 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10465 mini_emit_write_barrier (cfg, ptr, sp [1]);
10469 store->flags |= ins_flag;
10476 #ifndef DISABLE_REMOTING
10477 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10478 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10479 MonoInst *iargs [4];
10481 GSHAREDVT_FAILURE (op);
10483 iargs [0] = sp [0];
10484 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10485 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10486 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10487 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10488 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10489 iargs, ip, cfg->real_offset, TRUE);
10490 CHECK_CFG_EXCEPTION;
10491 g_assert (costs > 0);
10493 cfg->real_offset += 5;
10497 inline_costs += costs;
10499 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10505 if (sp [0]->type == STACK_VTYPE) {
10508 /* Have to compute the address of the variable */
10510 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10512 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10514 g_assert (var->klass == klass);
10516 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10520 if (op == CEE_LDFLDA) {
10521 if (sp [0]->type == STACK_OBJ) {
10522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10526 dreg = alloc_ireg_mp (cfg);
10528 if (mini_is_gsharedvt_klass (klass)) {
10529 MonoInst *offset_ins;
10531 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10532 /* The value is offset by 1 */
10533 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10534 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10536 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10538 ins->klass = mono_class_from_mono_type (field->type);
10539 ins->type = STACK_MP;
10544 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10546 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10547 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10556 MonoInst *field_add_inst = sp [0];
10557 if (mini_is_gsharedvt_klass (klass)) {
10558 MonoInst *offset_ins;
10560 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10561 /* The value is offset by 1 */
10562 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10563 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
10567 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
10569 if (sp [0]->opcode != OP_LDADDR)
10570 load->flags |= MONO_INST_FAULT;
10582 context_used = mini_class_check_context_used (cfg, klass);
10584 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10585 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10589 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10590 * to be called here.
10592 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10593 mono_class_vtable (cfg->domain, klass);
10594 CHECK_TYPELOAD (klass);
10596 mono_domain_lock (cfg->domain);
10597 if (cfg->domain->special_static_fields)
10598 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10599 mono_domain_unlock (cfg->domain);
10601 is_special_static = mono_class_field_is_special_static (field);
10603 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10604 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10608 /* Generate IR to compute the field address */
10609 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10611 * Fast access to TLS data
10612 * Inline version of get_thread_static_data () in
10616 int idx, static_data_reg, array_reg, dreg;
10618 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10619 GSHAREDVT_FAILURE (op);
10621 static_data_reg = alloc_ireg (cfg);
10622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10624 if (cfg->compile_aot) {
10625 int offset_reg, offset2_reg, idx_reg;
10627 /* For TLS variables, this will return the TLS offset */
10628 EMIT_NEW_SFLDACONST (cfg, ins, field);
10629 offset_reg = ins->dreg;
10630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10631 idx_reg = alloc_ireg (cfg);
10632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10633 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10634 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10635 array_reg = alloc_ireg (cfg);
10636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10637 offset2_reg = alloc_ireg (cfg);
10638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10640 dreg = alloc_ireg (cfg);
10641 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10643 offset = (gsize)addr & 0x7fffffff;
10644 idx = offset & 0x3f;
10646 array_reg = alloc_ireg (cfg);
10647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10648 dreg = alloc_ireg (cfg);
10649 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10651 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10652 (cfg->compile_aot && is_special_static) ||
10653 (context_used && is_special_static)) {
10654 MonoInst *iargs [2];
10656 g_assert (field->parent);
10657 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10658 if (context_used) {
10659 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10660 field, MONO_RGCTX_INFO_CLASS_FIELD);
10662 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10664 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10665 } else if (context_used) {
10666 MonoInst *static_data;
10669 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10670 method->klass->name_space, method->klass->name, method->name,
10671 depth, field->offset);
10674 if (mono_class_needs_cctor_run (klass, method))
10675 emit_class_init (cfg, klass);
10678 * The pointer we're computing here is
10680 * super_info.static_data + field->offset
10682 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10683 klass, MONO_RGCTX_INFO_STATIC_DATA);
10685 if (mini_is_gsharedvt_klass (klass)) {
10686 MonoInst *offset_ins;
10688 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10689 /* The value is offset by 1 */
10690 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10691 dreg = alloc_ireg_mp (cfg);
10692 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10693 } else if (field->offset == 0) {
10696 int addr_reg = mono_alloc_preg (cfg);
10697 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10699 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10700 MonoInst *iargs [2];
10702 g_assert (field->parent);
10703 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10704 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10705 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10707 MonoVTable *vtable = NULL;
10709 if (!cfg->compile_aot)
10710 vtable = mono_class_vtable (cfg->domain, klass);
10711 CHECK_TYPELOAD (klass);
10714 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10715 if (!(g_slist_find (class_inits, klass))) {
10716 emit_class_init (cfg, klass);
10717 if (cfg->verbose_level > 2)
10718 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10719 class_inits = g_slist_prepend (class_inits, klass);
10722 if (cfg->run_cctors) {
10723 /* This makes it so that inlining cannot trigger */
10724 /* .cctors: too many apps depend on them */
10725 /* running with a specific order... */
10727 if (! vtable->initialized)
10728 INLINE_FAILURE ("class init");
10729 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10730 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10731 goto exception_exit;
10735 if (cfg->compile_aot)
10736 EMIT_NEW_SFLDACONST (cfg, ins, field);
10739 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10741 EMIT_NEW_PCONST (cfg, ins, addr);
10744 MonoInst *iargs [1];
10745 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10746 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10750 /* Generate IR to do the actual load/store operation */
10752 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10753 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10754 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10757 if (op == CEE_LDSFLDA) {
10758 ins->klass = mono_class_from_mono_type (ftype);
10759 ins->type = STACK_PTR;
10761 } else if (op == CEE_STSFLD) {
10764 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10765 store->flags |= ins_flag;
10767 gboolean is_const = FALSE;
10768 MonoVTable *vtable = NULL;
10769 gpointer addr = NULL;
10771 if (!context_used) {
10772 vtable = mono_class_vtable (cfg->domain, klass);
10773 CHECK_TYPELOAD (klass);
10775 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10776 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10777 int ro_type = ftype->type;
10779 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10780 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10781 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10784 GSHAREDVT_FAILURE (op);
10786 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10789 case MONO_TYPE_BOOLEAN:
10791 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10795 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10798 case MONO_TYPE_CHAR:
10800 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10804 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10809 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10813 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10818 case MONO_TYPE_PTR:
10819 case MONO_TYPE_FNPTR:
10820 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10821 type_to_eval_stack_type ((cfg), field->type, *sp);
10824 case MONO_TYPE_STRING:
10825 case MONO_TYPE_OBJECT:
10826 case MONO_TYPE_CLASS:
10827 case MONO_TYPE_SZARRAY:
10828 case MONO_TYPE_ARRAY:
10829 if (!mono_gc_is_moving ()) {
10830 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10831 type_to_eval_stack_type ((cfg), field->type, *sp);
10839 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10844 case MONO_TYPE_VALUETYPE:
10854 CHECK_STACK_OVF (1);
10856 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10857 load->flags |= ins_flag;
10863 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10864 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10865 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10876 token = read32 (ip + 1);
10877 klass = mini_get_class (method, token, generic_context);
10878 CHECK_TYPELOAD (klass);
10880 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10881 mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
10892 const char *data_ptr;
10894 guint32 field_token;
10900 token = read32 (ip + 1);
10902 klass = mini_get_class (method, token, generic_context);
10903 CHECK_TYPELOAD (klass);
10904 if (klass->byval_arg.type == MONO_TYPE_VOID)
10907 context_used = mini_class_check_context_used (cfg, klass);
10909 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10910 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10911 ins->sreg1 = sp [0]->dreg;
10912 ins->type = STACK_I4;
10913 ins->dreg = alloc_ireg (cfg);
10914 MONO_ADD_INS (cfg->cbb, ins);
10915 *sp = mono_decompose_opcode (cfg, ins);
10918 if (context_used) {
10919 MonoInst *args [3];
10920 MonoClass *array_class = mono_array_class_get (klass, 1);
10921 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10923 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10926 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
10927 array_class, MONO_RGCTX_INFO_VTABLE);
10932 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10934 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
10936 if (cfg->opt & MONO_OPT_SHARED) {
10937 /* Decompose now to avoid problems with references to the domainvar */
10938 MonoInst *iargs [3];
10940 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10941 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10942 iargs [2] = sp [0];
10944 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
10946 /* Decompose later since it is needed by abcrem */
10947 MonoClass *array_type = mono_array_class_get (klass, 1);
10948 mono_class_vtable (cfg->domain, array_type);
10949 CHECK_TYPELOAD (array_type);
10951 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10952 ins->dreg = alloc_ireg_ref (cfg);
10953 ins->sreg1 = sp [0]->dreg;
10954 ins->inst_newa_class = klass;
10955 ins->type = STACK_OBJ;
10956 ins->klass = array_type;
10957 MONO_ADD_INS (cfg->cbb, ins);
10958 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10959 cfg->cbb->has_array_access = TRUE;
10961 /* Needed so mono_emit_load_get_addr () gets called */
10962 mono_get_got_var (cfg);
10972 * we inline/optimize the initialization sequence if possible.
10973 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10974 * for small sizes open code the memcpy
10975 * ensure the rva field is big enough
10977 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10978 MonoMethod *memcpy_method = mini_get_memcpy_method ();
10979 MonoInst *iargs [3];
10980 int add_reg = alloc_ireg_mp (cfg);
10982 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10983 if (cfg->compile_aot) {
10984 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10986 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10988 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10989 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10998 if (sp [0]->type != STACK_OBJ)
11001 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11002 ins->dreg = alloc_preg (cfg);
11003 ins->sreg1 = sp [0]->dreg;
11004 ins->type = STACK_I4;
11005 /* This flag will be inherited by the decomposition */
11006 ins->flags |= MONO_INST_FAULT;
11007 MONO_ADD_INS (cfg->cbb, ins);
11008 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11009 cfg->cbb->has_array_access = TRUE;
11017 if (sp [0]->type != STACK_OBJ)
11020 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11022 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11023 CHECK_TYPELOAD (klass);
11024 /* we need to make sure that this array is exactly the type it needs
11025 * to be for correctness. the wrappers are lax with their usage
11026 * so we need to ignore them here
11028 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11029 MonoClass *array_class = mono_array_class_get (klass, 1);
11030 mini_emit_check_array_type (cfg, sp [0], array_class);
11031 CHECK_TYPELOAD (array_class);
11035 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11040 case CEE_LDELEM_I1:
11041 case CEE_LDELEM_U1:
11042 case CEE_LDELEM_I2:
11043 case CEE_LDELEM_U2:
11044 case CEE_LDELEM_I4:
11045 case CEE_LDELEM_U4:
11046 case CEE_LDELEM_I8:
11048 case CEE_LDELEM_R4:
11049 case CEE_LDELEM_R8:
11050 case CEE_LDELEM_REF: {
11056 if (*ip == CEE_LDELEM) {
11058 token = read32 (ip + 1);
11059 klass = mini_get_class (method, token, generic_context);
11060 CHECK_TYPELOAD (klass);
11061 mono_class_init (klass);
11064 klass = array_access_to_klass (*ip);
11066 if (sp [0]->type != STACK_OBJ)
11069 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11071 if (mini_is_gsharedvt_variable_klass (klass)) {
11072 // FIXME-VT: OP_ICONST optimization
11073 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11074 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11075 ins->opcode = OP_LOADV_MEMBASE;
11076 } else if (sp [1]->opcode == OP_ICONST) {
11077 int array_reg = sp [0]->dreg;
11078 int index_reg = sp [1]->dreg;
11079 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11081 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11082 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11084 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11085 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11087 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11088 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11091 if (*ip == CEE_LDELEM)
11098 case CEE_STELEM_I1:
11099 case CEE_STELEM_I2:
11100 case CEE_STELEM_I4:
11101 case CEE_STELEM_I8:
11102 case CEE_STELEM_R4:
11103 case CEE_STELEM_R8:
11104 case CEE_STELEM_REF:
11109 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11111 if (*ip == CEE_STELEM) {
11113 token = read32 (ip + 1);
11114 klass = mini_get_class (method, token, generic_context);
11115 CHECK_TYPELOAD (klass);
11116 mono_class_init (klass);
11119 klass = array_access_to_klass (*ip);
11121 if (sp [0]->type != STACK_OBJ)
11124 emit_array_store (cfg, klass, sp, TRUE);
11126 if (*ip == CEE_STELEM)
11133 case CEE_CKFINITE: {
11137 if (cfg->llvm_only) {
11138 MonoInst *iargs [1];
11140 iargs [0] = sp [0];
11141 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11143 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11144 ins->sreg1 = sp [0]->dreg;
11145 ins->dreg = alloc_freg (cfg);
11146 ins->type = STACK_R8;
11147 MONO_ADD_INS (cfg->cbb, ins);
11149 *sp++ = mono_decompose_opcode (cfg, ins);
11155 case CEE_REFANYVAL: {
11156 MonoInst *src_var, *src;
11158 int klass_reg = alloc_preg (cfg);
11159 int dreg = alloc_preg (cfg);
11161 GSHAREDVT_FAILURE (*ip);
11164 MONO_INST_NEW (cfg, ins, *ip);
11167 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11168 CHECK_TYPELOAD (klass);
11170 context_used = mini_class_check_context_used (cfg, klass);
11173 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11175 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11176 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11179 if (context_used) {
11180 MonoInst *klass_ins;
11182 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11183 klass, MONO_RGCTX_INFO_KLASS);
11186 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11187 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11189 mini_emit_class_check (cfg, klass_reg, klass);
11191 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11192 ins->type = STACK_MP;
11193 ins->klass = klass;
11198 case CEE_MKREFANY: {
11199 MonoInst *loc, *addr;
11201 GSHAREDVT_FAILURE (*ip);
11204 MONO_INST_NEW (cfg, ins, *ip);
11207 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11208 CHECK_TYPELOAD (klass);
11210 context_used = mini_class_check_context_used (cfg, klass);
11212 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11213 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11215 if (context_used) {
11216 MonoInst *const_ins;
11217 int type_reg = alloc_preg (cfg);
11219 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11224 int const_reg = alloc_preg (cfg);
11225 int type_reg = alloc_preg (cfg);
11227 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11228 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11229 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11232 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11234 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11235 ins->type = STACK_VTYPE;
11236 ins->klass = mono_defaults.typed_reference_class;
11241 case CEE_LDTOKEN: {
11243 MonoClass *handle_class;
11245 CHECK_STACK_OVF (1);
11248 n = read32 (ip + 1);
11250 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11251 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11252 handle = mono_method_get_wrapper_data (method, n);
11253 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11254 if (handle_class == mono_defaults.typehandle_class)
11255 handle = &((MonoClass*)handle)->byval_arg;
11258 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11263 mono_class_init (handle_class);
11264 if (cfg->gshared) {
11265 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11266 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11267 /* This case handles ldtoken
11268 of an open type, like for
11271 } else if (handle_class == mono_defaults.typehandle_class) {
11272 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11273 } else if (handle_class == mono_defaults.fieldhandle_class)
11274 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11275 else if (handle_class == mono_defaults.methodhandle_class)
11276 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11278 g_assert_not_reached ();
11281 if ((cfg->opt & MONO_OPT_SHARED) &&
11282 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11283 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11284 MonoInst *addr, *vtvar, *iargs [3];
11285 int method_context_used;
11287 method_context_used = mini_method_check_context_used (cfg, method);
11289 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11291 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11292 EMIT_NEW_ICONST (cfg, iargs [1], n);
11293 if (method_context_used) {
11294 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11295 method, MONO_RGCTX_INFO_METHOD);
11296 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11298 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11299 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11301 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11305 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11307 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11308 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11309 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11310 (cmethod->klass == mono_defaults.systemtype_class) &&
11311 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11312 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11314 mono_class_init (tclass);
11315 if (context_used) {
11316 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11317 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11318 } else if (cfg->compile_aot) {
11319 if (method->wrapper_type) {
11320 error_init (&error); //got to do it since there are multiple conditionals below
11321 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11322 /* Special case for static synchronized wrappers */
11323 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11325 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11326 /* FIXME: n is not a normal token */
11328 EMIT_NEW_PCONST (cfg, ins, NULL);
11331 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11334 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11336 EMIT_NEW_PCONST (cfg, ins, rt);
11338 ins->type = STACK_OBJ;
11339 ins->klass = cmethod->klass;
11342 MonoInst *addr, *vtvar;
11344 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11346 if (context_used) {
11347 if (handle_class == mono_defaults.typehandle_class) {
11348 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11349 mono_class_from_mono_type ((MonoType *)handle),
11350 MONO_RGCTX_INFO_TYPE);
11351 } else if (handle_class == mono_defaults.methodhandle_class) {
11352 ins = emit_get_rgctx_method (cfg, context_used,
11353 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11354 } else if (handle_class == mono_defaults.fieldhandle_class) {
11355 ins = emit_get_rgctx_field (cfg, context_used,
11356 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11358 g_assert_not_reached ();
11360 } else if (cfg->compile_aot) {
11361 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11363 EMIT_NEW_PCONST (cfg, ins, handle);
11365 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11366 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11367 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11377 if (sp [-1]->type != STACK_OBJ)
11380 MONO_INST_NEW (cfg, ins, OP_THROW);
11382 ins->sreg1 = sp [0]->dreg;
11384 cfg->cbb->out_of_line = TRUE;
11385 MONO_ADD_INS (cfg->cbb, ins);
11386 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11387 MONO_ADD_INS (cfg->cbb, ins);
11390 link_bblock (cfg, cfg->cbb, end_bblock);
11391 start_new_bblock = 1;
11392 /* This can complicate code generation for llvm since the return value might not be defined */
11393 if (COMPILE_LLVM (cfg))
11394 INLINE_FAILURE ("throw");
11396 case CEE_ENDFINALLY:
11397 if (!ip_in_finally_clause (cfg, ip - header->code))
11399 /* mono_save_seq_point_info () depends on this */
11400 if (sp != stack_start)
11401 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11402 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11403 MONO_ADD_INS (cfg->cbb, ins);
11405 start_new_bblock = 1;
11408 * Control will leave the method so empty the stack, otherwise
11409 * the next basic block will start with a nonempty stack.
11411 while (sp != stack_start) {
11416 case CEE_LEAVE_S: {
11419 if (*ip == CEE_LEAVE) {
11421 target = ip + 5 + (gint32)read32(ip + 1);
11424 target = ip + 2 + (signed char)(ip [1]);
11427 /* empty the stack */
11428 while (sp != stack_start) {
11433 * If this leave statement is in a catch block, check for a
11434 * pending exception, and rethrow it if necessary.
11435 * We avoid doing this in runtime invoke wrappers, since those are called
11436 * by native code which expects the wrapper to catch all exceptions.
11438 for (i = 0; i < header->num_clauses; ++i) {
11439 MonoExceptionClause *clause = &header->clauses [i];
11442 * Use <= in the final comparison to handle clauses with multiple
11443 * leave statements, like in bug #78024.
11444 * The ordering of the exception clauses guarantees that we find the
11445 * innermost clause.
11447 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11449 MonoBasicBlock *dont_throw;
11454 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11457 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11459 NEW_BBLOCK (cfg, dont_throw);
11462 * Currently, we always rethrow the abort exception, despite the
11463 * fact that this is not correct. See thread6.cs for an example.
11464 * But propagating the abort exception is more important than
11465 * getting the semantics right.
11467 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11469 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11471 MONO_START_BB (cfg, dont_throw);
11476 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11479 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11482 for (tmp = handlers; tmp; tmp = tmp->next) {
11483 MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
11484 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
11485 MonoBasicBlock *dont_throw;
11487 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11489 link_bblock (cfg, cfg->cbb, tblock);
11491 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
11493 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11494 ins->inst_target_bb = tblock;
11495 ins->inst_eh_block = clause;
11496 MONO_ADD_INS (cfg->cbb, ins);
11497 cfg->cbb->has_call_handler = 1;
11499 /* Throw exception if exvar is set */
11500 /* FIXME Do we need this for calls from catch/filter ? */
11501 NEW_BBLOCK (cfg, dont_throw);
11502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
11503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11504 mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
11505 cfg->cbb->clause_hole = clause;
11507 MONO_START_BB (cfg, dont_throw);
11508 cfg->cbb->clause_hole = clause;
11510 if (COMPILE_LLVM (cfg)) {
11511 MonoBasicBlock *target_bb;
11514 * Link the finally bblock with the target, since it will
11515 * conceptually branch there.
11517 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11518 GET_BBLOCK (cfg, target_bb, target);
11519 link_bblock (cfg, tblock, target_bb);
11522 g_list_free (handlers);
11525 MONO_INST_NEW (cfg, ins, OP_BR);
11526 MONO_ADD_INS (cfg->cbb, ins);
11527 GET_BBLOCK (cfg, tblock, target);
11528 link_bblock (cfg, cfg->cbb, tblock);
11529 ins->inst_target_bb = tblock;
11531 start_new_bblock = 1;
11533 if (*ip == CEE_LEAVE)
11542 * Mono specific opcodes
11544 case MONO_CUSTOM_PREFIX: {
11546 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11550 case CEE_MONO_ICALL: {
11552 MonoJitICallInfo *info;
11554 token = read32 (ip + 2);
11555 func = mono_method_get_wrapper_data (method, token);
11556 info = mono_find_jit_icall_by_addr (func);
11558 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11561 CHECK_STACK (info->sig->param_count);
11562 sp -= info->sig->param_count;
11564 if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) {
11568 * This is called on unattached threads, so it cannot go through the trampoline
11569 * infrastructure. Use an indirect call through a got slot initialized at load time
11572 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
11573 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
11575 ins = mono_emit_jit_icall (cfg, info->func, sp);
11578 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11582 inline_costs += 10 * num_calls++;
11586 case CEE_MONO_LDPTR_CARD_TABLE:
11587 case CEE_MONO_LDPTR_NURSERY_START:
11588 case CEE_MONO_LDPTR_NURSERY_BITS:
11589 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11590 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
11591 CHECK_STACK_OVF (1);
11594 case CEE_MONO_LDPTR_CARD_TABLE:
11595 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11597 case CEE_MONO_LDPTR_NURSERY_START:
11598 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11600 case CEE_MONO_LDPTR_NURSERY_BITS:
11601 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11603 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11604 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11606 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
11607 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
11610 g_assert_not_reached ();
11616 inline_costs += 10 * num_calls++;
11619 case CEE_MONO_LDPTR: {
11622 CHECK_STACK_OVF (1);
11624 token = read32 (ip + 2);
11626 ptr = mono_method_get_wrapper_data (method, token);
11627 EMIT_NEW_PCONST (cfg, ins, ptr);
11630 inline_costs += 10 * num_calls++;
11631 /* Can't embed random pointers into AOT code */
11635 case CEE_MONO_JIT_ICALL_ADDR: {
11636 MonoJitICallInfo *callinfo;
11639 CHECK_STACK_OVF (1);
11641 token = read32 (ip + 2);
11643 ptr = mono_method_get_wrapper_data (method, token);
11644 callinfo = mono_find_jit_icall_by_addr (ptr);
11645 g_assert (callinfo);
11646 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11649 inline_costs += 10 * num_calls++;
11652 case CEE_MONO_ICALL_ADDR: {
11653 MonoMethod *cmethod;
11656 CHECK_STACK_OVF (1);
11658 token = read32 (ip + 2);
11660 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11662 if (cfg->compile_aot) {
11663 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11665 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11666 * before the call, it's not needed when using direct pinvoke.
11667 * This is not an optimization, but it's used to avoid looking up pinvokes
11668 * on platforms which don't support dlopen ().
11670 EMIT_NEW_PCONST (cfg, ins, NULL);
11672 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11675 ptr = mono_lookup_internal_call (cmethod);
11677 EMIT_NEW_PCONST (cfg, ins, ptr);
11683 case CEE_MONO_VTADDR: {
11684 MonoInst *src_var, *src;
11690 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11691 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11696 case CEE_MONO_NEWOBJ: {
11697 MonoInst *iargs [2];
11699 CHECK_STACK_OVF (1);
11701 token = read32 (ip + 2);
11702 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11703 mono_class_init (klass);
11704 NEW_DOMAINCONST (cfg, iargs [0]);
11705 MONO_ADD_INS (cfg->cbb, iargs [0]);
11706 NEW_CLASSCONST (cfg, iargs [1], klass);
11707 MONO_ADD_INS (cfg->cbb, iargs [1]);
11708 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11710 inline_costs += 10 * num_calls++;
11713 case CEE_MONO_OBJADDR:
11716 MONO_INST_NEW (cfg, ins, OP_MOVE);
11717 ins->dreg = alloc_ireg_mp (cfg);
11718 ins->sreg1 = sp [0]->dreg;
11719 ins->type = STACK_MP;
11720 MONO_ADD_INS (cfg->cbb, ins);
11724 case CEE_MONO_LDNATIVEOBJ:
11726 * Similar to LDOBJ, but instead load the unmanaged
11727 * representation of the vtype to the stack.
11732 token = read32 (ip + 2);
11733 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11734 g_assert (klass->valuetype);
11735 mono_class_init (klass);
11738 MonoInst *src, *dest, *temp;
11741 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11742 temp->backend.is_pinvoke = 1;
11743 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11744 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
11746 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11747 dest->type = STACK_VTYPE;
11748 dest->klass = klass;
11754 case CEE_MONO_RETOBJ: {
11756 * Same as RET, but return the native representation of a vtype
11759 g_assert (cfg->ret);
11760 g_assert (mono_method_signature (method)->pinvoke);
11765 token = read32 (ip + 2);
11766 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11768 if (!cfg->vret_addr) {
11769 g_assert (cfg->ret_var_is_local);
11771 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11773 EMIT_NEW_RETLOADA (cfg, ins);
11775 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
11777 if (sp != stack_start)
11780 MONO_INST_NEW (cfg, ins, OP_BR);
11781 ins->inst_target_bb = end_bblock;
11782 MONO_ADD_INS (cfg->cbb, ins);
11783 link_bblock (cfg, cfg->cbb, end_bblock);
11784 start_new_bblock = 1;
11788 case CEE_MONO_SAVE_LMF:
11789 case CEE_MONO_RESTORE_LMF:
11792 case CEE_MONO_CLASSCONST:
11793 CHECK_STACK_OVF (1);
11795 token = read32 (ip + 2);
11796 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11799 inline_costs += 10 * num_calls++;
11801 case CEE_MONO_NOT_TAKEN:
11802 cfg->cbb->out_of_line = TRUE;
11805 case CEE_MONO_TLS: {
11808 CHECK_STACK_OVF (1);
11810 key = (MonoTlsKey)read32 (ip + 2);
11811 g_assert (key < TLS_KEY_NUM);
11813 ins = mono_create_tls_get (cfg, key);
11815 ins->type = STACK_PTR;
11820 case CEE_MONO_DYN_CALL: {
11821 MonoCallInst *call;
11823 /* It would be easier to call a trampoline, but that would put an
11824 * extra frame on the stack, confusing exception handling. So
11825 * implement it inline using an opcode for now.
11828 if (!cfg->dyn_call_var) {
11829 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11830 /* prevent it from being register allocated */
11831 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11834 /* Has to use a call inst since the local regalloc expects it */
11835 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11836 ins = (MonoInst*)call;
11838 ins->sreg1 = sp [0]->dreg;
11839 ins->sreg2 = sp [1]->dreg;
11840 MONO_ADD_INS (cfg->cbb, ins);
11842 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
11845 inline_costs += 10 * num_calls++;
11849 case CEE_MONO_MEMORY_BARRIER: {
11851 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
11855 case CEE_MONO_ATOMIC_STORE_I4: {
11856 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
11862 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
11863 ins->dreg = sp [0]->dreg;
11864 ins->sreg1 = sp [1]->dreg;
11865 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
11866 MONO_ADD_INS (cfg->cbb, ins);
11871 case CEE_MONO_JIT_ATTACH: {
11872 MonoInst *args [16], *domain_ins;
11873 MonoInst *ad_ins, *jit_tls_ins;
11874 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11876 g_assert (!mono_threads_is_blocking_transition_enabled ());
11878 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11880 EMIT_NEW_PCONST (cfg, ins, NULL);
11881 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11883 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11884 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
11886 if (ad_ins && jit_tls_ins) {
11887 NEW_BBLOCK (cfg, next_bb);
11888 NEW_BBLOCK (cfg, call_bb);
11890 if (cfg->compile_aot) {
11891 /* AOT code is only used in the root domain */
11892 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11894 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11896 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11897 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11903 MONO_START_BB (cfg, call_bb);
11906 /* AOT code is only used in the root domain */
11907 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
11908 if (cfg->compile_aot) {
11912 * This is called on unattached threads, so it cannot go through the trampoline
11913 * infrastructure. Use an indirect call through a got slot initialized at load time
11916 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
11917 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
11919 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11921 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11924 MONO_START_BB (cfg, next_bb);
11929 case CEE_MONO_JIT_DETACH: {
11930 MonoInst *args [16];
11932 /* Restore the original domain */
11933 dreg = alloc_ireg (cfg);
11934 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11935 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11939 case CEE_MONO_CALLI_EXTRA_ARG: {
11941 MonoMethodSignature *fsig;
11945 * This is the same as CEE_CALLI, but passes an additional argument
11946 * to the called method in llvmonly mode.
11947 * This is only used by delegate invoke wrappers to call the
11948 * actual delegate method.
11950 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
11953 token = read32 (ip + 2);
11961 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
11964 if (cfg->llvm_only)
11965 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
11967 n = fsig->param_count + fsig->hasthis + 1;
11974 if (cfg->llvm_only) {
11976 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
11977 * cconv. This is set by mono_init_delegate ().
11979 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
11980 MonoInst *callee = addr;
11981 MonoInst *call, *localloc_ins;
11982 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
11983 int low_bit_reg = alloc_preg (cfg);
11985 NEW_BBLOCK (cfg, is_gsharedvt_bb);
11986 NEW_BBLOCK (cfg, end_bb);
11988 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
11989 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
11990 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
11992 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
11993 addr = emit_get_rgctx_sig (cfg, context_used,
11994 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
11996 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
11998 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
11999 ins->dreg = alloc_preg (cfg);
12000 ins->inst_imm = 2 * SIZEOF_VOID_P;
12001 MONO_ADD_INS (cfg->cbb, ins);
12002 localloc_ins = ins;
12003 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12004 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12005 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12007 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12010 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12011 MONO_START_BB (cfg, is_gsharedvt_bb);
12012 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12013 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12014 ins->dreg = call->dreg;
12016 MONO_START_BB (cfg, end_bb);
12018 /* Caller uses a normal calling conv */
12020 MonoInst *callee = addr;
12021 MonoInst *call, *localloc_ins;
12022 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12023 int low_bit_reg = alloc_preg (cfg);
12025 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12026 NEW_BBLOCK (cfg, end_bb);
12028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12030 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12032 /* Normal case: callee uses a normal cconv, no conversion is needed */
12033 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12035 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12036 MONO_START_BB (cfg, is_gsharedvt_bb);
12037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12038 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12039 MONO_ADD_INS (cfg->cbb, addr);
12041 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12043 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12044 ins->dreg = alloc_preg (cfg);
12045 ins->inst_imm = 2 * SIZEOF_VOID_P;
12046 MONO_ADD_INS (cfg->cbb, ins);
12047 localloc_ins = ins;
12048 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12049 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12050 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12052 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12053 ins->dreg = call->dreg;
12054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12056 MONO_START_BB (cfg, end_bb);
12059 /* Same as CEE_CALLI */
12060 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12062 * We pass the address to the gsharedvt trampoline in the rgctx reg
12064 MonoInst *callee = addr;
12066 addr = emit_get_rgctx_sig (cfg, context_used,
12067 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12068 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12070 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12074 if (!MONO_TYPE_IS_VOID (fsig->ret))
12075 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12077 CHECK_CFG_EXCEPTION;
12081 constrained_class = NULL;
12084 case CEE_MONO_LDDOMAIN:
12085 CHECK_STACK_OVF (1);
12086 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12090 case CEE_MONO_GET_LAST_ERROR:
12092 CHECK_STACK_OVF (1);
12094 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12095 ins->dreg = alloc_dreg (cfg, STACK_I4);
12096 ins->type = STACK_I4;
12097 MONO_ADD_INS (cfg->cbb, ins);
12102 case CEE_MONO_GET_RGCTX_ARG:
12104 CHECK_STACK_OVF (1);
12106 mono_create_rgctx_var (cfg);
12108 MONO_INST_NEW (cfg, ins, OP_MOVE);
12109 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12110 ins->sreg1 = cfg->rgctx_var->dreg;
12111 ins->type = STACK_PTR;
12112 MONO_ADD_INS (cfg->cbb, ins);
12118 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12124 case CEE_PREFIX1: {
12127 case CEE_ARGLIST: {
12128 /* somewhat similar to LDTOKEN */
12129 MonoInst *addr, *vtvar;
12130 CHECK_STACK_OVF (1);
12131 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12133 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12134 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12136 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12137 ins->type = STACK_VTYPE;
12138 ins->klass = mono_defaults.argumenthandle_class;
12148 MonoInst *cmp, *arg1, *arg2;
12156 * The following transforms:
12157 * CEE_CEQ into OP_CEQ
12158 * CEE_CGT into OP_CGT
12159 * CEE_CGT_UN into OP_CGT_UN
12160 * CEE_CLT into OP_CLT
12161 * CEE_CLT_UN into OP_CLT_UN
12163 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12165 MONO_INST_NEW (cfg, ins, cmp->opcode);
12166 cmp->sreg1 = arg1->dreg;
12167 cmp->sreg2 = arg2->dreg;
12168 type_from_op (cfg, cmp, arg1, arg2);
12170 add_widen_op (cfg, cmp, &arg1, &arg2);
12171 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12172 cmp->opcode = OP_LCOMPARE;
12173 else if (arg1->type == STACK_R4)
12174 cmp->opcode = OP_RCOMPARE;
12175 else if (arg1->type == STACK_R8)
12176 cmp->opcode = OP_FCOMPARE;
12178 cmp->opcode = OP_ICOMPARE;
12179 MONO_ADD_INS (cfg->cbb, cmp);
12180 ins->type = STACK_I4;
12181 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12182 type_from_op (cfg, ins, arg1, arg2);
12184 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12186 * The backends expect the fceq opcodes to do the
12189 ins->sreg1 = cmp->sreg1;
12190 ins->sreg2 = cmp->sreg2;
12193 MONO_ADD_INS (cfg->cbb, ins);
12199 MonoInst *argconst;
12200 MonoMethod *cil_method;
12202 CHECK_STACK_OVF (1);
12204 n = read32 (ip + 2);
12205 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12208 mono_class_init (cmethod->klass);
12210 mono_save_token_info (cfg, image, n, cmethod);
12212 context_used = mini_method_check_context_used (cfg, cmethod);
12214 cil_method = cmethod;
12215 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12216 emit_method_access_failure (cfg, method, cil_method);
12218 if (mono_security_core_clr_enabled ())
12219 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12222 * Optimize the common case of ldftn+delegate creation
12224 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12225 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12226 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12227 MonoInst *target_ins, *handle_ins;
12228 MonoMethod *invoke;
12229 int invoke_context_used;
12231 invoke = mono_get_delegate_invoke (ctor_method->klass);
12232 if (!invoke || !mono_method_signature (invoke))
12235 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12237 target_ins = sp [-1];
12239 if (mono_security_core_clr_enabled ())
12240 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12242 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12243 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12244 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12246 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12250 /* FIXME: SGEN support */
12251 if (invoke_context_used == 0 || cfg->llvm_only) {
12253 if (cfg->verbose_level > 3)
12254 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12255 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12258 CHECK_CFG_EXCEPTION;
12268 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12269 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12273 inline_costs += 10 * num_calls++;
12276 case CEE_LDVIRTFTN: {
12277 MonoInst *args [2];
12281 n = read32 (ip + 2);
12282 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12285 mono_class_init (cmethod->klass);
12287 context_used = mini_method_check_context_used (cfg, cmethod);
12289 if (mono_security_core_clr_enabled ())
12290 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12293 * Optimize the common case of ldvirtftn+delegate creation
12295 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12296 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12297 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12298 MonoInst *target_ins, *handle_ins;
12299 MonoMethod *invoke;
12300 int invoke_context_used;
12301 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12303 invoke = mono_get_delegate_invoke (ctor_method->klass);
12304 if (!invoke || !mono_method_signature (invoke))
12307 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12309 target_ins = sp [-1];
12311 if (mono_security_core_clr_enabled ())
12312 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12314 /* FIXME: SGEN support */
12315 if (invoke_context_used == 0 || cfg->llvm_only) {
12317 if (cfg->verbose_level > 3)
12318 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12319 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12322 CHECK_CFG_EXCEPTION;
12335 args [1] = emit_get_rgctx_method (cfg, context_used,
12336 cmethod, MONO_RGCTX_INFO_METHOD);
12339 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12341 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12344 inline_costs += 10 * num_calls++;
12348 CHECK_STACK_OVF (1);
12350 n = read16 (ip + 2);
12352 EMIT_NEW_ARGLOAD (cfg, ins, n);
12357 CHECK_STACK_OVF (1);
12359 n = read16 (ip + 2);
12361 NEW_ARGLOADA (cfg, ins, n);
12362 MONO_ADD_INS (cfg->cbb, ins);
12370 n = read16 (ip + 2);
12372 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12374 emit_starg_ir (cfg, sp, n);
12378 CHECK_STACK_OVF (1);
12380 n = read16 (ip + 2);
12382 if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
12383 /* Avoid loading a struct just to load one of its fields */
12384 EMIT_NEW_LOCLOADA (cfg, ins, n);
12386 EMIT_NEW_LOCLOAD (cfg, ins, n);
12392 unsigned char *tmp_ip;
12393 CHECK_STACK_OVF (1);
12395 n = read16 (ip + 2);
12398 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12404 EMIT_NEW_LOCLOADA (cfg, ins, n);
12413 n = read16 (ip + 2);
12415 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12417 emit_stloc_ir (cfg, sp, header, n);
12421 case CEE_LOCALLOC: {
12423 MonoBasicBlock *non_zero_bb, *end_bb;
12424 int alloc_ptr = alloc_preg (cfg);
12426 if (sp != stack_start)
12428 if (cfg->method != method)
12430 * Inlining this into a loop in a parent could lead to
12431 * stack overflows which is different behavior than the
12432 * non-inlined case, thus disable inlining in this case.
12434 INLINE_FAILURE("localloc");
12436 NEW_BBLOCK (cfg, non_zero_bb);
12437 NEW_BBLOCK (cfg, end_bb);
12439 /* if size != zero */
12440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12441 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12443 //size is zero, so result is NULL
12444 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12445 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12447 MONO_START_BB (cfg, non_zero_bb);
12448 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12449 ins->dreg = alloc_ptr;
12450 ins->sreg1 = sp [0]->dreg;
12451 ins->type = STACK_PTR;
12452 MONO_ADD_INS (cfg->cbb, ins);
12454 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12456 ins->flags |= MONO_INST_INIT;
12458 MONO_START_BB (cfg, end_bb);
12459 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12460 ins->type = STACK_PTR;
12466 case CEE_ENDFILTER: {
12467 MonoExceptionClause *clause, *nearest;
12472 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12474 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12475 ins->sreg1 = (*sp)->dreg;
12476 MONO_ADD_INS (cfg->cbb, ins);
12477 start_new_bblock = 1;
12481 for (cc = 0; cc < header->num_clauses; ++cc) {
12482 clause = &header->clauses [cc];
12483 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12484 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12485 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12488 g_assert (nearest);
12489 if ((ip - header->code) != nearest->handler_offset)
12494 case CEE_UNALIGNED_:
12495 ins_flag |= MONO_INST_UNALIGNED;
12496 /* FIXME: record alignment? we can assume 1 for now */
12500 case CEE_VOLATILE_:
12501 ins_flag |= MONO_INST_VOLATILE;
12505 ins_flag |= MONO_INST_TAILCALL;
12506 cfg->flags |= MONO_CFG_HAS_TAIL;
12507 /* Can't inline tail calls at this time */
12508 inline_costs += 100000;
12515 token = read32 (ip + 2);
12516 klass = mini_get_class (method, token, generic_context);
12517 CHECK_TYPELOAD (klass);
12518 if (generic_class_is_reference_type (cfg, klass))
12519 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12521 mini_emit_initobj (cfg, *sp, NULL, klass);
12525 case CEE_CONSTRAINED_:
12527 token = read32 (ip + 2);
12528 constrained_class = mini_get_class (method, token, generic_context);
12529 CHECK_TYPELOAD (constrained_class);
12535 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12543 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12551 ins_flag |= MONO_INST_NOTYPECHECK;
12553 ins_flag |= MONO_INST_NORANGECHECK;
12554 /* we ignore the no-nullcheck for now since we
12555 * really do it explicitly only when doing callvirt->call
12559 case CEE_RETHROW: {
12561 int handler_offset = -1;
12563 for (i = 0; i < header->num_clauses; ++i) {
12564 MonoExceptionClause *clause = &header->clauses [i];
12565 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12566 handler_offset = clause->handler_offset;
12571 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12573 if (handler_offset == -1)
12576 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12577 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12578 ins->sreg1 = load->dreg;
12579 MONO_ADD_INS (cfg->cbb, ins);
12581 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12582 MONO_ADD_INS (cfg->cbb, ins);
12585 link_bblock (cfg, cfg->cbb, end_bblock);
12586 start_new_bblock = 1;
12594 CHECK_STACK_OVF (1);
12596 token = read32 (ip + 2);
12597 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12598 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12601 val = mono_type_size (type, &ialign);
12603 MonoClass *klass = mini_get_class (method, token, generic_context);
12604 CHECK_TYPELOAD (klass);
12606 val = mono_type_size (&klass->byval_arg, &ialign);
12608 if (mini_is_gsharedvt_klass (klass))
12609 GSHAREDVT_FAILURE (*ip);
12611 EMIT_NEW_ICONST (cfg, ins, val);
12616 case CEE_REFANYTYPE: {
12617 MonoInst *src_var, *src;
12619 GSHAREDVT_FAILURE (*ip);
12625 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12627 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12628 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12629 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12634 case CEE_READONLY_:
12647 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12657 g_warning ("opcode 0x%02x not handled", *ip);
12661 if (start_new_bblock != 1)
12664 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12665 if (cfg->cbb->next_bb) {
12666 /* This could already be set because of inlining, #693905 */
12667 MonoBasicBlock *bb = cfg->cbb;
12669 while (bb->next_bb)
12671 bb->next_bb = end_bblock;
12673 cfg->cbb->next_bb = end_bblock;
12676 if (cfg->method == method && cfg->domainvar) {
12678 MonoInst *get_domain;
12680 cfg->cbb = init_localsbb;
12682 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12683 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12684 MONO_ADD_INS (cfg->cbb, store);
12687 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12688 if (cfg->compile_aot)
12689 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12690 mono_get_got_var (cfg);
12693 if (cfg->method == method && cfg->got_var)
12694 mono_emit_load_got_addr (cfg);
12696 if (init_localsbb) {
12697 cfg->cbb = init_localsbb;
12699 for (i = 0; i < header->num_locals; ++i) {
12700 emit_init_local (cfg, i, header->locals [i], init_locals);
12704 if (cfg->init_ref_vars && cfg->method == method) {
12705 /* Emit initialization for ref vars */
12706 // FIXME: Avoid duplication initialization for IL locals.
12707 for (i = 0; i < cfg->num_varinfo; ++i) {
12708 MonoInst *ins = cfg->varinfo [i];
12710 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12711 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12715 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12716 cfg->cbb = init_localsbb;
12717 emit_push_lmf (cfg);
12720 cfg->cbb = init_localsbb;
12721 mini_profiler_emit_enter (cfg);
12724 MonoBasicBlock *bb;
12727 * Make seq points at backward branch targets interruptable.
12729 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12730 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12731 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12734 /* Add a sequence point for method entry/exit events */
12735 if (seq_points && cfg->gen_sdb_seq_points) {
12736 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12737 MONO_ADD_INS (init_localsbb, ins);
12738 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12739 MONO_ADD_INS (cfg->bb_exit, ins);
12743 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12744 * the code they refer to was dead (#11880).
12746 if (sym_seq_points) {
12747 for (i = 0; i < header->code_size; ++i) {
12748 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12751 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12752 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12759 if (cfg->method == method) {
12760 MonoBasicBlock *bb;
12761 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12762 if (bb == cfg->bb_init)
12765 bb->region = mono_find_block_region (cfg, bb->real_offset);
12767 mono_create_spvar_for_region (cfg, bb->region);
12768 if (cfg->verbose_level > 2)
12769 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12772 MonoBasicBlock *bb;
12773 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12774 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12775 bb->real_offset = inline_offset;
12779 if (inline_costs < 0) {
12782 /* Method is too large */
12783 mname = mono_method_full_name (method, TRUE);
12784 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12788 if ((cfg->verbose_level > 2) && (cfg->method == method))
12789 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12794 g_assert (!mono_error_ok (&cfg->error));
12798 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12802 set_exception_type_from_invalid_il (cfg, method, ip);
12806 g_slist_free (class_inits);
12807 mono_basic_block_free (original_bb);
12808 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12809 if (cfg->exception_type)
12812 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to the corresponding *_MEMBASE_IMM
 * opcode, so a store of a known constant can carry the value as an
 * immediate instead of going through a register.
 * Aborts on opcodes which have no immediate variant.
 */
12816 store_membase_reg_to_store_membase_imm (int opcode)
12819 case OP_STORE_MEMBASE_REG:
12820 return OP_STORE_MEMBASE_IMM;
12821 case OP_STOREI1_MEMBASE_REG:
12822 return OP_STOREI1_MEMBASE_IMM;
12823 case OP_STOREI2_MEMBASE_REG:
12824 return OP_STOREI2_MEMBASE_IMM;
12825 case OP_STOREI4_MEMBASE_REG:
12826 return OP_STOREI4_MEMBASE_IMM;
12827 case OP_STOREI8_MEMBASE_REG:
12828 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for any other store opcode */
12830 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to its immediate-operand variant
 * (e.g. OP_IADD -> OP_IADD_IMM), used to fold a constant operand
 * directly into the instruction.
 * NOTE(review): the case labels and the fallback for opcodes with no
 * immediate form are not visible in this view — presumably -1; confirm
 * against the full source.
 */
12837 mono_op_to_op_imm (int opcode)
/* 32 bit ALU ops */
12841 return OP_IADD_IMM;
12843 return OP_ISUB_IMM;
12845 return OP_IDIV_IMM;
12847 return OP_IDIV_UN_IMM;
12849 return OP_IREM_IMM;
12851 return OP_IREM_UN_IMM;
12853 return OP_IMUL_IMM;
12855 return OP_IAND_IMM;
12859 return OP_IXOR_IMM;
12861 return OP_ISHL_IMM;
12863 return OP_ISHR_IMM;
12865 return OP_ISHR_UN_IMM;
/* 64 bit ALU ops */
12868 return OP_LADD_IMM;
12870 return OP_LSUB_IMM;
12872 return OP_LAND_IMM;
12876 return OP_LXOR_IMM;
12878 return OP_LSHL_IMM;
12880 return OP_LSHR_IMM;
12882 return OP_LSHR_UN_IMM;
/* the 64 bit remainder immediate form is only mapped on 64 bit targets */
12883 #if SIZEOF_REGISTER == 8
12885 return OP_LREM_IMM;
/* compares */
12889 return OP_COMPARE_IMM;
12891 return OP_ICOMPARE_IMM;
12893 return OP_LCOMPARE_IMM;
/* stores: same mapping as store_membase_reg_to_store_membase_imm () */
12895 case OP_STORE_MEMBASE_REG:
12896 return OP_STORE_MEMBASE_IMM;
12897 case OP_STOREI1_MEMBASE_REG:
12898 return OP_STOREI1_MEMBASE_IMM;
12899 case OP_STOREI2_MEMBASE_REG:
12900 return OP_STOREI2_MEMBASE_IMM;
12901 case OP_STOREI4_MEMBASE_REG:
12902 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific opcodes */
12904 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12906 return OP_X86_PUSH_IMM;
12907 case OP_X86_COMPARE_MEMBASE_REG:
12908 return OP_X86_COMPARE_MEMBASE_IMM;
12910 #if defined(TARGET_AMD64)
12911 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12912 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* a register-indirect call with a constant target becomes a direct call */
12914 case OP_VOIDCALL_REG:
12915 return OP_VOIDCALL;
12923 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the JIT load opcode with matching
 * size and signedness (OP_LOAD*_MEMBASE).  LDIND_I and LDIND_REF both
 * map to the pointer-sized OP_LOAD_MEMBASE.
 * Aborts on unexpected opcodes.
 */
12930 ldind_to_load_membase (int opcode)
12934 return OP_LOADI1_MEMBASE;
12936 return OP_LOADU1_MEMBASE;
12938 return OP_LOADI2_MEMBASE;
12940 return OP_LOADU2_MEMBASE;
12942 return OP_LOADI4_MEMBASE;
12944 return OP_LOADU4_MEMBASE;
12946 return OP_LOAD_MEMBASE;
12947 case CEE_LDIND_REF:
12948 return OP_LOAD_MEMBASE;
12950 return OP_LOADI8_MEMBASE;
12952 return OP_LOADR4_MEMBASE;
12954 return OP_LOADR8_MEMBASE;
12956 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the JIT store opcode with matching
 * size (OP_STORE*_MEMBASE_REG).  STIND_REF maps to the pointer-sized
 * OP_STORE_MEMBASE_REG.
 * Aborts on unexpected opcodes.
 */
12963 stind_to_store_membase (int opcode)
12967 return OP_STOREI1_MEMBASE_REG;
12969 return OP_STOREI2_MEMBASE_REG;
12971 return OP_STOREI4_MEMBASE_REG;
12973 case CEE_STIND_REF:
12974 return OP_STORE_MEMBASE_REG;
12976 return OP_STOREI8_MEMBASE_REG;
12978 return OP_STORER4_MEMBASE_REG;
12980 return OP_STORER8_MEMBASE_REG;
12982 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the corresponding OP_LOAD*_MEM
 * (load from an absolute address) opcode, on targets which support
 * such an addressing mode (currently x86/amd64 only).
 * NOTE(review): the fallback for other opcodes/targets is not visible
 * in this view — presumably -1.
 */
12989 mono_load_membase_to_load_mem (int opcode)
12991 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12992 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12994 case OP_LOAD_MEMBASE:
12995 return OP_LOAD_MEM;
12996 case OP_LOADU1_MEMBASE:
12997 return OP_LOADU1_MEM;
12998 case OP_LOADU2_MEMBASE:
12999 return OP_LOADU2_MEM;
13000 case OP_LOADI4_MEMBASE:
13001 return OP_LOADI4_MEM;
13002 case OP_LOADU4_MEMBASE:
13003 return OP_LOADU4_MEM;
/* 64 bit loads only have an absolute form on 64 bit targets */
13004 #if SIZEOF_REGISTER == 8
13005 case OP_LOADI8_MEMBASE:
13006 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is later stored with
 * STORE_OPCODE, return a read-modify-write *_MEMBASE variant so the
 * load + op + store sequence can be fused into one instruction
 * operating directly on the stack slot (x86/amd64 only).
 * NOTE(review): the guarded early returns and the final fallback are
 * not visible in this view — presumably -1 when no fusion is possible.
 */
13015 op_to_op_dest_membase (int store_opcode, int opcode)
13017 #if defined(TARGET_X86)
/* only pointer-sized / 32 bit stores can be fused on x86 */
13018 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13023 return OP_X86_ADD_MEMBASE_REG;
13025 return OP_X86_SUB_MEMBASE_REG;
13027 return OP_X86_AND_MEMBASE_REG;
13029 return OP_X86_OR_MEMBASE_REG;
13031 return OP_X86_XOR_MEMBASE_REG;
13034 return OP_X86_ADD_MEMBASE_IMM;
13037 return OP_X86_SUB_MEMBASE_IMM;
13040 return OP_X86_AND_MEMBASE_IMM;
13043 return OP_X86_OR_MEMBASE_IMM;
13046 return OP_X86_XOR_MEMBASE_IMM;
13052 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 64 bit stores */
13053 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops map to the X86_ variants, 64 bit ops to the AMD64_ variants below */
13058 return OP_X86_ADD_MEMBASE_REG;
13060 return OP_X86_SUB_MEMBASE_REG;
13062 return OP_X86_AND_MEMBASE_REG;
13064 return OP_X86_OR_MEMBASE_REG;
13066 return OP_X86_XOR_MEMBASE_REG;
13068 return OP_X86_ADD_MEMBASE_IMM;
13070 return OP_X86_SUB_MEMBASE_IMM;
13072 return OP_X86_AND_MEMBASE_IMM;
13074 return OP_X86_OR_MEMBASE_IMM;
13076 return OP_X86_XOR_MEMBASE_IMM;
13078 return OP_AMD64_ADD_MEMBASE_REG;
13080 return OP_AMD64_SUB_MEMBASE_REG;
13082 return OP_AMD64_AND_MEMBASE_REG;
13084 return OP_AMD64_OR_MEMBASE_REG;
13086 return OP_AMD64_XOR_MEMBASE_REG;
13089 return OP_AMD64_ADD_MEMBASE_IMM;
13092 return OP_AMD64_SUB_MEMBASE_IMM;
13095 return OP_AMD64_AND_MEMBASE_IMM;
13098 return OP_AMD64_OR_MEMBASE_IMM;
13101 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style opcode with the byte store consuming its result
 * into a single X86_SET*_MEMBASE instruction (x86/amd64 only).
 * NOTE(review): the case labels and the -1 fallback are not visible in
 * this view — confirm against the full source.
 */
13111 op_to_op_store_membase (int store_opcode, int opcode)
13113 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13116 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13117 return OP_X86_SETEQ_MEMBASE;
13119 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13120 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an instruction whose sreg1 is produced by a load with
 * LOAD_OPCODE, return a *_MEMBASE variant which reads the first operand
 * directly from memory, so the separate load can be eliminated
 * (x86/amd64 only).
 * NOTE(review): several case labels and the -1 fallback are not visible
 * in this view.
 */
13128 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13131 /* FIXME: This has sign extension issues */
13133 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13134 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer-sized / 32 bit loads can be fused on x86 */
13137 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13142 return OP_X86_PUSH_MEMBASE;
13143 case OP_COMPARE_IMM:
13144 case OP_ICOMPARE_IMM:
13145 return OP_X86_COMPARE_MEMBASE_IMM;
13148 return OP_X86_COMPARE_MEMBASE_REG;
13152 #ifdef TARGET_AMD64
13153 /* FIXME: This has sign extension issues */
13155 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13156 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is a 32 bit load */
13161 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13162 return OP_X86_PUSH_MEMBASE;
13164 /* FIXME: This only works for 32 bit immediates
13165 case OP_COMPARE_IMM:
13166 case OP_LCOMPARE_IMM:
13167 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13168 return OP_AMD64_COMPARE_MEMBASE_IMM;
13170 case OP_ICOMPARE_IMM:
13171 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13172 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13176 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13177 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13178 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13179 return OP_AMD64_COMPARE_MEMBASE_REG;
13182 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13183 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given an instruction whose sreg2 is produced by a load with
 * LOAD_OPCODE, return a *_REG_MEMBASE variant which reads the second
 * operand directly from memory (x86/amd64 only).
 * NOTE(review): the case labels and the -1 fallback are not visible in
 * this view.
 */
13192 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer-sized / 32 bit loads can be fused */
13195 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13201 return OP_X86_COMPARE_REG_MEMBASE;
13203 return OP_X86_ADD_REG_MEMBASE;
13205 return OP_X86_SUB_REG_MEMBASE;
13207 return OP_X86_AND_REG_MEMBASE;
13209 return OP_X86_OR_REG_MEMBASE;
13211 return OP_X86_XOR_REG_MEMBASE;
13215 #ifdef TARGET_AMD64
/* 32 bit loads (including OP_LOAD_MEMBASE under ILP32) use the 32 bit forms */
13216 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13219 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13221 return OP_X86_ADD_REG_MEMBASE;
13223 return OP_X86_SUB_REG_MEMBASE;
13225 return OP_X86_AND_REG_MEMBASE;
13227 return OP_X86_OR_REG_MEMBASE;
13229 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit loads use the AMD64_ forms */
13231 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13235 return OP_AMD64_COMPARE_REG_MEMBASE;
13237 return OP_AMD64_ADD_REG_MEMBASE;
13239 return OP_AMD64_SUB_REG_MEMBASE;
13241 return OP_AMD64_AND_REG_MEMBASE;
13243 return OP_AMD64_OR_REG_MEMBASE;
13245 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filters out opcodes whose
 * immediate forms are software-emulated on the current target (long
 * shifts on 32 bit, mul/div/rem when the arch emulates them), so the
 * conversion is only applied when a real instruction exists.
 * NOTE(review): the guarded case lists are not visible in this view —
 * presumably each guarded group returns -1.
 */
13254 mono_op_to_op_imm_noemul (int opcode)
13257 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13263 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13270 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else gets the normal immediate mapping */
13275 return mono_op_to_op_imm (opcode);
13280 * mono_handle_global_vregs:
13282 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13286 mono_handle_global_vregs (MonoCompile *cfg)
13288 gint32 *vreg_to_bb;
13289 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num + 1 = seen in exactly one
 * bblock, -1 = seen in more than one bblock.
 * NOTE(review): sizeof (gint32*) here looks like it was meant to be
 * sizeof (gint32); it over-allocates on 64 bit targets (harmless) — confirm.
 */
13292 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13294 #ifdef MONO_ARCH_SIMD_INTRINSICS
13295 if (cfg->uses_simd_intrinsics)
13296 mono_simd_simplify_indirection (cfg);
13299 /* Find local vregs used in more than one bb */
13300 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13301 MonoInst *ins = bb->code;
13302 int block_num = bb->block_num;
13304 if (cfg->verbose_level > 2)
13305 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13308 for (; ins; ins = ins->next) {
13309 const char *spec = INS_INFO (ins->opcode);
13310 int regtype = 0, regindex;
13313 if (G_UNLIKELY (cfg->verbose_level > 2))
13314 mono_print_ins (ins);
/* by this point all IL opcodes must have been lowered to machine-level ones */
13316 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn; skip slots the spec marks unused */
13318 for (regindex = 0; regindex < 4; regindex ++) {
13321 if (regindex == 0) {
13322 regtype = spec [MONO_INST_DEST];
13323 if (regtype == ' ')
13326 } else if (regindex == 1) {
13327 regtype = spec [MONO_INST_SRC1];
13328 if (regtype == ' ')
13331 } else if (regindex == 2) {
13332 regtype = spec [MONO_INST_SRC2];
13333 if (regtype == ' ')
13336 } else if (regindex == 3) {
13337 regtype = spec [MONO_INST_SRC3];
13338 if (regtype == ' ')
13343 #if SIZEOF_REGISTER == 4
13344 /* In the LLVM case, the long opcodes are not decomposed */
13345 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13347 * Since some instructions reference the original long vreg,
13348 * and some reference the two component vregs, it is quite hard
13349 * to determine when it needs to be global. So be conservative.
13351 if (!get_vreg_to_inst (cfg, vreg)) {
13352 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13354 if (cfg->verbose_level > 2)
13355 printf ("LONG VREG R%d made global.\n", vreg);
13359 * Make the component vregs volatile since the optimizations can
13360 * get confused otherwise.
13362 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13363 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13367 g_assert (vreg != -1);
13369 prev_bb = vreg_to_bb [vreg];
13370 if (prev_bb == 0) {
13371 /* 0 is a valid block num */
13372 vreg_to_bb [vreg] = block_num + 1;
13373 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are never turned into variables */
13374 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13377 if (!get_vreg_to_inst (cfg, vreg)) {
13378 if (G_UNLIKELY (cfg->verbose_level > 2))
13379 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create an OP_LOCAL variable typed after the spec's regtype character */
13383 if (vreg_is_ref (cfg, vreg))
13384 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13386 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13389 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13392 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13396 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13399 g_assert_not_reached ();
13403 /* Flag as having been used in more than one bb */
13404 vreg_to_bb [vreg] = -1;
13410 /* If a variable is used in only one bblock, convert it into a local vreg */
13411 for (i = 0; i < cfg->num_varinfo; i++) {
13412 MonoInst *var = cfg->varinfo [i];
13413 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13415 switch (var->type) {
13421 #if SIZEOF_REGISTER == 8
13424 #if !defined(TARGET_X86)
13425 /* Enabling this screws up the fp stack on x86 */
13428 if (mono_arch_is_soft_float ())
13432 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13436 /* Arguments are implicitly global */
13437 /* Putting R4 vars into registers doesn't work currently */
13438 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13439 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13441 * Make that the variable's liveness interval doesn't contain a call, since
13442 * that would cause the lvreg to be spilled, making the whole optimization
13445 /* This is too slow for JIT compilation */
13447 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13449 int def_index, call_index, ins_index;
13450 gboolean spilled = FALSE;
13455 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13456 const char *spec = INS_INFO (ins->opcode);
13458 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13459 def_index = ins_index;
/*
 * NOTE(review): the second clause below duplicates the SRC1/sreg1 test;
 * it presumably was meant to check MONO_INST_SRC2/ins->sreg2 — confirm
 * against upstream before relying on this spill heuristic.
 */
13461 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13462 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13463 if (call_index > def_index) {
13469 if (MONO_IS_CALL (ins))
13470 call_index = ins_index;
13480 if (G_UNLIKELY (cfg->verbose_level > 2))
13481 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and drop the vreg -> inst mapping */
13482 var->flags |= MONO_INST_IS_DEAD;
13483 cfg->vreg_to_inst [var->dreg] = NULL;
13490 * Compress the varinfo and vars tables so the liveness computation is faster and
13491 * takes up less space.
13494 for (i = 0; i < cfg->num_varinfo; ++i) {
13495 MonoInst *var = cfg->varinfo [i];
13496 if (pos < i && cfg->locals_start == i)
13497 cfg->locals_start = pos;
13498 if (!(var->flags & MONO_INST_IS_DEAD)) {
13500 cfg->varinfo [pos] = cfg->varinfo [i];
13501 cfg->varinfo [pos]->inst_c0 = pos;
13502 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13503 cfg->vars [pos].idx = pos;
13504 #if SIZEOF_REGISTER == 4
13505 if (cfg->varinfo [pos]->type == STACK_I8) {
13506 /* Modify the two component vars too */
13509 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13510 var1->inst_c0 = pos;
13511 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13512 var1->inst_c0 = pos;
13519 cfg->num_varinfo = pos;
13520 if (cfg->locals_start > cfg->num_varinfo)
13521 cfg->locals_start = cfg->num_varinfo;
13525 * mono_allocate_gsharedvt_vars:
13527 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13528 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13531 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* mempool-zeroed, so unmapped vregs read as 0 ("no gsharedvt entry") */
13535 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13537 for (i = 0; i < cfg->num_varinfo; ++i) {
13538 MonoInst *ins = cfg->varinfo [i];
13541 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13542 if (i >= cfg->locals_start) {
/* local: gets a runtime-info slot; stored as idx + 1 since 0 means "none" */
13544 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13545 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13546 ins->opcode = OP_GSHAREDVT_LOCAL;
13547 ins->inst_imm = idx;
/* argument: marked -1, addressed through its register offset instead */
13550 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13551 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13558 * mono_spill_global_vars:
13560 * Generate spill code for variables which are not allocated to registers,
13561 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13562 * code is generated which could be optimized by the local optimization passes.
13565 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13567 MonoBasicBlock *bb;
13569 int orig_next_vreg;
13570 guint32 *vreg_to_lvreg;
13572 guint32 i, lvregs_len, lvregs_size;
13573 gboolean dest_has_lvreg = FALSE;
13574 MonoStackType stacktypes [128];
13575 MonoInst **live_range_start, **live_range_end;
13576 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13578 *need_local_opts = FALSE;
13580 memset (spec2, 0, sizeof (spec2));
13582 /* FIXME: Move this function to mini.c */
13583 stacktypes ['i'] = STACK_PTR;
13584 stacktypes ['l'] = STACK_I8;
13585 stacktypes ['f'] = STACK_R8;
13586 #ifdef MONO_ARCH_SIMD_INTRINSICS
13587 stacktypes ['x'] = STACK_VTYPE;
13590 #if SIZEOF_REGISTER == 4
13591 /* Create MonoInsts for longs */
13592 for (i = 0; i < cfg->num_varinfo; i++) {
13593 MonoInst *ins = cfg->varinfo [i];
13595 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13596 switch (ins->type) {
13601 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13604 g_assert (ins->opcode == OP_REGOFFSET);
13606 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13608 tree->opcode = OP_REGOFFSET;
13609 tree->inst_basereg = ins->inst_basereg;
13610 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13612 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13614 tree->opcode = OP_REGOFFSET;
13615 tree->inst_basereg = ins->inst_basereg;
13616 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13626 if (cfg->compute_gc_maps) {
13627 /* registers need liveness info even for !non refs */
13628 for (i = 0; i < cfg->num_varinfo; i++) {
13629 MonoInst *ins = cfg->varinfo [i];
13631 if (ins->opcode == OP_REGVAR)
13632 ins->flags |= MONO_INST_GC_TRACK;
13636 /* FIXME: widening and truncation */
13639 * As an optimization, when a variable allocated to the stack is first loaded into
13640 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13641 * the variable again.
13643 orig_next_vreg = cfg->next_vreg;
13644 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13645 lvregs_size = 1024;
13646 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13650 * These arrays contain the first and last instructions accessing a given
13652 * Since we emit bblocks in the same order we process them here, and we
13653 * don't split live ranges, these will precisely describe the live range of
13654 * the variable, i.e. the instruction range where a valid value can be found
13655 * in the variables location.
13656 * The live range is computed using the liveness info computed by the liveness pass.
13657 * We can't use vmv->range, since that is an abstract live range, and we need
13658 * one which is instruction precise.
13659 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13661 /* FIXME: Only do this if debugging info is requested */
13662 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13663 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13664 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13665 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13667 /* Add spill loads/stores */
13668 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13671 if (cfg->verbose_level > 2)
13672 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13674 /* Clear vreg_to_lvreg array */
13675 for (i = 0; i < lvregs_len; i++)
13676 vreg_to_lvreg [lvregs [i]] = 0;
13680 MONO_BB_FOR_EACH_INS (bb, ins) {
13681 const char *spec = INS_INFO (ins->opcode);
13682 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13683 gboolean store, no_lvreg;
13684 int sregs [MONO_MAX_SRC_REGS];
13686 if (G_UNLIKELY (cfg->verbose_level > 2))
13687 mono_print_ins (ins);
13689 if (ins->opcode == OP_NOP)
13693 * We handle LDADDR here as well, since it can only be decomposed
13694 * when variable addresses are known.
13696 if (ins->opcode == OP_LDADDR) {
13697 MonoInst *var = (MonoInst *)ins->inst_p0;
13699 if (var->opcode == OP_VTARG_ADDR) {
13700 /* Happens on SPARC/S390 where vtypes are passed by reference */
13701 MonoInst *vtaddr = var->inst_left;
13702 if (vtaddr->opcode == OP_REGVAR) {
13703 ins->opcode = OP_MOVE;
13704 ins->sreg1 = vtaddr->dreg;
13706 else if (var->inst_left->opcode == OP_REGOFFSET) {
13707 ins->opcode = OP_LOAD_MEMBASE;
13708 ins->inst_basereg = vtaddr->inst_basereg;
13709 ins->inst_offset = vtaddr->inst_offset;
13712 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13713 /* gsharedvt arg passed by ref */
13714 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13716 ins->opcode = OP_LOAD_MEMBASE;
13717 ins->inst_basereg = var->inst_basereg;
13718 ins->inst_offset = var->inst_offset;
13719 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13720 MonoInst *load, *load2, *load3;
13721 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13722 int reg1, reg2, reg3;
13723 MonoInst *info_var = cfg->gsharedvt_info_var;
13724 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13728 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13731 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13733 g_assert (info_var);
13734 g_assert (locals_var);
13736 /* Mark the instruction used to compute the locals var as used */
13737 cfg->gsharedvt_locals_var_ins = NULL;
13739 /* Load the offset */
13740 if (info_var->opcode == OP_REGOFFSET) {
13741 reg1 = alloc_ireg (cfg);
13742 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13743 } else if (info_var->opcode == OP_REGVAR) {
13745 reg1 = info_var->dreg;
13747 g_assert_not_reached ();
13749 reg2 = alloc_ireg (cfg);
13750 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13751 /* Load the locals area address */
13752 reg3 = alloc_ireg (cfg);
13753 if (locals_var->opcode == OP_REGOFFSET) {
13754 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13755 } else if (locals_var->opcode == OP_REGVAR) {
13756 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13758 g_assert_not_reached ();
13760 /* Compute the address */
13761 ins->opcode = OP_PADD;
13765 mono_bblock_insert_before_ins (bb, ins, load3);
13766 mono_bblock_insert_before_ins (bb, load3, load2);
13768 mono_bblock_insert_before_ins (bb, load2, load);
13770 g_assert (var->opcode == OP_REGOFFSET);
13772 ins->opcode = OP_ADD_IMM;
13773 ins->sreg1 = var->inst_basereg;
13774 ins->inst_imm = var->inst_offset;
13777 *need_local_opts = TRUE;
13778 spec = INS_INFO (ins->opcode);
13781 if (ins->opcode < MONO_CEE_LAST) {
13782 mono_print_ins (ins);
13783 g_assert_not_reached ();
13787 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13791 if (MONO_IS_STORE_MEMBASE (ins)) {
13792 tmp_reg = ins->dreg;
13793 ins->dreg = ins->sreg2;
13794 ins->sreg2 = tmp_reg;
13797 spec2 [MONO_INST_DEST] = ' ';
13798 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13799 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13800 spec2 [MONO_INST_SRC3] = ' ';
13802 } else if (MONO_IS_STORE_MEMINDEX (ins))
13803 g_assert_not_reached ();
13808 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13809 printf ("\t %.3s %d", spec, ins->dreg);
13810 num_sregs = mono_inst_get_src_registers (ins, sregs);
13811 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13812 printf (" %d", sregs [srcindex]);
13819 regtype = spec [MONO_INST_DEST];
13820 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13823 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13824 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13825 MonoInst *store_ins;
13827 MonoInst *def_ins = ins;
13828 int dreg = ins->dreg; /* The original vreg */
13830 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13832 if (var->opcode == OP_REGVAR) {
13833 ins->dreg = var->dreg;
13834 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13836 * Instead of emitting a load+store, use a _membase opcode.
13838 g_assert (var->opcode == OP_REGOFFSET);
13839 if (ins->opcode == OP_MOVE) {
13843 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13844 ins->inst_basereg = var->inst_basereg;
13845 ins->inst_offset = var->inst_offset;
13848 spec = INS_INFO (ins->opcode);
13852 g_assert (var->opcode == OP_REGOFFSET);
13854 prev_dreg = ins->dreg;
13856 /* Invalidate any previous lvreg for this vreg */
13857 vreg_to_lvreg [ins->dreg] = 0;
13861 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13863 store_opcode = OP_STOREI8_MEMBASE_REG;
13866 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13868 #if SIZEOF_REGISTER != 8
13869 if (regtype == 'l') {
13870 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
13871 mono_bblock_insert_after_ins (bb, ins, store_ins);
13872 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
13873 mono_bblock_insert_after_ins (bb, ins, store_ins);
13874 def_ins = store_ins;
13879 g_assert (store_opcode != OP_STOREV_MEMBASE);
13881 /* Try to fuse the store into the instruction itself */
13882 /* FIXME: Add more instructions */
13883 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13884 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13885 ins->inst_imm = ins->inst_c0;
13886 ins->inst_destbasereg = var->inst_basereg;
13887 ins->inst_offset = var->inst_offset;
13888 spec = INS_INFO (ins->opcode);
13889 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
13890 ins->opcode = store_opcode;
13891 ins->inst_destbasereg = var->inst_basereg;
13892 ins->inst_offset = var->inst_offset;
13896 tmp_reg = ins->dreg;
13897 ins->dreg = ins->sreg2;
13898 ins->sreg2 = tmp_reg;
13901 spec2 [MONO_INST_DEST] = ' ';
13902 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13903 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13904 spec2 [MONO_INST_SRC3] = ' ';
13906 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13907 // FIXME: The backends expect the base reg to be in inst_basereg
13908 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13910 ins->inst_basereg = var->inst_basereg;
13911 ins->inst_offset = var->inst_offset;
13912 spec = INS_INFO (ins->opcode);
13914 /* printf ("INS: "); mono_print_ins (ins); */
13915 /* Create a store instruction */
13916 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13918 /* Insert it after the instruction */
13919 mono_bblock_insert_after_ins (bb, ins, store_ins);
13921 def_ins = store_ins;
13924 * We can't assign ins->dreg to var->dreg here, since the
13925 * sregs could use it. So set a flag, and do it after
13928 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13929 dest_has_lvreg = TRUE;
13934 if (def_ins && !live_range_start [dreg]) {
13935 live_range_start [dreg] = def_ins;
13936 live_range_start_bb [dreg] = bb;
13939 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13942 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13943 tmp->inst_c1 = dreg;
13944 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13951 num_sregs = mono_inst_get_src_registers (ins, sregs);
13952 for (srcindex = 0; srcindex < 3; ++srcindex) {
13953 regtype = spec [MONO_INST_SRC1 + srcindex];
13954 sreg = sregs [srcindex];
13956 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13957 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13958 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13959 MonoInst *use_ins = ins;
13960 MonoInst *load_ins;
13961 guint32 load_opcode;
13963 if (var->opcode == OP_REGVAR) {
13964 sregs [srcindex] = var->dreg;
13965 //mono_inst_set_src_registers (ins, sregs);
13966 live_range_end [sreg] = use_ins;
13967 live_range_end_bb [sreg] = bb;
13969 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13972 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13973 /* var->dreg is a hreg */
13974 tmp->inst_c1 = sreg;
13975 mono_bblock_insert_after_ins (bb, ins, tmp);
13981 g_assert (var->opcode == OP_REGOFFSET);
13983 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13985 g_assert (load_opcode != OP_LOADV_MEMBASE);
13987 if (vreg_to_lvreg [sreg]) {
13988 g_assert (vreg_to_lvreg [sreg] != -1);
13990 /* The variable is already loaded to an lvreg */
13991 if (G_UNLIKELY (cfg->verbose_level > 2))
13992 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13993 sregs [srcindex] = vreg_to_lvreg [sreg];
13994 //mono_inst_set_src_registers (ins, sregs);
13998 /* Try to fuse the load into the instruction */
13999 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14000 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14001 sregs [0] = var->inst_basereg;
14002 //mono_inst_set_src_registers (ins, sregs);
14003 ins->inst_offset = var->inst_offset;
14004 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14005 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14006 sregs [1] = var->inst_basereg;
14007 //mono_inst_set_src_registers (ins, sregs);
14008 ins->inst_offset = var->inst_offset;
14010 if (MONO_IS_REAL_MOVE (ins)) {
14011 ins->opcode = OP_NOP;
14014 //printf ("%d ", srcindex); mono_print_ins (ins);
14016 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14018 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14019 if (var->dreg == prev_dreg) {
14021 * sreg refers to the value loaded by the load
14022 * emitted below, but we need to use ins->dreg
14023 * since it refers to the store emitted earlier.
14027 g_assert (sreg != -1);
14028 vreg_to_lvreg [var->dreg] = sreg;
14029 if (lvregs_len >= lvregs_size) {
14030 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14031 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14032 lvregs = new_lvregs;
14035 lvregs [lvregs_len ++] = var->dreg;
14039 sregs [srcindex] = sreg;
14040 //mono_inst_set_src_registers (ins, sregs);
14042 #if SIZEOF_REGISTER != 8
14043 if (regtype == 'l') {
14044 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14045 mono_bblock_insert_before_ins (bb, ins, load_ins);
14046 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14047 mono_bblock_insert_before_ins (bb, ins, load_ins);
14048 use_ins = load_ins;
14053 #if SIZEOF_REGISTER == 4
14054 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14056 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14057 mono_bblock_insert_before_ins (bb, ins, load_ins);
14058 use_ins = load_ins;
14062 if (var->dreg < orig_next_vreg) {
14063 live_range_end [var->dreg] = use_ins;
14064 live_range_end_bb [var->dreg] = bb;
14067 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14070 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14071 tmp->inst_c1 = var->dreg;
14072 mono_bblock_insert_after_ins (bb, ins, tmp);
14076 mono_inst_set_src_registers (ins, sregs);
14078 if (dest_has_lvreg) {
14079 g_assert (ins->dreg != -1);
14080 vreg_to_lvreg [prev_dreg] = ins->dreg;
14081 if (lvregs_len >= lvregs_size) {
14082 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14083 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14084 lvregs = new_lvregs;
14087 lvregs [lvregs_len ++] = prev_dreg;
14088 dest_has_lvreg = FALSE;
14092 tmp_reg = ins->dreg;
14093 ins->dreg = ins->sreg2;
14094 ins->sreg2 = tmp_reg;
14097 if (MONO_IS_CALL (ins)) {
14098 /* Clear vreg_to_lvreg array */
14099 for (i = 0; i < lvregs_len; i++)
14100 vreg_to_lvreg [lvregs [i]] = 0;
14102 } else if (ins->opcode == OP_NOP) {
14104 MONO_INST_NULLIFY_SREGS (ins);
14107 if (cfg->verbose_level > 2)
14108 mono_print_ins_index (1, ins);
14111 /* Extend the live range based on the liveness info */
14112 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14113 for (i = 0; i < cfg->num_varinfo; i ++) {
14114 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14116 if (vreg_is_volatile (cfg, vi->vreg))
14117 /* The liveness info is incomplete */
14120 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14121 /* Live from at least the first ins of this bb */
14122 live_range_start [vi->vreg] = bb->code;
14123 live_range_start_bb [vi->vreg] = bb;
14126 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14127 /* Live at least until the last ins of this bb */
14128 live_range_end [vi->vreg] = bb->last_ins;
14129 live_range_end_bb [vi->vreg] = bb;
14136 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14137 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14139 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14140 for (i = 0; i < cfg->num_varinfo; ++i) {
14141 int vreg = MONO_VARINFO (cfg, i)->vreg;
14144 if (live_range_start [vreg]) {
14145 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14147 ins->inst_c1 = vreg;
14148 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14150 if (live_range_end [vreg]) {
14151 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14153 ins->inst_c1 = vreg;
14154 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14155 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14157 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14162 if (cfg->gsharedvt_locals_var_ins) {
14163 /* Nullify if unused */
14164 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14165 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14168 g_free (live_range_start);
14169 g_free (live_range_end);
14170 g_free (live_range_start_bb);
14171 g_free (live_range_end_bb);
14177 * - use 'iadd' instead of 'int_add'
14178 * - handling ovf opcodes: decompose in method_to_ir.
14179 * - unify iregs/fregs
14180 * -> partly done, the missing parts are:
14181 * - a more complete unification would involve unifying the hregs as well, so
14182 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14183 * would no longer map to the machine hregs, so the code generators would need to
14184 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14185 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14186 * fp/non-fp branches speeds it up by about 15%.
14187 * - use sext/zext opcodes instead of shifts
14189 * - get rid of TEMPLOADs if possible and use vregs instead
14190 * - clean up usage of OP_P/OP_ opcodes
14191 * - cleanup usage of DUMMY_USE
14192 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14194 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14195 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14196 * - make sure handle_stack_args () is called before the branch is emitted
14197 * - when the new IR is done, get rid of all unused stuff
14198 * - COMPARE/BEQ as separate instructions or unify them ?
14199 * - keeping them separate allows specialized compare instructions like
14200 * compare_imm, compare_membase
14201 * - most back ends unify fp compare+branch, fp compare+ceq
14202 * - integrate mono_save_args into inline_method
14203 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14204 * - handle long shift opts on 32 bit platforms somehow: they require
14205 * 3 sregs (2 for arg1 and 1 for arg2)
14206 * - make byref a 'normal' type.
14207 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14208 * variable if needed.
14209 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14210 * like inline_method.
14211 * - remove inlining restrictions
14212 * - fix LNEG and enable cfold of INEG
14213 * - generalize x86 optimizations like ldelema as a peephole optimization
14214 * - add store_mem_imm for amd64
14215 * - optimize the loading of the interruption flag in the managed->native wrappers
14216 * - avoid special handling of OP_NOP in passes
14217 * - move code inserting instructions into one function/macro.
14218 * - try a coalescing phase after liveness analysis
14219 * - add float -> vreg conversion + local optimizations on !x86
14220 * - figure out how to handle decomposed branches during optimizations, ie.
14221 * compare+branch, op_jump_table+op_br etc.
14222 * - promote RuntimeXHandles to vregs
14223 * - vtype cleanups:
14224 * - add a NEW_VARLOADA_VREG macro
14225 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14226 * accessing vtype fields.
14227 * - get rid of I8CONST on 64 bit platforms
14228 * - dealing with the increase in code size due to branches created during opcode
14230 * - use extended basic blocks
14231 * - all parts of the JIT
14232 * - handle_global_vregs () && local regalloc
14233 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14234 * - sources of increase in code size:
14237 * - isinst and castclass
14238 * - lvregs not allocated to global registers even if used multiple times
14239 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14241 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14242 * - add all micro optimizations from the old JIT
14243 * - put tree optimizations into the deadce pass
14244 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14245 * specific function.
14246 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14247 * fcompare + branchCC.
14248 * - create a helper function for allocating a stack slot, taking into account
14249 * MONO_CFG_HAS_SPILLUP.
14251 * - optimize mono_regstate2_alloc_int/float.
14252 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14253 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14254 * parts of the tree could be separated by other instructions, killing the tree
14255 * arguments, or stores killing loads etc. Also, should we fold loads into other
14256 * instructions if the result of the load is used multiple times ?
14257 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14258 * - LAST MERGE: 108395.
14259 * - when returning vtypes in registers, generate IR and append it to the end of the
14260 * last bb instead of doing it in the epilog.
14261 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14269 - When to decompose opcodes:
14270 - earlier: this makes some optimizations hard to implement, since the low level IR
14271 no longer contains the necessary information. But it is easier to do.
14272 - later: harder to implement, enables more optimizations.
14273 - Branches inside bblocks:
14274 - created when decomposing complex opcodes.
14275 - branches to another bblock: harmless, but not tracked by the branch
14276 optimizations, so need to branch to a label at the start of the bblock.
14277 - branches to inside the same bblock: very problematic, trips up the local
14278 reg allocator. Can be fixed by splitting the current bblock, but that is a
14279 complex operation, since some local vregs can become global vregs etc.
14280 - Local/global vregs:
14281 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14282 local register allocator.
14283 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14284 structure, created by mono_create_var (). Assigned to hregs or the stack by
14285 the global register allocator.
14286 - When to do optimizations like alu->alu_imm:
14287 - earlier -> saves work later on since the IR will be smaller/simpler
14288 - later -> can work on more instructions
14289 - Handling of valuetypes:
14290 - When a vtype is pushed on the stack, a new temporary is created, an
14291 instruction computing its address (LDADDR) is emitted and pushed on
14292 the stack. Need to optimize cases when the vtype is used immediately as in
14293 argument passing, stloc etc.
14294 - Instead of the to_end stuff in the old JIT, simply call the function handling
14295 the values on the stack before emitting the last instruction of the bb.
14298 #endif /* !DISABLE_JIT */