3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
/* Non-static: also used by other mini-* translation units. */
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
/* Core inliner entry point; INLINE_ALWAYS forces inlining regardless of heuristics. */
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
/* Emits a virtual call when compiling in llvm-only mode (no vtable trampolines). */
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 /* helper methods signatures */
/* Cached signatures for runtime helper calls; filled in once by
 * mono_create_helper_signatures () at JIT startup. */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 /* type loading helpers */
/* Expand to cached accessors mono_class_get_runtime_helpers_class () /
 * mono_class_try_get_debuggable_attribute_class (). */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
164 * Instruction metadata
/* First expansion of mini-ops.h: builds the per-opcode dreg/sreg1/sreg2(/sreg3)
 * register-class descriptor table. */
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion of mini-ops.h: counts source registers per opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Thin public wrappers over the inline alloc_*reg helpers; they hand out fresh
 * virtual registers of each register class for the current compilation. */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
/* Allocate a fresh long (64-bit) vreg. */
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
/* Allocate a fresh floating point vreg. */
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
/* Allocate a destination vreg whose class is chosen from the eval-stack type. */
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking class (ref / managed pointer / plain int) of VREG. */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/* mono_type_to_regmove:
 *   Map a MonoType to the move opcode (OP_MOVE / OP_LMOVE / OP_FMOVE / ...)
 * used to copy a value of that type between vregs.
 * NOTE(review): interior lines of this switch are missing from this view;
 * several case labels and returns between the visible lines are elided. */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
/* Strip enum/custom-modifier wrappers first so the switch sees primitive types. */
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values live in dedicated single-precision vregs, otherwise they
 * are widened to double and use the generic FP move. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying primitive type. */
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Generic instances of reference types behave like their generic definition. */
325 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* mono_print_bb:
 *   Debug helper: print MSG, the basic block's number, its in/out edges
 * (as "BB<num>(<dfn>)" pairs) and then every instruction in the block. */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
/* Emit the edge summary in one shot, then free the temporary buffer. */
355 g_print ("%s", str->str);
356 g_string_free (str, TRUE);
/* Dump each instruction in the block (index -1 = no numbering). */
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* break_on_unverified:
 *   Trap into the debugger (via the body elided from this view) when the
 * --break-on-unverified debug option is set; used by the UNVERIFIED macro. */
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
376 if (mini_get_debug_options ()->break_on_unverified)
/* field_access_failure:
 *   Record a FieldAccessException on CFG naming the inaccessible FIELD and
 * the METHOD that tried to access it. Out-of-line so the fast path stays small. */
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
/* inline_failure:
 *   Mark the compilation as a failed inline attempt (the INLINE_FAILURE macro
 * then aborts the inlined compile and retries without inlining). */
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 if (cfg->verbose_level > 2) \
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* gsharedvt_failure:
 *   Like gshared_failure, but for the gsharedvt (variable-size value type
 * sharing) compile mode: stores a descriptive message on the cfg and marks
 * the compilation so it falls back to a concrete instantiation. */
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* The message is heap-allocated; ownership presumably passes to cfg — TODO confirm who frees exception_message. */
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
417 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
453 /* Emit conversions so both operands of a binary opcode are of the same type */
/* add_widen_op:
 *   Emit conversions so both operands of binary instruction INS have the same
 * stack type: widens an R4 operand to R8 when mixed with R8, and (on 64-bit)
 * sign-extends an I4 operand mixed with a pointer. Updates *arg1_ref/*arg2_ref
 * to point at the converted values.
 * NOTE(review): several lines (condition head, reassignment of the arg refs,
 * closing braces) are missing from this view. */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
/* ptr + i4 on 64-bit: widen the 32-bit operand with a sign extension. */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
554 * link_bblock: Links two basic blocks
556 * links two basic blocks in the control flow graph, the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * the control flow ends to after 'from'.
/* Adds TO to FROM's out-edge list and FROM to TO's in-edge list, skipping
 * duplicates. Edge arrays live in the cfg mempool and are reallocated one
 * element larger on each insertion.
 * NOTE(review): the duplicate-found early-outs and the final array swap lines
 * are missing from this view. */
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
/* Verbose edge tracing; a NULL cil_code marks the synthetic entry/exit blocks.
 * (The "IL%04x"/"IL_%04x" spelling mismatch below is in the original output
 * strings; left untouched here since this edit changes comments only.) */
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
/* Grow the out-edge array by one (mempool memory: old array is abandoned). */
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
/* Same dance for the reverse (in-edge) direction. */
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
/* Token layout: (clause_index + 1) << 8 | region-kind flag | clause->flags.
 * Handler regions are checked before try regions so an offset inside a
 * handler gets the handler's token, not the enclosing try's. */
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter expression or a handler body? */
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: is OFFSET inside a protected (try) range? */
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
/* ip_in_finally_clause:
 *   TRUE iff OFFSET lies inside a finally or fault handler body.
 * NOTE(review): the return statements are missing from this view. */
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
/* Only finally/fault handlers are of interest; skip catch/filter clauses. */
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* mono_find_final_block:
 *   Collect (into a GList) the clauses of kind TYPE whose protected range
 * contains IP but not TARGET — i.e. the handlers a branch from IP to TARGET
 * leaves and must therefore execute (used for leave/endfinally handling). */
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
/* mono_create_spvar_for_region:
 *   Get or lazily create the per-region variable that saves the stack pointer
 * around a handler; cached in cfg->spvars keyed by region token. */
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* mono_find_exvar_for_offset:
 *   Look up (without creating) the exception variable for a handler offset. */
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* mono_create_exvar_for_offset:
 *   Get or lazily create the object-typed variable holding the in-flight
 * exception for the handler starting at OFFSET; cached in cfg->exvars. */
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/...) and inst->klass from TYPE.
 * NOTE(review): several case labels and break statements between the visible
 * lines are missing from this view. */
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
/* R4: STACK_R4 when the backend has single-precision regs, else STACK_R8. */
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying primitive type. */
797 if (type->data.klass->enumtype) {
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing. */
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
/* Shared-but-not-gsharedvt: recurse on the concrete underlying type. */
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
828 * The following tables are used to quickly validate the IL code in type_from_op ().
/* bin_num_table[a][b]: result stack type of a binary numeric op on stack types
 * a and b; STACK_INV marks combinations the spec forbids. Row/column order
 * follows MonoStackType (Inv, i4, i8, ptr, r8, mp, obj, vtype, r4). Rows
 * touching R4 carry a 9th entry; shorter rows rely on trailing elements
 * defaulting to 0 — presumably STACK_INV; verify against the enum. */
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* neg_table (unary negation): result type per operand stack type. */
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
/* bin_int_table: integer-only binary ops (and/or/xor/div/rem). */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* bin_comp_table: nonzero = comparable; values >1 appear to encode restricted
 * comparisons (e.g. ptr vs mp, ptr vs obj) — verify against the verifier rules. */
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
/* shift_table[value][amount]: result type of shl/shr; amount must be i4/ptr. */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
/* Each entry is an opcode DELTA added to the generic CIL opcode, indexed by
 * the operand's stack type; 0 means no specific opcode exists for that type. */
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
/* Also lowers the generic CIL opcode in ins->opcode to the type-specific IR
 * opcode by adding the deltas from the *_op_map tables above.
 * NOTE(review): many case labels, break statements and #if branches between
 * the visible lines are missing from this view — read with care. */
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
/* Compare: pick the widest compare op for the operand class. */
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
/* Compare-with-immediate: only one stack operand, hence src1 indexed twice. */
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
/* Ordered compares (ceq..clt.un): bit 0 of bin_comp_table gates validity. */
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not). */
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to i1/u1/i2/u2/i4/u4 produce an i4. */
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized unsigned conversion, width depends on the target. */
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
/* conv.i8/u8. */
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8. */
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats. */
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default their klass to object when nothing better is known. */
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
/* ldind_to_type:
 *   Map a CEE_LDIND_* opcode to the MonoClass of the value it loads. */
1165 ldind_to_type (int op)
1168 case CEE_LDIND_I1: return mono_defaults.sbyte_class;
1169 case CEE_LDIND_U1: return mono_defaults.byte_class;
1170 case CEE_LDIND_I2: return mono_defaults.int16_class;
1171 case CEE_LDIND_U2: return mono_defaults.uint16_class;
1172 case CEE_LDIND_I4: return mono_defaults.int32_class;
1173 case CEE_LDIND_U4: return mono_defaults.uint32_class;
1174 case CEE_LDIND_I8: return mono_defaults.int64_class;
1175 case CEE_LDIND_I: return mono_defaults.int_class;
1176 case CEE_LDIND_R4: return mono_defaults.single_class;
1177 case CEE_LDIND_R8: return mono_defaults.double_class;
1178 case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1179 default: g_error ("Unknown ldind type %d", op);
/* param_table: stack-type vs parameter-type compatibility (initializer elided
 * from this view; only referenced by the commented-out check below). */
1186 param_table [STACK_MAX] [STACK_MAX] = {
/* check_values_to_signature:
 *   Best-effort check that the values in ARGS are compatible with SIG
 * (used to validate inline candidates / calls).
 * NOTE(review): the return statements and several case labels are missing
 * from this view. */
1191 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1196 switch (args->type) {
1206 for (i = 0; i < sig->param_count; ++i) {
1207 switch (args [i].type) {
/* A managed-pointer argument requires a byref parameter, and vice versa. */
1211 if (!sig->params [i]->byref)
1215 if (sig->params [i]->byref)
1217 switch (sig->params [i]->type) {
1218 case MONO_TYPE_CLASS:
1219 case MONO_TYPE_STRING:
1220 case MONO_TYPE_OBJECT:
1221 case MONO_TYPE_SZARRAY:
1222 case MONO_TYPE_ARRAY:
/* Floating point stack values only match R4/R8 (non-byref) parameters. */
1229 if (sig->params [i]->byref)
1231 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1240 /*if (!param_table [args [i].type] [sig->params [i]->type])
1248 * When we need a pointer to the current domain many times in a method, we
1249 * call mono_domain_get() once and we store the result in a local variable.
1250 * This function returns the variable that represents the MonoDomain*.
1252 inline static MonoInst *
1253 mono_get_domainvar (MonoCompile *cfg)
1255 if (!cfg->domainvar)
1256 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 return cfg->domainvar;
1261 * The got_var contains the address of the Global Offset Table when AOT
1265 mono_get_got_var (MonoCompile *cfg)
/* A GOT var is only meaningful for AOT compiles on backends that need one
 * (and not under llvm_only); otherwise bail out early. */
1267 if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1269 if (!cfg->got_var) {
1270 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1272 return cfg->got_var;
/* Lazily create cfg->rgctx_var, the variable holding the runtime generic
 * context; it is forced to the stack so its address is stable. */
1276 mono_create_rgctx_var (MonoCompile *cfg)
1278 if (!cfg->rgctx_var) {
1279 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1280 /* force the var to be stack allocated */
1281 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/* Return the rgctx variable used for vtable/generic-context access.
 * Only valid for generic-shared (gshared) methods. */
1286 mono_get_vtable_var (MonoCompile *cfg)
1288 g_assert (cfg->gshared);
1290 mono_create_rgctx_var (cfg);
1292 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's evaluation-stack type back to a MonoType*.
 *   For STACK_MP (elided case label) the managed-pointer form of the
 *   instruction's klass is returned; g_error () on unhandled types.
 */
1296 type_from_stack_type (MonoInst *ins) {
1297 switch (ins->type) {
1298 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1299 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1300 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1301 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1302 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1304 return &ins->klass->this_arg;
1305 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1306 case STACK_VTYPE: return &ins->klass->byval_arg;
1308 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Inverse of type_from_stack_type: compute the evaluation-stack type
 *   for a MonoType, after stripping enum/typedef wrappers.
 *   R4 maps to cfg->r4_stack_type (backend-dependent float handling).
 */
1313 static G_GNUC_UNUSED int
1314 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1316 t = mono_type_get_underlying_type (t);
1328 case MONO_TYPE_FNPTR:
1330 case MONO_TYPE_CLASS:
1331 case MONO_TYPE_STRING:
1332 case MONO_TYPE_OBJECT:
1333 case MONO_TYPE_SZARRAY:
1334 case MONO_TYPE_ARRAY:
1340 return cfg->r4_stack_type;
1343 case MONO_TYPE_VALUETYPE:
1344 case MONO_TYPE_TYPEDBYREF:
1346 case MONO_TYPE_GENERICINST:
/* A generic instantiation is a vtype only if its definition is a valuetype */
1347 if (mono_type_generic_inst_is_valuetype (t))
1353 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CEE_LDELEM_* / CEE_STELEM_* opcode to the element MonoClass.
 *   The case labels pairing each class are elided in this view;
 *   unknown opcodes hit g_assert_not_reached ().
 */
1360 array_access_to_klass (int opcode)
1364 return mono_defaults.byte_class;
1366 return mono_defaults.uint16_class;
1369 return mono_defaults.int_class;
1372 return mono_defaults.sbyte_class;
1375 return mono_defaults.int16_class;
1378 return mono_defaults.int32_class;
1380 return mono_defaults.uint32_class;
1383 return mono_defaults.int64_class;
1386 return mono_defaults.single_class;
1389 return mono_defaults.double_class;
1390 case CEE_LDELEM_REF:
1391 case CEE_STELEM_REF:
1392 return mono_defaults.object_class;
1394 g_assert_not_reached ();
1400 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return (possibly shared) local used to carry a stack value of
 *   ins->type across a basic-block boundary at stack depth SLOT.
 *   Sharable kinds are cached per (type, slot) in cfg->intvars.
 */
1403 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1408 /* inlining can result in deeper stacks */
1409 if (slot >= cfg->header->max_stack)
1410 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* 2D index into intvars: one row per stack slot, one column per stack type */
1412 pos = ins->type - 1 + slot * STACK_MAX;
1414 switch (ins->type) {
1421 if ((vnum = cfg->intvars [pos]))
1422 return cfg->varinfo [vnum];
1423 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1424 cfg->intvars [pos] = res->inst_c0;
/* non-cacheable stack types fall through to a fresh variable */
1427 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Record an image+token pair for KEY in cfg->token_info_hash so the AOT
 *   compiler can later resolve KEY without a generic context.
 */
1433 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1436 * Don't use this if a generic_context is set, since that means AOT can't
1437 * look up the method using just the image+token.
1438 * table == 0 means this is a reference made from a wrapper.
1440 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1441 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1442 jump_info_token->image = image;
1443 jump_info_token->token = token;
1444 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1449 * This function is called to handle items that are left on the evaluation stack
1450 * at basic block boundaries. What happens is that we save the values to local variables
1451 * and we reload them later when first entering the target basic block (with the
1452 * handle_loaded_temps () function).
1453 * A single joint point will use the same variables (stored in the array bb->out_stack or
1454 * bb->in_stack, if the basic block is before or after the joint point).
1456 * This function needs to be called _before_ emitting the last instruction of
1457 * the bb (i.e. before emitting a branch).
1458 * If the stack merge fails at a join point, cfg->unverifiable is set.
1461 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1464 MonoBasicBlock *bb = cfg->cbb;
1465 MonoBasicBlock *outb;
1466 MonoInst *inst, **locals;
1471 if (cfg->verbose_level > 3)
1472 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: choose/allocate the out_stack variables. */
1473 if (!bb->out_scount) {
1474 bb->out_scount = count;
1475 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an out-block's existing in_stack as our out_stack. */
1477 for (i = 0; i < bb->out_count; ++i) {
1478 outb = bb->out_bb [i];
1479 /* exception handlers are linked, but they should not be considered for stack args */
1480 if (outb->flags & BB_EXCEPTION_HANDLER)
1482 //printf (" %d", outb->block_num);
1483 if (outb->in_stack) {
1485 bb->out_stack = outb->in_stack;
1491 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1492 for (i = 0; i < count; ++i) {
1494 * try to reuse temps already allocated for this purpouse, if they occupy the same
1495 * stack slot and if they are of the same type.
1496 * This won't cause conflicts since if 'local' is used to
1497 * store one of the values in the in_stack of a bblock, then
1498 * the same variable will be used for the same outgoing stack
1500 * This doesn't work when inlining methods, since the bblocks
1501 * in the inlined methods do not inherit their in_stack from
1502 * the bblock they are inlined to. See bug #58863 for an
1505 if (cfg->inlined_method)
1506 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1508 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet,
 * and flag a merge failure if an existing in_scount disagrees. */
1513 for (i = 0; i < bb->out_count; ++i) {
1514 outb = bb->out_bb [i];
1515 /* exception handlers are linked, but they should not be considered for stack args */
1516 if (outb->flags & BB_EXCEPTION_HANDLER)
1518 if (outb->in_scount) {
1519 if (outb->in_scount != bb->out_scount) {
1520 cfg->unverifiable = TRUE;
1523 continue; /* check they are the same locals */
1525 outb->in_scount = count;
1526 outb->in_stack = bb->out_stack;
1529 locals = bb->out_stack;
/* Spill each stack item into its variable; sp[] then refers to the vars. */
1531 for (i = 0; i < count; ++i) {
1532 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1533 inst->cil_code = sp [i]->cil_code;
1534 sp [i] = locals [i];
1535 if (cfg->verbose_level > 3)
1536 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1540 * It is possible that the out bblocks already have in_stack assigned, and
1541 * the in_stacks differ. In this case, we will store to all the different
1548 /* Find a bblock which has a different in_stack */
1550 while (bindex < bb->out_count) {
1551 outb = bb->out_bb [bindex];
1552 /* exception handlers are linked, but they should not be considered for stack args */
1553 if (outb->flags & BB_EXCEPTION_HANDLER) {
1557 if (outb->in_stack != locals) {
1558 for (i = 0; i < count; ++i) {
1559 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1560 inst->cil_code = sp [i]->cil_code;
1561 sp [i] = locals [i];
1562 if (cfg->verbose_level > 3)
1563 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1565 locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 *   Emit IR loading a runtime constant described by (patch_type, data).
 *   AOT: emit an AOT constant to be patched at load time.
 *   JIT: resolve the patch target now and emit it as a plain pointer const.
 */
1575 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1579 if (cfg->compile_aot) {
1580 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1586 ji.type = patch_type;
1587 ji.data.target = data;
1588 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1589 mono_error_assert_ok (&error);
1591 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mono_create_fast_tls_getter:
 *   Build an OP_TLS_GET instruction for KEY when a fast TLS offset is
 *   available on this arch; not usable under AOT (offset is process-local).
 *   Returns NULL (elided path) when fast TLS cannot be used.
 */
1597 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1599 int tls_offset = mono_tls_get_tls_offset (key);
1601 if (cfg->compile_aot)
1604 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1606 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1607 ins->dreg = mono_alloc_preg (cfg);
1608 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 *   Counterpart of mono_create_fast_tls_getter: build an OP_TLS_SET
 *   storing VALUE into the TLS slot for KEY. Same AOT/fast-TLS gating.
 */
1615 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1617 int tls_offset = mono_tls_get_tls_offset (key);
1619 if (cfg->compile_aot)
1622 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1624 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1625 ins->sreg1 = value->dreg;
1626 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 *   Emit IR reading the TLS slot KEY. Tries the fast inline path first
 *   (unless debug options force the fallback), then falls back to a
 *   trampoline call (AOT) or a JIT icall to the TLS getter.
 */
1634 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1636 MonoInst *fast_tls = NULL;
1638 if (!mini_get_debug_options ()->use_fallback_tls)
1639 fast_tls = mono_create_fast_tls_getter (cfg, key);
1642 MONO_ADD_INS (cfg->cbb, fast_tls);
1646 if (cfg->compile_aot) {
1649 * tls getters are critical pieces of code and we don't want to resolve them
1650 * through the standard plt/tramp mechanism since we might expose ourselves
1651 * to crashes and infinite recursions.
1653 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1654 return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1656 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1657 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 *   Emit IR writing VALUE to the TLS slot KEY; mirrors mono_create_tls_get
 *   (fast path, then AOT trampoline, then JIT icall to the setter).
 */
1662 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1664 MonoInst *fast_tls = NULL;
1666 if (!mini_get_debug_options ()->use_fallback_tls)
1667 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1670 MONO_ADD_INS (cfg->cbb, fast_tls);
1674 if (cfg->compile_aot) {
1676 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1677 return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1679 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1680 return mono_emit_jit_icall (cfg, setter, &value);
1687 * Emit IR to push the current LMF onto the LMF stack.
1690 emit_push_lmf (MonoCompile *cfg)
1693 * Emit IR to push the LMF:
1694 * lmf_addr = <lmf_addr from tls>
1695 * lmf->lmf_addr = lmf_addr
1696 * lmf->prev_lmf = *lmf_addr
1699 MonoInst *ins, *lmf_ins;
1704 int lmf_reg, prev_lmf_reg;
1706 * Store lmf_addr in a variable, so it can be allocated to a global register.
1708 if (!cfg->lmf_addr_var)
1709 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from jit_tls + offset, the other reads it
 * directly from TLS_KEY_LMF_ADDR (the selecting condition is elided here). */
1712 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1714 int jit_tls_dreg = ins->dreg;
1716 lmf_reg = alloc_preg (cfg);
1717 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1719 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1722 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1724 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1725 lmf_reg = ins->dreg;
1727 prev_lmf_reg = alloc_preg (cfg);
1728 /* Save previous_lmf */
1729 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1730 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make our LMF the new top of the LMF stack: *lmf_addr = &lmf */
1732 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1738 * Emit IR to pop the current LMF from the LMF stack.
1741 emit_pop_lmf (MonoCompile *cfg)
1743 int lmf_reg, lmf_addr_reg;
1749 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1750 lmf_reg = ins->dreg;
1754 * Emit IR to pop the LMF:
1755 * *(lmf->lmf_addr) = lmf->prev_lmf
1757 /* This could be called before emit_push_lmf () */
1758 if (!cfg->lmf_addr_var)
1759 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1760 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1762 prev_lmf_reg = alloc_preg (cfg);
1763 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1764 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *   Pick the call IR opcode family for a return TYPE, further selected by
 *   CALLI (indirect via register) and VIRT (vtable/membase dispatch).
 *   Enums and generic insts are unwrapped and re-dispatched.
 */
1768 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1771 type = mini_get_underlying_type (type);
1772 switch (type->type) {
1773 case MONO_TYPE_VOID:
1774 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1781 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1785 case MONO_TYPE_FNPTR:
1786 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1787 case MONO_TYPE_CLASS:
1788 case MONO_TYPE_STRING:
1789 case MONO_TYPE_OBJECT:
1790 case MONO_TYPE_SZARRAY:
1791 case MONO_TYPE_ARRAY:
1792 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1795 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1798 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1800 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1802 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1803 case MONO_TYPE_VALUETYPE:
/* Enums dispatch again on their underlying integral type */
1804 if (type->data.klass->enumtype) {
1805 type = mono_class_enum_basetype (type->data.klass);
1808 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1809 case MONO_TYPE_TYPEDBYREF:
1810 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1811 case MONO_TYPE_GENERICINST:
1812 type = &type->data.generic_class->container_class->byval_arg;
1815 case MONO_TYPE_MVAR:
1817 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1819 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1824 //XXX this ignores if t is byref
/* True for the primitive scalar MonoType kinds: BOOLEAN..U8 plus native I/U. */
1825 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1828 * target_type_is_incompatible:
1829 * @cfg: MonoCompile context
1831 * Check that the item @arg on the evaluation stack can be stored
1832 * in the target type (can be a local, or field, etc).
1833 * The cfg arg can be used to check if we need verification or just
1836 * Returns: non-0 value if arg can't be stored on a target.
1839 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1841 MonoType *simple_type;
1844 if (target->byref) {
1845 /* FIXME: check that the pointed to types match */
1846 if (arg->type == STACK_MP) {
1847 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1848 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1849 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1851 /* if the target is native int& or same type */
1852 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1855 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1856 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1857 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1861 if (arg->type == STACK_PTR)
/* non-byref target: dispatch on the lowered type kind below */
1866 simple_type = mini_get_underlying_type (target);
1867 switch (simple_type->type) {
1868 case MONO_TYPE_VOID:
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1880 /* STACK_MP is needed when setting pinned locals */
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1886 case MONO_TYPE_FNPTR:
1888 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1889 * in native int. (#688008).
1891 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1894 case MONO_TYPE_CLASS:
1895 case MONO_TYPE_STRING:
1896 case MONO_TYPE_OBJECT:
1897 case MONO_TYPE_SZARRAY:
1898 case MONO_TYPE_ARRAY:
1899 if (arg->type != STACK_OBJ)
1901 /* FIXME: check type compatibility */
1905 if (arg->type != STACK_I8)
1909 if (arg->type != cfg->r4_stack_type)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 MonoClass *target_class;
1933 if (arg->type != STACK_VTYPE)
1935 klass = mono_class_from_mono_type (simple_type);
1936 target_class = mono_class_from_mono_type (target);
1937 /* The second cases is needed when doing partial sharing */
1938 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1942 if (arg->type != STACK_OBJ)
1944 /* FIXME: check type compatibility */
1948 case MONO_TYPE_MVAR:
/* type variables only appear here under generic sharing */
1949 g_assert (cfg->gshared);
1950 if (mini_type_var_is_vt (simple_type)) {
1951 if (arg->type != STACK_VTYPE)
1954 if (arg->type != STACK_OBJ)
1959 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1965 * Prepare arguments for passing to a function call.
1966 * Return a non-zero value if the arguments can't be passed to the given
1968 * The type checks are not yet complete and some conversions may need
1969 * casts on 32 or 64 bit architectures.
1971 * FIXME: implement this using target_type_is_incompatible ()
1974 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1976 MonoType *simple_type;
/* args [0] carries 'this' here (guarded by an elided hasthis check) */
1980 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1984 for (i = 0; i < sig->param_count; ++i) {
1985 if (sig->params [i]->byref) {
1986 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1990 simple_type = mini_get_underlying_type (sig->params [i]);
1992 switch (simple_type->type) {
1993 case MONO_TYPE_VOID:
2002 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2008 case MONO_TYPE_FNPTR:
2009 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2012 case MONO_TYPE_CLASS:
2013 case MONO_TYPE_STRING:
2014 case MONO_TYPE_OBJECT:
2015 case MONO_TYPE_SZARRAY:
2016 case MONO_TYPE_ARRAY:
2017 if (args [i]->type != STACK_OBJ)
2022 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != cfg->r4_stack_type)
2030 if (args [i]->type != STACK_R8)
2033 case MONO_TYPE_VALUETYPE:
/* enums re-dispatch on their underlying integral type */
2034 if (simple_type->data.klass->enumtype) {
2035 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_TYPEDBYREF:
2042 if (args [i]->type != STACK_VTYPE)
2045 case MONO_TYPE_GENERICINST:
2046 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2049 case MONO_TYPE_MVAR:
2051 if (args [i]->type != STACK_VTYPE)
2055 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Convert an OP_*CALL_MEMBASE opcode to its direct OP_*CALL form
 *   (return statements for each case are elided in this view).
 */
2063 callvirt_to_call (int opcode)
2066 case OP_CALL_MEMBASE:
2068 case OP_VOIDCALL_MEMBASE:
2070 case OP_FCALL_MEMBASE:
2072 case OP_RCALL_MEMBASE:
2074 case OP_VCALL_MEMBASE:
2076 case OP_LCALL_MEMBASE:
2079 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *   Convert an OP_*CALL_MEMBASE opcode to the indirect OP_*CALL_REG form
 *   used when the call target address is already in a register.
 */
2086 callvirt_to_call_reg (int opcode)
2089 case OP_CALL_MEMBASE:
2091 case OP_VOIDCALL_MEMBASE:
2092 return OP_VOIDCALL_REG;
2093 case OP_FCALL_MEMBASE:
2094 return OP_FCALL_REG;
2095 case OP_RCALL_MEMBASE:
2096 return OP_RCALL_REG;
2097 case OP_VCALL_MEMBASE:
2098 return OP_VCALL_REG;
2099 case OP_LCALL_MEMBASE:
2100 return OP_LCALL_REG;
2102 g_assert_not_reached ();
2108 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either a method constant or a copy of
 *   IMT_ARG) into a register and attach it to CALL. LLVM and non-LLVM
 *   paths differ only in how the register is bound to the call.
 */
2110 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2114 if (COMPILE_LLVM (cfg)) {
2116 method_reg = alloc_preg (cfg);
2117 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2119 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2120 method_reg = ins->dreg;
2124 call->imt_arg_reg = method_reg;
2126 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2131 method_reg = alloc_preg (cfg);
2132 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2134 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2135 method_reg = ins->dreg;
2138 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP and fill it in (ip/type assignments elided). */
2141 static MonoJumpInfo *
2142 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2144 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2148 ji->data.target = target;
/* Wrapper around mono_class_check_context_used (gating condition elided). */
2154 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2157 return mono_class_check_context_used (klass);
/* Wrapper around mono_method_check_context_used (gating condition elided). */
2163 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2166 return mono_method_check_context_used (method);
2172 * check_method_sharing:
2174 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2177 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2179 gboolean pass_vtable = FALSE;
2180 gboolean pass_mrgctx = FALSE;
/* vtable is a candidate only for static/valuetype methods on generic classes */
2182 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2183 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2184 gboolean sharable = FALSE;
2186 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2190 * Pass vtable iff target method might
2191 * be shared, which means that sharing
2192 * is enabled for its class and its
2193 * context is sharable (and it's not a
2196 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst != NULL) take an mrgctx instead of a vtable */
2200 if (mini_method_get_context (cmethod) &&
2201 mini_method_get_context (cmethod)->method_inst) {
2202 g_assert (!pass_vtable);
2204 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2207 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* out params are optional; only written when the caller supplied them */
2212 if (out_pass_vtable)
2213 *out_pass_vtable = pass_vtable;
2214 if (out_pass_mrgctx)
2215 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Core helper building a MonoCallInst for any kind of call (direct,
 *   calli, virtual, tail) from SIG and ARGS, handling vtype returns,
 *   soft-float argument conversion and backend-specific arg emission.
 */
2218 inline static MonoCallInst *
2219 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2220 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target)
2224 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls use a dedicated opcode; others derive it from the return type */
2232 mini_profiler_emit_tail_call (cfg, target);
2234 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2236 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2239 call->signature = sig;
2240 call->rgctx_reg = rgctx;
2241 sig_ret = mini_get_underlying_type (sig->ret);
2243 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2246 if (mini_type_is_vtype (sig_ret)) {
2247 call->vret_var = cfg->vret_addr;
2248 //g_assert_not_reached ();
2250 } else if (mini_type_is_vtype (sig_ret)) {
2251 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2254 temp->backend.is_pinvoke = sig->pinvoke;
2257 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2258 * address of return value to increase optimization opportunities.
2259 * Before vtype decomposition, the dreg of the call ins itself represents the
2260 * fact the call modifies the return value. After decomposition, the call will
2261 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2262 * will be transformed into an LDADDR.
2264 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2265 loada->dreg = alloc_preg (cfg);
2266 loada->inst_p0 = temp;
2267 /* We reference the call too since call->dreg could change during optimization */
2268 loada->inst_p1 = call;
2269 MONO_ADD_INS (cfg->cbb, loada);
2271 call->inst.dreg = temp->dreg;
2273 call->vret_var = loada;
2274 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2275 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2277 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2278 if (COMPILE_SOFT_FLOAT (cfg)) {
2280 * If the call has a float argument, we would need to do an r8->r4 conversion using
2281 * an icall, but that cannot be done during the call sequence since it would clobber
2282 * the call registers + the stack. So we do it before emitting the call.
2284 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2286 MonoInst *in = call->args [i];
2288 if (i >= sig->hasthis)
2289 t = sig->params [i - sig->hasthis];
2291 t = &mono_defaults.int_class->byval_arg;
2292 t = mono_type_get_underlying_type (t);
2294 if (!t->byref && t->type == MONO_TYPE_R4) {
2295 MonoInst *iargs [1];
2299 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2301 /* The result will be in an int vreg */
2302 call->args [i] = conv;
2308 call->need_unbox_trampoline = unbox_trampoline;
/* let the selected backend lower the outgoing arguments */
2311 if (COMPILE_LLVM (cfg))
2312 mono_llvm_emit_call (cfg, call);
2314 mono_arch_emit_call (cfg, call);
2316 mono_arch_emit_call (cfg, call);
2319 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2320 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Bind RGCTX_REG as the rgctx out-argument of CALL and record its use. */
2326 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2328 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2329 cfg->uses_rgctx_reg = TRUE;
2330 call->rgctx_reg = TRUE;
2332 call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 *   Emit an indirect call through ADDR with optional imt/rgctx arguments.
 *   For pinvoke wrappers with callconv checking enabled, brackets the call
 *   with OP_GET_SP/OP_SET_SP to detect and repair stack imbalance.
 */
2337 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2342 gboolean check_sp = FALSE;
2344 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2345 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2347 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value now: emitting args below may clobber its vreg */
2352 rgctx_reg = mono_alloc_preg (cfg);
2353 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2357 if (!cfg->stack_inbalance_var)
2358 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2360 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2361 ins->dreg = cfg->stack_inbalance_var->dreg;
2362 MONO_ADD_INS (cfg->cbb, ins);
2365 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
2367 call->inst.sreg1 = addr->dreg;
2370 emit_imt_argument (cfg, call, NULL, imt_arg);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* compare the post-call SP with the saved one; mismatch => EEException */
2377 sp_reg = mono_alloc_preg (cfg);
2379 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2381 MONO_ADD_INS (cfg->cbb, ins);
2383 /* Restore the stack so we don't crash when throwing the exception */
2384 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2385 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2386 MONO_ADD_INS (cfg->cbb, ins);
2388 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2389 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2393 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2395 return (MonoInst*)call;
2399 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a call to METHOD, choosing between direct, devirtualized,
 *   delegate-invoke, IMT/interface and vtable dispatch; also handles
 *   remoting wrappers, string ctors and rgctx/imt out-arguments.
 */
2402 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2403 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2405 #ifndef DISABLE_REMOTING
2406 gboolean might_be_remote = FALSE;
2408 gboolean virtual_ = this_ins != NULL;
2409 gboolean enable_for_aot = TRUE;
2412 MonoInst *call_target = NULL;
2414 gboolean need_unbox_trampoline;
2417 sig = mono_method_signature (method);
2419 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2420 g_assert_not_reached ();
/* copy the rgctx value early so later arg emission cannot clobber it */
2423 rgctx_reg = mono_alloc_preg (cfg);
2424 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2427 if (method->string_ctor) {
2428 /* Create the real signature */
2429 /* FIXME: Cache these */
2430 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2431 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2436 context_used = mini_method_check_context_used (cfg, method);
2438 #ifndef DISABLE_REMOTING
2439 might_be_remote = this_ins && sig->hasthis &&
2440 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2441 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2443 if (might_be_remote && context_used) {
2446 g_assert (cfg->gshared);
2448 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2450 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2454 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2455 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2457 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2459 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
2461 #ifndef DISABLE_REMOTING
2462 if (might_be_remote)
2463 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2466 call->method = method;
2467 call->inst.flags |= MONO_INST_HAS_METHOD;
2468 call->inst.inst_left = this_ins;
2469 call->tail_call = tail;
2472 int vtable_reg, slot_reg, this_reg;
2475 this_reg = this_ins->dreg;
2477 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2478 MonoInst *dummy_use;
2480 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2482 /* Make a call to delegate->invoke_impl */
2483 call->inst.inst_basereg = this_reg;
2484 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2485 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2487 /* We must emit a dummy use here because the delegate trampoline will
2488 replace the 'this' argument with the delegate target making this activation
2489 no longer a root for the delegate.
2490 This is an issue for delegates that target collectible code such as dynamic
2491 methods of GC'able assemblies.
2493 For a test case look into #667921.
2495 FIXME: a dummy use is not the best way to do it as the local register allocator
2496 will put it on a caller save register and spil it around the call.
2497 Ideally, we would either put it on a callee save register or only do the store part.
2499 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2501 return (MonoInst*)call;
2504 if ((!cfg->compile_aot || enable_for_aot) &&
2505 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2506 (MONO_METHOD_IS_FINAL (method) &&
2507 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2508 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2510 * the method is not virtual, we just need to ensure this is not null
2511 * and then we can call the method directly.
2513 #ifndef DISABLE_REMOTING
2514 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2516 * The check above ensures method is not gshared, this is needed since
2517 * gshared methods can't have wrappers.
2519 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2523 if (!method->string_ctor)
2524 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2526 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2527 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2529 * the method is virtual, but we can statically dispatch since either
2530 * it's class or the method itself are sealed.
2531 * But first we need to ensure it's not a null reference.
2533 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2535 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2536 } else if (call_target) {
2537 vtable_reg = alloc_preg (cfg);
2538 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2540 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2541 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!' here is a logical not, so this assigns flags & 0 or
 * flags & 1 rather than clearing just MONO_INST_HAS_METHOD; a bitwise
 * '~MONO_INST_HAS_METHOD' looks intended — confirm before changing. */
2542 call->inst.flags &= !MONO_INST_HAS_METHOD;
2544 vtable_reg = alloc_preg (cfg);
2545 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2546 if (mono_class_is_interface (method->klass)) {
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
/* IMT entries live at negative offsets below the vtable */
2550 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2552 slot_reg = vtable_reg;
2553 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2554 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2556 g_assert (mono_method_signature (method)->generic_param_count);
2557 emit_imt_argument (cfg, call, call->method, imt_arg);
2561 call->inst.sreg1 = slot_reg;
2562 call->inst.inst_offset = offset;
2563 call->is_virtual = TRUE;
2567 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2570 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2572 return (MonoInst*)call;
2576 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2578 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2582 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2589 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
2592 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2594 return (MonoInst*)call;
2598 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2600 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2604 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2608 * mono_emit_abs_call:
2610 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2612 inline static MonoInst*
2613 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2614 MonoMethodSignature *sig, MonoInst **args)
2616 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2620 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2623 if (cfg->abs_patches == NULL)
2624 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2625 g_hash_table_insert (cfg->abs_patches, ji, ji);
2626 ins = mono_emit_native_call (cfg, ji, sig, args);
2627 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2631 static MonoMethodSignature*
2632 sig_to_rgctx_sig (MonoMethodSignature *sig)
2634 // FIXME: memory allocation
2635 MonoMethodSignature *res;
2638 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2639 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2640 res->param_count = sig->param_count + 1;
2641 for (i = 0; i < sig->param_count; ++i)
2642 res->params [i] = sig->params [i];
2643 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2647 /* Make an indirect call to FSIG passing an additional argument */
2649 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2651 MonoMethodSignature *csig;
2652 MonoInst *args_buf [16];
2654 int i, pindex, tmp_reg;
2656 /* Make a call with an rgctx/extra arg */
2657 if (fsig->param_count + 2 < 16)
2660 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2663 args [pindex ++] = orig_args [0];
2664 for (i = 0; i < fsig->param_count; ++i)
2665 args [pindex ++] = orig_args [fsig->hasthis + i];
2666 tmp_reg = alloc_preg (cfg);
2667 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2668 csig = sig_to_rgctx_sig (fsig);
2669 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2672 /* Emit an indirect call to the function descriptor ADDR */
2674 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2676 int addr_reg, arg_reg;
2677 MonoInst *call_target;
2679 g_assert (cfg->llvm_only);
2682 * addr points to a <addr, arg> pair, load both of them, and
2683 * make a call to addr, passing arg as an extra arg.
2685 addr_reg = alloc_preg (cfg);
2686 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2687 arg_reg = alloc_preg (cfg);
2688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2690 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
2694 direct_icalls_enabled (MonoCompile *cfg)
2698 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2700 if (cfg->compile_llvm && !cfg->llvm_only)
2703 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2709 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2712 * Call the jit icall without a wrapper if possible.
2713 * The wrapper is needed for the following reasons:
2714 * - to handle exceptions thrown using mono_raise_exceptions () from the
2715 * icall function. The EH code needs the lmf frame pushed by the
2716 * wrapper to be able to unwind back to managed code.
2717 * - to be able to do stack walks for asynchronously suspended
2718 * threads when debugging.
2720 if (info->no_raise && direct_icalls_enabled (cfg)) {
2724 if (!info->wrapper_method) {
2725 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2726 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2728 mono_memory_barrier ();
2732 * Inline the wrapper method, which is basically a call to the C icall, and
2733 * an exception check.
2735 costs = inline_method (cfg, info->wrapper_method, NULL,
2736 args, NULL, il_offset, TRUE);
2737 g_assert (costs > 0);
2738 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2742 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2747 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2749 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2750 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2754 * Native code might return non register sized integers
2755 * without initializing the upper bits.
2757 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2758 case OP_LOADI1_MEMBASE:
2759 widen_op = OP_ICONV_TO_I1;
2761 case OP_LOADU1_MEMBASE:
2762 widen_op = OP_ICONV_TO_U1;
2764 case OP_LOADI2_MEMBASE:
2765 widen_op = OP_ICONV_TO_I2;
2767 case OP_LOADU2_MEMBASE:
2768 widen_op = OP_ICONV_TO_U2;
2774 if (widen_op != -1) {
2775 int dreg = alloc_preg (cfg);
2778 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2779 widen->type = ins->type;
2790 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2792 MonoInst *args [16];
2794 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2795 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2797 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2801 mini_get_memcpy_method (void)
2803 static MonoMethod *memcpy_method = NULL;
2804 if (!memcpy_method) {
2805 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2807 g_error ("Old corlib found. Install a new one");
2809 return memcpy_method;
2813 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2815 int card_table_shift_bits;
2816 gpointer card_table_mask;
2818 MonoInst *dummy_use;
2819 int nursery_shift_bits;
2820 size_t nursery_size;
2822 if (!cfg->gen_write_barriers)
2825 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2827 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2829 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2831 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2834 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2835 wbarrier->sreg1 = ptr->dreg;
2836 wbarrier->sreg2 = value->dreg;
2837 MONO_ADD_INS (cfg->cbb, wbarrier);
2838 } else if (card_table) {
2839 int offset_reg = alloc_preg (cfg);
2844 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2845 * collector case, so, for the serial collector, it might slightly slow down nursery
2846 * collections. We also expect that the host system and the target system have the same card
2847 * table configuration, which is the case if they have the same pointer size.
2850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2851 if (card_table_mask)
2852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2854 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2855 * IMM's larger than 32bits.
2857 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2858 card_reg = ins->dreg;
2860 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2861 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2863 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2864 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2867 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2871 mini_get_memset_method (void)
2873 static MonoMethod *memset_method = NULL;
2874 if (!memset_method) {
2875 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2877 g_error ("Old corlib found. Install a new one");
2879 return memset_method;
2883 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2885 MonoInst *iargs [3];
2888 MonoMethod *memset_method;
2889 MonoInst *size_ins = NULL;
2890 MonoInst *bzero_ins = NULL;
2891 static MonoMethod *bzero_method;
2893 /* FIXME: Optimize this for the case when dest is an LDADDR */
2894 mono_class_init (klass);
2895 if (mini_is_gsharedvt_klass (klass)) {
2896 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2897 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2899 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
2900 g_assert (bzero_method);
2902 iargs [1] = size_ins;
2903 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
2907 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
2909 n = mono_class_value_size (klass, &align);
2911 if (n <= sizeof (gpointer) * 8) {
2912 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2915 memset_method = mini_get_memset_method ();
2917 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2918 EMIT_NEW_ICONST (cfg, iargs [2], n);
2919 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2926 * Emit IR to return either the this pointer for instance method,
2927 * or the mrgctx for static methods.
2930 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2932 MonoInst *this_ins = NULL;
2934 g_assert (cfg->gshared);
2936 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2937 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2938 !method->klass->valuetype)
2939 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
2941 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2942 MonoInst *mrgctx_loc, *mrgctx_var;
2944 g_assert (!this_ins);
2945 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2947 mrgctx_loc = mono_get_vtable_var (cfg);
2948 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2951 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
2952 MonoInst *mrgctx_loc, *mrgctx_var;
2954 /* Default interface methods need an mrgctx since the vtabke at runtime points at an implementing class */
2955 mrgctx_loc = mono_get_vtable_var (cfg);
2956 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2958 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
2961 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2962 MonoInst *vtable_loc, *vtable_var;
2964 g_assert (!this_ins);
2966 vtable_loc = mono_get_vtable_var (cfg);
2967 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2969 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2970 MonoInst *mrgctx_var = vtable_var;
2973 vtable_reg = alloc_preg (cfg);
2974 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2975 vtable_var->type = STACK_PTR;
2983 vtable_reg = alloc_preg (cfg);
2984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2989 static MonoJumpInfoRgctxEntry *
2990 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2992 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2993 res->method = method;
2994 res->in_mrgctx = in_mrgctx;
2995 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2996 res->data->type = patch_type;
2997 res->data->data.target = patch_data;
2998 res->info_type = info_type;
/* Emit an inline (llvmonly) rgctx fetch for ENTRY via an icall. The code after
 * the icall path appears to be a disabled/dead inline fastpath (see the FIXME
 * below) — NOTE(review): the #if 0 / early return presumably guarding it was
 * dropped from this view; confirm against the full file. */
3003 static inline MonoInst*
3004 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3006 MonoInst *args [16];
3009 // FIXME: No fastpath since the slot is not a compile time constant
/* args [1] = the slot index, resolved at AOT/patch time. */
3011 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3012 if (entry->in_mrgctx)
3013 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3015 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3019 * FIXME: This can be called during decompose, which is a problem since it creates
3021 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
/* --- fastpath below: walk the rgctx array chain, falling back to the fill icall
 * when a table or slot is still null --- */
3023 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3025 MonoBasicBlock *is_null_bb, *end_bb;
3026 MonoInst *res, *ins, *call;
3029 slot = mini_get_rgctx_entry_slot (entry);
3031 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3032 index = MONO_RGCTX_SLOT_INDEX (slot);
3034 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find which array in the chain holds INDEX; each level reserves one slot
 * for the link to the next array. */
3035 for (depth = 0; ; ++depth) {
3036 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3038 if (index < size - 1)
3043 NEW_BBLOCK (cfg, end_bb);
3044 NEW_BBLOCK (cfg, is_null_bb);
3047 rgctx_reg = rgctx->dreg;
3049 rgctx_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3052 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3053 NEW_BBLOCK (cfg, is_null_bb);
3055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3059 for (i = 0; i < depth; ++i) {
3060 int array_reg = alloc_preg (cfg);
3062 /* load ptr to next array */
3063 if (mrgctx && i == 0)
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3067 rgctx_reg = array_reg;
3068 /* is the ptr null? */
3069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3070 /* if yes, jump to actual trampoline */
3071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* slot 0 is the link slot, so the payload starts at index + 1 */
3075 val_reg = alloc_preg (cfg);
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3077 /* is the slot null? */
3078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3079 /* if yes, jump to actual trampoline */
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fastpath hit: copy the cached value into the result reg */
3083 res_reg = alloc_preg (cfg);
3084 MONO_INST_NEW (cfg, ins, OP_MOVE);
3085 ins->dreg = res_reg;
3086 ins->sreg1 = val_reg;
3087 MONO_ADD_INS (cfg->cbb, ins);
3089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slowpath: fill the slot via the runtime icall */
3092 MONO_START_BB (cfg, is_null_bb);
3094 EMIT_NEW_ICONST (cfg, args [1], index);
3096 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3098 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3099 MONO_INST_NEW (cfg, ins, OP_MOVE);
3100 ins->dreg = res_reg;
3101 ins->sreg1 = call->dreg;
3102 MONO_ADD_INS (cfg->cbb, ins);
3103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3105 MONO_START_BB (cfg, end_bb);
3114 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3117 static inline MonoInst*
3118 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3121 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3123 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3127 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3128 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3130 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3131 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3133 return emit_rgctx_fetch (cfg, rgctx, entry);
3137 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3138 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3140 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3141 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3143 return emit_rgctx_fetch (cfg, rgctx, entry);
3147 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3148 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3150 MonoJumpInfoGSharedVtCall *call_info;
3151 MonoJumpInfoRgctxEntry *entry;
3154 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3155 call_info->sig = sig;
3156 call_info->method = cmethod;
3158 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3159 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3161 return emit_rgctx_fetch (cfg, rgctx, entry);
3165 * emit_get_rgctx_virt_method:
3167 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3170 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3171 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3173 MonoJumpInfoVirtMethod *info;
3174 MonoJumpInfoRgctxEntry *entry;
3177 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3178 info->klass = klass;
3179 info->method = virt_method;
3181 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3182 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3184 return emit_rgctx_fetch (cfg, rgctx, entry);
3188 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3189 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3191 MonoJumpInfoRgctxEntry *entry;
3194 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3195 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3197 return emit_rgctx_fetch (cfg, rgctx, entry);
3201 * emit_get_rgctx_method:
3203 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3204 * normal constants, else emit a load from the rgctx.
3207 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3208 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3210 if (!context_used) {
3213 switch (rgctx_type) {
3214 case MONO_RGCTX_INFO_METHOD:
3215 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3217 case MONO_RGCTX_INFO_METHOD_RGCTX:
3218 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3221 g_assert_not_reached ();
3224 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3225 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3227 return emit_rgctx_fetch (cfg, rgctx, entry);
3232 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3233 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3235 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3236 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3238 return emit_rgctx_fetch (cfg, rgctx, entry);
3242 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3244 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3245 MonoRuntimeGenericContextInfoTemplate *template_;
3250 for (i = 0; i < info->num_entries; ++i) {
3251 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3253 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3257 if (info->num_entries == info->count_entries) {
3258 MonoRuntimeGenericContextInfoTemplate *new_entries;
3259 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3261 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3263 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3264 info->entries = new_entries;
3265 info->count_entries = new_count_entries;
3268 idx = info->num_entries;
3269 template_ = &info->entries [idx];
3270 template_->info_type = rgctx_type;
3271 template_->data = data;
3273 info->num_entries ++;
3279 * emit_get_gsharedvt_info:
3281 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3284 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3289 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3290 /* Load info->entries [idx] */
3291 dreg = alloc_preg (cfg);
3292 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3298 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3300 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3304 * On return the caller must check @klass for load errors.
3307 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3309 MonoInst *vtable_arg;
3312 context_used = mini_class_check_context_used (cfg, klass);
3315 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3316 klass, MONO_RGCTX_INFO_VTABLE);
3318 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3322 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3325 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3329 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3330 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3332 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3333 ins->sreg1 = vtable_arg->dreg;
3334 MONO_ADD_INS (cfg->cbb, ins);
3337 MonoBasicBlock *inited_bb;
3338 MonoInst *args [16];
3340 inited_reg = alloc_ireg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3344 NEW_BBLOCK (cfg, inited_bb);
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3349 args [0] = vtable_arg;
3350 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3352 MONO_START_BB (cfg, inited_bb);
3357 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3361 if (cfg->gen_seq_points && cfg->method == method) {
3362 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3364 ins->flags |= MONO_INST_NONEMPTY_STACK;
3365 MONO_ADD_INS (cfg->cbb, ins);
3370 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3372 if (mini_get_debug_options ()->better_cast_details) {
3373 int vtable_reg = alloc_preg (cfg);
3374 int klass_reg = alloc_preg (cfg);
3375 MonoBasicBlock *is_null_bb = NULL;
3377 int to_klass_reg, context_used;
3380 NEW_BBLOCK (cfg, is_null_bb);
3382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3386 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3388 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3397 context_used = mini_class_check_context_used (cfg, klass);
3399 MonoInst *class_ins;
3401 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3402 to_klass_reg = class_ins->dreg;
3404 to_klass_reg = alloc_preg (cfg);
3405 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3410 MONO_START_BB (cfg, is_null_bb);
3415 mini_reset_cast_details (MonoCompile *cfg)
3417 /* Reset the variables holding the cast details */
3418 if (mini_get_debug_options ()->better_cast_details) {
3419 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3420 /* It is enough to reset the from field */
3421 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3426 * On return the caller must check @array_class for load errors
3429 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3431 int vtable_reg = alloc_preg (cfg);
3434 context_used = mini_class_check_context_used (cfg, array_class);
3436 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
3438 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3440 if (cfg->opt & MONO_OPT_SHARED) {
3441 int class_reg = alloc_preg (cfg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3445 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3447 } else if (context_used) {
3448 MonoInst *vtable_ins;
3450 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3451 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3453 if (cfg->compile_aot) {
3457 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3459 vt_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3461 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3464 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3470 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3472 mini_reset_cast_details (cfg);
3476 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3477 * generic code is generated.
3480 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3482 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3485 MonoInst *rgctx, *addr;
3487 /* FIXME: What if the class is shared? We might not
3488 have to get the address of the method from the
3490 addr = emit_get_rgctx_method (cfg, context_used, method,
3491 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3492 if (cfg->llvm_only) {
3493 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3494 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3496 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3498 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3501 gboolean pass_vtable, pass_mrgctx;
3502 MonoInst *rgctx_arg = NULL;
3504 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3505 g_assert (!pass_mrgctx);
3508 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3511 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3514 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3519 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3523 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3524 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3525 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3526 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3528 obj_reg = sp [0]->dreg;
3529 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3532 /* FIXME: generics */
3533 g_assert (klass->rank == 0);
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3543 MonoInst *element_class;
3545 /* This assertion is from the unboxcast insn */
3546 g_assert (klass->rank == 0);
3548 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3549 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3554 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3555 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3556 mini_reset_cast_details (cfg);
3559 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3560 MONO_ADD_INS (cfg->cbb, add);
3561 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ whose class KLASS is a gsharedvt type. Since the runtime
 * instantiation is not known at JIT time, emit a three-way branch on the
 * class's box type (ref / nullable / vtype) and compute an address holding
 * the unboxed value in ADDR_REG, then load the result from it.
 * NOTE(review): this chunk is elided; control-flow glue (braces, ifdefs)
 * between the visible statements is not shown here.
 */
3568 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3570 MonoInst *addr, *klass_inst, *is_ref, *args [16];
3571 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3575 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Cast check: args presumably holds { obj, klass_inst } — TODO confirm args [0] setup (elided) */
3581 args [1] = klass_inst;
3584 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3586 NEW_BBLOCK (cfg, is_ref_bb);
3587 NEW_BBLOCK (cfg, is_nullable_bb);
3588 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type fetched from the gsharedvt info */
3589 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3596 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3597 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the payload starts right after the MonoObject header */
3601 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3602 MONO_ADD_INS (cfg->cbb, addr);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3607 MONO_START_BB (cfg, is_ref_bb);
3609 /* Save the ref to a temporary */
3610 dreg = alloc_ireg (cfg);
3611 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3612 addr->dreg = addr_reg;
3613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3617 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time */
3620 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3621 MonoInst *unbox_call;
3622 MonoMethodSignature *unbox_sig;
3624 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3625 unbox_sig->ret = &klass->byval_arg;
3626 unbox_sig->param_count = 1;
3627 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
/* llvm-only vs. regular JIT calli variants (branch condition elided) */
3630 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3632 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3634 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3635 addr->dreg = addr_reg;
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3641 MONO_START_BB (cfg, end_bb);
/* All three paths converge with addr_reg pointing at the unboxed value */
3644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3650 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS, preferring a GC managed
 * allocator when available, falling back to the object-new icalls.
 * FOR_BOX selects the boxing variant of the managed allocator.
 * NOTE(review): chunk is elided; some branch/else lines are not shown.
 */
3653 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3655 MonoInst *iargs [2];
/* context_used path: resolve the klass/vtable through the RGCTX */
3660 MonoRgctxInfoType rgctx_info;
3661 MonoInst *iargs [2];
/* gsharedvt classes have a variable instance size, so it can't be inlined as a constant */
3662 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3664 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* With MONO_OPT_SHARED the icall takes (domain, klass); otherwise it takes a vtable */
3666 if (cfg->opt & MONO_OPT_SHARED)
3667 rgctx_info = MONO_RGCTX_INFO_KLASS;
3669 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3670 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3672 if (cfg->opt & MONO_OPT_SHARED) {
3673 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3675 alloc_ftn = ves_icall_object_new;
3678 alloc_ftn = ves_icall_object_new_specific;
3681 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3682 if (known_instance_size) {
3683 int size = mono_class_instance_size (klass);
/* Sanity check: every object is at least a MonoObject header */
3684 if (size < sizeof (MonoObject))
3685 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3687 EMIT_NEW_ICONST (cfg, iargs [1], size);
3689 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3692 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generics path below */
3695 if (cfg->opt & MONO_OPT_SHARED) {
3696 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3697 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3699 alloc_ftn = ves_icall_object_new;
3700 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3701 /* This happens often in argument checking code, eg. throw new FooException... */
3702 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3703 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3704 alloc_ftn = mono_helper_newobj_mscorlib;
3706 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3707 MonoMethod *managed_alloc = NULL;
/* vtable creation failed (check elided): report a type-load error via the cfg */
3710 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3711 cfg->exception_ptr = klass;
3715 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3717 if (managed_alloc) {
3718 int size = mono_class_instance_size (klass);
3719 if (size < sizeof (MonoObject))
3720 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3722 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3723 EMIT_NEW_ICONST (cfg, iargs [1], size);
3724 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3726 alloc_ftn = ves_icall_object_new_specific;
3727 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3730 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3734 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL of type KLASS. Nullable types go through
 * Nullable<T>.Box; gsharedvt types branch at runtime on the box type
 * (ref / nullable / vtype); everything else allocates and stores.
 * NOTE(review): chunk is elided; some glue lines are not shown.
 */
3737 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3739 MonoInst *alloc, *ins;
3741 if (mono_class_is_nullable (klass)) {
3742 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3745 if (cfg->llvm_only && cfg->gsharedvt) {
3746 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3747 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3748 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3750 /* FIXME: What if the class is shared? We might not
3751 have to get the method address from the RGCTX. */
3752 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3753 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3754 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3756 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-context-used nullable path: direct call, possibly passing the vtable */
3759 gboolean pass_vtable, pass_mrgctx;
3760 MonoInst *rgctx_arg = NULL;
3762 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3763 g_assert (!pass_mrgctx);
3766 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3769 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3772 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3776 if (mini_is_gsharedvt_klass (klass)) {
3777 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3778 MonoInst *res, *is_ref, *src_var, *addr;
3781 dreg = alloc_ireg (cfg);
3783 NEW_BBLOCK (cfg, is_ref_bb);
3784 NEW_BBLOCK (cfg, is_nullable_bb);
3785 NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box type, mirroring handle_unbox_gsharedvt */
3786 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3791 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the object header */
3794 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3797 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3798 ins->opcode = OP_STOREV_MEMBASE;
3800 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3801 res->type = STACK_OBJ;
3803 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3806 MONO_START_BB (cfg, is_ref_bb);
3808 /* val is a vtype, so has to load the value manually */
3809 src_var = get_vreg_to_inst (cfg, val->dreg);
3811 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3812 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3817 MONO_START_BB (cfg, is_nullable_bb);
3820 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3821 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3823 MonoMethodSignature *box_sig;
3826 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3827 * construct that method at JIT time, so have to do things by hand.
3829 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3830 box_sig->ret = &mono_defaults.object_class->byval_arg;
3831 box_sig->param_count = 1;
3832 box_sig->params [0] = &klass->byval_arg;
/* llvm-only vs. regular JIT calli variants (branch condition elided) */
3835 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
3837 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3838 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3839 res->type = STACK_OBJ;
3843 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3845 MONO_START_BB (cfg, end_bb);
/* Plain (non-nullable, non-gsharedvt) box: alloc + store */
3849 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3853 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built set of corlib class names whose icalls may be called directly;
 * published with a memory barrier and then read without locking. */
3858 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) can be invoked directly, i.e. it does
 * not (transitively) call mono_raise_exception (). Uses a small whitelist
 * of corlib types plus Math.
 */
3861 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
3863 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3864 if (!direct_icalls_enabled (cfg))
3868 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
3869 * Whitelist a few icalls for now.
/* Build the whitelist once; the barrier makes the fully-populated table
 * visible before the global pointer is published */
3871 if (!direct_icall_type_hash) {
3872 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
3874 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
3875 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
3876 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
3877 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
3878 mono_memory_barrier ();
3879 direct_icall_type_hash = h;
3882 if (cmethod->klass == mono_defaults.math_class)
3884 /* No locking needed */
3885 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD performs a stack walk at runtime (e.g.
 * System.Type.GetType), which constrains how callers may be inlined.
 */
3891 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3893 if (cmethod->klass == mono_defaults.systemtype_class) {
3894 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an intrinsic expansion of Enum.HasFlag for KLASS:
 * (*enum_this & enum_flag) == enum_flag, computed in 32- or 64-bit
 * registers depending on the enum's underlying type.
 * NOTE(review): the switch cases selecting is_i4 are elided from this view.
 */
3900 static G_GNUC_UNUSED MonoInst*
3901 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
3903 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
3904 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
3907 switch (enum_type->type) {
3910 #if SIZEOF_REGISTER == 8
3922 MonoInst *load, *and_, *cmp, *ceq;
3923 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3924 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3925 int dest_reg = alloc_ireg (cfg);
/* result = ((*enum_this & flag) == flag) */
3927 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3928 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3929 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3930 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3932 ceq->type = STACK_I4;
/* Decompose immediately (condition elided) so later passes see simple ops */
3935 load = mono_decompose_opcode (cfg, load);
3936 and_ = mono_decompose_opcode (cfg, and_);
3937 cmp = mono_decompose_opcode (cfg, cmp);
3938 ceq = mono_decompose_opcode (cfg, ceq);
3946 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS bound to METHOD with
 * receiver TARGET, inlining the work of mono_delegate_ctor: set target,
 * method, (optionally) a per-domain code slot, and the invoke trampoline.
 * VIRTUAL_ selects the virtual-delegate trampoline variant.
 */
3948 static G_GNUC_UNUSED MonoInst*
3949 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
3953 gpointer trampoline;
3954 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out (body elided) if no virtual invoke impl exists for this signature */
3958 if (virtual_ && !cfg->llvm_only) {
3959 MonoMethod *invoke = mono_get_delegate_invoke (klass);
3962 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
3966 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
3970 /* Inline the contents of mono_delegate_ctor */
3972 /* Set target field */
3973 /* Optimize away setting of NULL target */
3974 if (!MONO_INS_IS_PCONST_NULL (target)) {
3975 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
3976 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
3977 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3978 if (cfg->gen_write_barriers) {
3979 dreg = alloc_preg (cfg);
3980 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3981 mini_emit_write_barrier (cfg, ptr, target);
3985 /* Set method field */
3986 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3987 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3990 * To avoid looking up the compiled code belonging to the target method
3991 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3992 * store it, and we fill it after the method has been compiled.
3994 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3995 MonoInst *code_slot_ins;
3998 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared path: look up / create the code slot under the domain lock */
4000 domain = mono_domain_get ();
4001 mono_domain_lock (domain);
4002 if (!domain_jit_info (domain)->method_code_hash)
4003 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4004 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4006 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4007 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4009 mono_domain_unlock (domain);
4011 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4013 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize the delegate via icalls instead of trampolines */
4016 if (cfg->llvm_only) {
4017 MonoInst *args [16];
4022 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4023 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4026 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4032 if (cfg->compile_aot) {
4033 MonoDelegateClassMethodPair *del_tramp;
4035 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4036 del_tramp->klass = klass;
4037 del_tramp->method = context_used ? NULL : method;
4038 del_tramp->is_virtual = virtual_;
4039 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4042 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4044 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4045 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4048 /* Set invoke_impl field */
4050 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual: tramp_ins is a MonoDelegateTrampInfo*, load its fields */
4052 dreg = alloc_preg (cfg);
4053 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4054 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4056 dreg = alloc_preg (cfg);
4057 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4058 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4061 dreg = alloc_preg (cfg);
4062 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4063 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4065 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall for a RANK-dimensional
 * array, with the dimension arguments in SP. Vararg calls are not
 * supported by LLVM, so LLVM compilation is disabled for this method.
 */
4071 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4073 MonoJitICallInfo *info;
4075 /* Need to register the icall so it gets an icall wrapper */
4076 info = mono_get_array_new_va_icall (rank);
4078 cfg->flags |= MONO_CFG_HAS_VARARGS;
4080 /* mono_array_new_va () needs a vararg calling convention */
4081 cfg->exception_message = g_strdup ("array-new");
4082 cfg->disable_llvm = TRUE;
4084 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4085 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4089 * handle_constrained_gsharedvt_call:
4091 * Handle constrained calls where the receiver is a gsharedvt type.
4092 * Return the instruction representing the call. Set the cfg exception on failure.
4095 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4096 gboolean *ref_emit_widen)
4098 MonoInst *ins = NULL;
4099 gboolean emit_widen = *ref_emit_widen;
4102 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4103 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4104 * pack the arguments into an array, and do the rest of the work in an icall.
/* Guard: only simple signatures (0/1 args, simple return kinds) are supported */
4106 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4107 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4108 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4109 MonoInst *args [16];
4112 * This case handles calls to
4113 * - object:ToString()/Equals()/GetHashCode(),
4114 * - System.IComparable<T>:CompareTo()
4115 * - System.IEquatable<T>:Equals ()
4116 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args: [0]=receiver (setup elided), [1]=method, [2]=constrained class,
 * [3]=deref-arg flag, [4]=packed argument array */
4120 if (mono_method_check_context_used (cmethod))
4121 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4123 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4124 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4126 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4127 if (fsig->hasthis && fsig->param_count) {
4128 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4129 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4130 ins->dreg = alloc_preg (cfg);
4131 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4132 MONO_ADD_INS (cfg->cbb, ins);
4135 if (mini_is_gsharedvt_type (fsig->params [0])) {
4136 int addr_reg, deref_arg_reg;
4138 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4139 deref_arg_reg = alloc_preg (cfg);
4140 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4141 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4143 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4144 addr_reg = ins->dreg;
4145 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4147 EMIT_NEW_ICONST (cfg, args [3], 0);
4148 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4151 EMIT_NEW_ICONST (cfg, args [3], 0);
4152 EMIT_NEW_ICONST (cfg, args [4], 0);
4154 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed object; unbox/deref according to fsig->ret */
4157 if (mini_is_gsharedvt_type (fsig->ret)) {
4158 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4159 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4163 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4164 MONO_ADD_INS (cfg->cbb, add);
4166 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4167 MONO_ADD_INS (cfg->cbb, ins);
4168 /* ins represents the call result */
4171 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4174 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Ensure the GOT address is loaded into cfg->got_var at the start of the
 * first basic block, and add a dummy use in the exit block so the variable
 * stays alive for the whole method. No-op if there is no got_var or it was
 * already allocated.
 */
4183 mono_emit_load_got_addr (MonoCompile *cfg)
4185 MonoInst *getaddr, *dummy_use;
4187 if (!cfg->got_var || cfg->got_var_allocated)
4190 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4191 getaddr->cil_code = cfg->header->code;
4192 getaddr->dreg = cfg->got_var->dreg;
4194 /* Add it to the start of the first bblock */
4195 if (cfg->bb_entry->code) {
4196 getaddr->next = cfg->bb_entry->code;
4197 cfg->bb_entry->code = getaddr;
4200 MONO_ADD_INS (cfg->bb_entry, getaddr);
4202 cfg->got_var_allocated = TRUE;
4205 * Add a dummy use to keep the got_var alive, since real uses might
4206 * only be generated by the back ends.
4207 * Add it to end_bblock, so the variable's lifetime covers the whole
4209 * It would be better to make the usage of the got var explicit in all
4210 * cases when the backend needs it (i.e. calls, throw etc.), so this
4211 * wouldn't be needed.
4213 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4214 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (IL bytes), overridable via MONO_INLINELIMIT; init once. */
4217 static int inline_limit;
4218 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects synchronized/noinline/marshalbyref methods, oversized bodies
 * (unless AggressiveInlining), classes whose cctor cannot be run or proven
 * already-run, soft-float R4 signatures, profiled methods, and methods on
 * cfg->dont_inline.
 * NOTE(review): many early-return lines are elided from this view.
 */
4221 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4223 MonoMethodHeaderSummary header;
4225 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4226 MonoMethodSignature *sig = mono_method_signature (method);
4230 if (cfg->disable_inline)
/* Cap inline recursion depth */
4235 if (cfg->inline_depth > 10)
4238 if (!mono_method_get_header_summary (method, &header))
4241 /*runtime, icall and pinvoke are checked by summary call*/
4242 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4243 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4244 (mono_class_is_marshalbyref (method->klass)) ||
4248 /* also consider num_locals? */
4249 /* Do the size check early to avoid creating vtables */
4250 if (!inline_limit_inited) {
4252 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4253 inline_limit = atoi (inlinelimit);
4254 g_free (inlinelimit);
4256 inline_limit = INLINE_LENGTH_LIMIT;
4257 inline_limit_inited = TRUE;
4259 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4263 * if we can initialize the class of the method right away, we do,
4264 * otherwise we don't allow inlining if the class needs initialization,
4265 * since it would mean inserting a call to mono_runtime_class_init()
4266 * inside the inlined code
4268 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4271 if (!(cfg->opt & MONO_OPT_SHARED)) {
4272 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4273 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4274 if (method->klass->has_cctor) {
4275 vtable = mono_class_vtable (cfg->domain, method->klass);
4278 if (!cfg->compile_aot) {
4280 if (!mono_runtime_class_init_full (vtable, &error)) {
4281 mono_error_cleanup (&error);
4286 } else if (mono_class_is_before_field_init (method->klass)) {
4287 if (cfg->run_cctors && method->klass->has_cctor) {
4288 /* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
4289 if (!method->klass->runtime_info)
4290 /* No vtable created yet */
4292 vtable = mono_class_vtable (cfg->domain, method->klass);
4295 /* This makes so that inline cannot trigger */
4296 /* .cctors: too many apps depend on them */
4297 /* running with a specific order... */
4298 if (! vtable->initialized)
4301 if (!mono_runtime_class_init_full (vtable, &error)) {
4302 mono_error_cleanup (&error);
4306 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4307 if (!method->klass->runtime_info)
4308 /* No vtable created yet */
4310 vtable = mono_class_vtable (cfg->domain, method->klass);
4313 if (!vtable->initialized)
4318 * If we're compiling for shared code
4319 * the cctor will need to be run at aot method load time, for example,
4320 * or at the end of the compilation of the inlining method.
4322 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft-float: R4 values are handled specially, so don't inline methods using them */
4326 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4327 if (mono_arch_is_soft_float ()) {
4329 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4331 for (i = 0; i < sig->param_count; ++i)
4332 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4337 if (g_list_find (cfg->dont_inline, method))
/* Instrumented methods must remain distinct call frames for the profiler */
4340 if (mono_profiler_get_call_instrumentation_flags (method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD to a field of KLASS
 * requires emitting a class-init check (the cctor may not have run yet).
 */
4347 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4349 if (!cfg->compile_aot) {
4351 if (vtable->initialized)
4355 if (mono_class_is_before_field_init (klass)) {
4356 if (cfg->method == method)
4360 if (!mono_class_needs_cctor_run (klass, method))
4363 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4364 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the 1-dimensional
 * array ARR with element class KLASS: &arr->vector + index * elem_size.
 * Emits a bounds check when BCHECK is set; handles gsharedvt element
 * sizes via an RGCTX lookup, and uses an x86/amd64 LEA fast path for
 * power-of-two element sizes.
 */
4371 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4375 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4378 if (mini_is_gsharedvt_variable_klass (klass)) {
4381 mono_class_init (klass);
4382 size = mono_class_array_element_size (klass);
4385 mult_reg = alloc_preg (cfg);
4386 array_reg = arr->dreg;
4387 index_reg = index->dreg;
4389 #if SIZEOF_REGISTER == 8
4390 /* The array reg is 64 bits but the index reg is only 32 */
4391 if (COMPILE_LLVM (cfg)) {
4393 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4394 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4395 * of OP_X86_LEA for llvm.
4397 index2_reg = index_reg;
4399 index2_reg = alloc_preg (cfg);
4400 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit: only widen an I8 index down to I4 */
4403 if (index->type == STACK_I8) {
4404 index2_reg = alloc_preg (cfg);
4405 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4407 index2_reg = index_reg;
4412 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4414 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4415 if (size == 1 || size == 2 || size == 4 || size == 8) {
4416 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4418 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4419 ins->klass = mono_class_get_element_class (klass);
4420 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
4426 add_reg = alloc_ireg_mp (cfg);
4429 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it from the RGCTX */
4432 g_assert (cfg->gshared);
4433 context_used = mini_class_check_context_used (cfg, klass);
4434 g_assert (context_used);
4435 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4436 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4440 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4441 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4442 ins->klass = mono_class_get_element_class (klass);
4443 ins->type = STACK_MP;
4444 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a 2-dimensional
 * array with element class KLASS, including per-dimension lower-bound
 * adjustment and range checks against the MonoArrayBounds pair.
 */
4450 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4452 int bounds_reg = alloc_preg (cfg);
4453 int add_reg = alloc_ireg_mp (cfg);
4454 int mult_reg = alloc_preg (cfg);
4455 int mult2_reg = alloc_preg (cfg);
4456 int low1_reg = alloc_preg (cfg);
4457 int low2_reg = alloc_preg (cfg);
4458 int high1_reg = alloc_preg (cfg);
4459 int high2_reg = alloc_preg (cfg);
4460 int realidx1_reg = alloc_preg (cfg);
4461 int realidx2_reg = alloc_preg (cfg);
4462 int sum_reg = alloc_preg (cfg);
4463 int index1, index2, tmpreg;
4467 mono_class_init (klass);
4468 size = mono_class_array_element_size (klass);
4470 index1 = index_ins1->dreg;
4471 index2 = index_ins2->dreg;
4473 #if SIZEOF_REGISTER == 8
4474 /* The array reg is 64 bits but the index reg is only 32 */
4475 if (COMPILE_LLVM (cfg)) {
4478 tmpreg = alloc_preg (cfg);
4479 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4481 tmpreg = alloc_preg (cfg);
4482 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4486 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4490 /* range checking */
4491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4492 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; check against length */
4494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4495 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4496 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4497 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4498 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4500 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
4502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4503 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4504 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4506 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4508 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2 + realidx2) * size) + offsetof (vector) */
4510 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4511 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4513 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4514 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4516 ins->type = STACK_MP;
4518 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for a multi-dimensional array accessor
 * CMETHOD (Get/Set/Address). Uses the inline 1-D/2-D fast paths when
 * possible, otherwise calls the marshal-generated Address () helper.
 */
4524 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4528 MonoMethod *addr_method;
4530 MonoClass *eclass = cmethod->klass->element_class;
/* For Set (), the last parameter is the value, not an index */
4532 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4535 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4537 /* emit_ldelema_2 depends on OP_LMUL */
4538 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4539 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4542 if (mini_is_gsharedvt_variable_klass (eclass))
4545 element_size = mono_class_array_element_size (eclass);
4546 addr_method = mono_marshal_get_array_address (rank, element_size);
4547 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4552 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address
 * (bounds already checked by the caller) and copy the value between the
 * array slot and the by-ref argument, with a write barrier for stores of
 * reference-typed elements.
 */
4554 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4556 MonoInst *addr, *store, *load;
4557 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4559 /* the bounds check is already done by the callers */
4560 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: *addr = *args [2] (value -> array element) */
4562 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4563 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4564 if (mini_type_is_reference (&eklass->byval_arg))
4565 mini_emit_write_barrier (cfg, addr, load);
/* get: *args [2] = *addr (array element -> out parameter) */
4567 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4568 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* generic_class_is_reference_type: whether KLASS is a reference type
 * (after inflation); thin wrapper over mini_type_is_reference. */
4575 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4577 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into element sp [1] of array sp [0] with
 * element class KLASS. Reference-typed stores with SAFETY_CHECKS go
 * through the virtual stelemref helper (covariance check); value stores
 * use direct address computation, with a constant-index fast path.
 */
4581 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4583 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4584 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4585 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4586 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4587 MonoInst *iargs [3];
4590 mono_class_setup_vtable (obj_array);
4591 g_assert (helper->slot);
4593 if (sp [0]->type != STACK_OBJ)
4595 if (sp [2]->type != STACK_OBJ)
4602 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4606 if (mini_is_gsharedvt_variable_klass (klass)) {
4609 // FIXME-VT: OP_ICONST optimization
4610 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4611 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4612 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at JIT time */
4613 } else if (sp [1]->opcode == OP_ICONST) {
4614 int array_reg = sp [0]->dreg;
4615 int index_reg = sp [1]->dreg;
4616 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4618 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4619 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4622 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4623 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4625 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4626 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4627 if (generic_class_is_reference_type (cfg, klass))
4628 mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Emit IR for Array.UnsafeStore/UnsafeLoad intrinsics: element access with
 * no bounds or covariance checks. IS_SET selects store vs. load; the element
 * class is taken from the value parameter (store) or the return type (load).
 * NOTE(review): the if/else scaffolding around these lines is missing from
 * this extract — comments only.
 */
4635 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* store: element type is the type of the value being stored (params [2]) */
4640 eklass = mono_class_from_mono_type (fsig->params [2]);
/* load: element type is the intrinsic's return type */
4642 eklass = mono_class_from_mono_type (fsig->ret);
/* safety_checks == FALSE: skip bounds/covariance checking on the store */
4645 return emit_array_store (cfg, eklass, args, FALSE);
4647 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4648 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether the Array.UnsafeMov<S,R> intrinsic may be lowered to a
 * plain register move from PARAM_KLASS to RETURN_KLASS. Rejects mixed
 * ref/value types, types containing GC references, struct/scalar mixes and
 * floats; accepts equal sizes or small (<= 4 byte) scalars of the same
 * register class.
 * NOTE(review): "¶m_klass" below is a mis-encoding of "&param_klass"
 * (the "&para;" HTML entity was decoded during extraction) — restore the
 * "&param_klass" spelling before compiling. The TRUE/FALSE return lines of
 * each branch are also missing from this extract.
 */
4654 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4657 int param_size, return_size;
/* NOTE(review): mis-encoded "&param_klass" on the next line */
4659 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
4660 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4662 if (cfg->verbose_level > 3)
4663 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4665 //Don't allow mixing reference types with value types
4666 if (param_klass->valuetype != return_klass->valuetype) {
4667 if (cfg->verbose_level > 3)
4668 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
/* both reference types: a reference-to-reference mov is never allowed here */
4672 if (!param_klass->valuetype) {
4673 if (cfg->verbose_level > 3)
4674 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* value types containing GC references cannot be blindly reinterpreted */
4679 if (param_klass->has_references || return_klass->has_references)
4682 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
/* NOTE(review): mis-encoded "&param_klass" on the next two lines */
4683 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4684 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4685 if (cfg->verbose_level > 3)
4686 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* floating point registers are a different register class — not supported */
4690 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4691 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4692 if (cfg->verbose_level > 3)
4693 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4697 param_size = mono_class_value_size (param_klass, &align);
4698 return_size = mono_class_value_size (return_klass, &align);
4700 //We can do it if sizes match
4701 if (param_size == return_size) {
4702 if (cfg->verbose_level > 3)
4703 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4707 //No simple way to handle struct if sizes don't match
/* NOTE(review): mis-encoded "&param_klass" on the next line */
4708 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
4709 if (cfg->verbose_level > 3)
4710 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4715 * Same reg size category.
4716 * A quick note on why we don't require widening here.
4717 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4719 * Since the source value comes from a function argument, the JIT will already have
4720 * the value in a VREG and performed any widening needed before (say, when loading from a field).
4722 if (param_size <= 4 && return_size <= 4) {
4723 if (cfg->verbose_level > 3)
4724 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 * Lower the Array.UnsafeMov<S,R> intrinsic to a register move when
 * is_unsafe_mov_compatible approves the source/destination classes (either
 * directly, or element-wise for rank-1 arrays). Bails out for gsharedvt
 * return types.
 * NOTE(review): the emit/return lines of each branch are missing from this
 * extract — comments only.
 */
4732 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4734 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4735 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* variable-size generic sharing: concrete layout unknown, cannot inline */
4737 if (mini_is_gsharedvt_variable_type (fsig->ret))
4740 //Valuetypes that are semantically equivalent or numbers than can be widened to
4741 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
4744 //Arrays of valuetypes that are semantically equivalent
4745 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Try to emit an intrinsic implementation of the constructor CMETHOD:
 * first the SIMD intrinsics (when MONO_OPT_SIMD is enabled and the arch
 * supports them), then the native-types intrinsics. Presumably returns NULL
 * when no intrinsic applies — the intervening lines are missing from this
 * extract.
 */
4752 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4754 #ifdef MONO_ARCH_SIMD_INTRINSICS
4755 MonoInst *ins = NULL;
4757 if (cfg->opt & MONO_OPT_SIMD) {
4758 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4764 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 * Append an OP_MEMORY_BARRIER instruction of the given KIND (one of the
 * MONO_MEMORY_BARRIER_* kinds, stored in backend.memory_barrier_kind) to
 * the current basic block.
 */
4768 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4770 MonoInst *ins = NULL;
4771 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4772 MONO_ADD_INS (cfg->cbb, ins);
4773 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * Emit LLVM-backend-only intrinsics for CMETHOD. Handles System.Math
 * Sin/Cos/Sqrt/Abs(double) as unary STACK_R8 opcodes, and Min/Max over
 * I4/U4/I8/U8 as binary cmov opcodes when MONO_OPT_CMOV is enabled.
 * NOTE(review): the opcode assignments inside several branches (e.g. the
 * Sin/Cos/Sqrt cases and the signed Min/Max cases) are missing from this
 * extract — comments only, no tokens changed.
 */
4779 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4781 MonoInst *ins = NULL;
4784 /* The LLVM backend supports these intrinsics */
4785 if (cmethod->klass == mono_defaults.math_class) {
4786 if (strcmp (cmethod->name, "Sin") == 0) {
4788 } else if (strcmp (cmethod->name, "Cos") == 0) {
4790 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4792 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: one R8 argument, R8 result */
4796 if (opcode && fsig->param_count == 1) {
4797 MONO_INST_NEW (cfg, ins, opcode);
4798 ins->type = STACK_R8;
4799 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4800 ins->sreg1 = args [0]->dreg;
4801 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max lower to conditional-move opcodes, only when CMOV is enabled */
4805 if (cfg->opt & MONO_OPT_CMOV) {
4806 if (strcmp (cmethod->name, "Min") == 0) {
4807 if (fsig->params [0]->type == MONO_TYPE_I4)
4809 if (fsig->params [0]->type == MONO_TYPE_U4)
4810 opcode = OP_IMIN_UN;
4811 else if (fsig->params [0]->type == MONO_TYPE_I8)
4813 else if (fsig->params [0]->type == MONO_TYPE_U8)
4814 opcode = OP_LMIN_UN;
4815 } else if (strcmp (cmethod->name, "Max") == 0) {
4816 if (fsig->params [0]->type == MONO_TYPE_I4)
4818 if (fsig->params [0]->type == MONO_TYPE_U4)
4819 opcode = OP_IMAX_UN;
4820 else if (fsig->params [0]->type == MONO_TYPE_I8)
4822 else if (fsig->params [0]->type == MONO_TYPE_U8)
4823 opcode = OP_LMAX_UN;
/* binary intrinsic: stack type follows the 32- vs. 64-bit parameter type */
4827 if (opcode && fsig->param_count == 2) {
4828 MONO_INST_NEW (cfg, ins, opcode);
4829 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4830 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4831 ins->sreg1 = args [0]->dreg;
4832 ins->sreg2 = args [1]->dreg;
4833 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics that are safe to emit in shared (generic-sharing) code:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers. Presumably returns
 * NULL for anything else — the trailing return is missing from this extract.
 */
4841 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4843 if (cmethod->klass == mono_defaults.array_class) {
4844 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4845 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4846 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4847 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4848 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
4849 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mono_type_is_native_blittable:
 * Return whether T has the same layout in managed and native memory, so a
 * PtrToStructure can be replaced by a plain typed load: reference types and
 * primitive scalars qualify directly; other types must be blittable and
 * have identical native-marshal and managed-value sizes.
 * NOTE(review): the TRUE/FALSE return lines are missing from this extract.
 */
4857 mono_type_is_native_blittable (MonoType *t)
4859 if (MONO_TYPE_IS_REFERENCE (t))
4862 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
4865 MonoClass *klass = mono_class_from_mono_type (t);
4867 //MonoClass::blittable depends on mono_class_setup_fields being done.
4868 mono_class_setup_fields (klass);
4869 if (!klass->blittable)
4872 // If the native marshal size is different we can't convert PtrToStructure to a type load
4873 if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
4881 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4883 MonoInst *ins = NULL;
4884 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
4886 if (cmethod->klass == mono_defaults.string_class) {
4887 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
4888 int dreg = alloc_ireg (cfg);
4889 int index_reg = alloc_preg (cfg);
4890 int add_reg = alloc_preg (cfg);
4892 #if SIZEOF_REGISTER == 8
4893 if (COMPILE_LLVM (cfg)) {
4894 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
4896 /* The array reg is 64 bits but the index reg is only 32 */
4897 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4900 index_reg = args [1]->dreg;
4902 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4904 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4905 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
4906 add_reg = ins->dreg;
4907 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4910 int mult_reg = alloc_preg (cfg);
4911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4912 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4913 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4914 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
4916 type_from_op (cfg, ins, NULL, NULL);
4918 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
4919 int dreg = alloc_ireg (cfg);
4920 /* Decompose later to allow more optimizations */
4921 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4922 ins->type = STACK_I4;
4923 ins->flags |= MONO_INST_FAULT;
4924 cfg->cbb->has_array_access = TRUE;
4925 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4930 } else if (cmethod->klass == mono_defaults.object_class) {
4931 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
4932 int dreg = alloc_ireg_ref (cfg);
4933 int vt_reg = alloc_preg (cfg);
4934 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4935 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
4936 type_from_op (cfg, ins, NULL, NULL);
4939 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
4940 int dreg = alloc_ireg (cfg);
4941 int t1 = alloc_ireg (cfg);
4943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4944 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4945 ins->type = STACK_I4;
4948 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
4949 MONO_INST_NEW (cfg, ins, OP_NOP);
4950 MONO_ADD_INS (cfg->cbb, ins);
4954 } else if (cmethod->klass == mono_defaults.array_class) {
4955 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4956 return emit_array_generic_access (cfg, fsig, args, FALSE);
4957 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4958 return emit_array_generic_access (cfg, fsig, args, TRUE);
4960 #ifndef MONO_BIG_ARRAYS
4962 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4965 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
4966 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
4967 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4968 int dreg = alloc_ireg (cfg);
4969 int bounds_reg = alloc_ireg_mp (cfg);
4970 MonoBasicBlock *end_bb, *szarray_bb;
4971 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4973 NEW_BBLOCK (cfg, end_bb);
4974 NEW_BBLOCK (cfg, szarray_bb);
4976 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4977 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4979 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4980 /* Non-szarray case */
4982 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4983 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4985 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4986 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4988 MONO_START_BB (cfg, szarray_bb);
4991 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4992 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
4994 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4995 MONO_START_BB (cfg, end_bb);
4997 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4998 ins->type = STACK_I4;
5004 if (cmethod->name [0] != 'g')
5007 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5008 int dreg = alloc_ireg (cfg);
5009 int vtable_reg = alloc_preg (cfg);
5010 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5011 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5012 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5013 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5014 type_from_op (cfg, ins, NULL, NULL);
5017 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5018 int dreg = alloc_ireg (cfg);
5020 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5021 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5022 type_from_op (cfg, ins, NULL, NULL);
5027 } else if (cmethod->klass == runtime_helpers_class) {
5028 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5029 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5031 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5032 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5034 g_assert (ctx->method_inst);
5035 g_assert (ctx->method_inst->type_argc == 1);
5036 MonoType *arg_type = ctx->method_inst->type_argv [0];
5042 /* Resolve the argument class as possible so we can handle common cases fast */
5043 t = mini_get_underlying_type (arg_type);
5044 klass = mono_class_from_mono_type (t);
5045 mono_class_init (klass);
5046 if (MONO_TYPE_IS_REFERENCE (t))
5047 EMIT_NEW_ICONST (cfg, ins, 1);
5048 else if (MONO_TYPE_IS_PRIMITIVE (t))
5049 EMIT_NEW_ICONST (cfg, ins, 0);
5050 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5051 EMIT_NEW_ICONST (cfg, ins, 1);
5052 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5053 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5055 g_assert (cfg->gshared);
5057 /* Have to use the original argument class here */
5058 MonoClass *arg_class = mono_class_from_mono_type (arg_type);
5059 int context_used = mini_class_check_context_used (cfg, arg_class);
5061 /* This returns 1 or 2 */
5062 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5063 int dreg = alloc_ireg (cfg);
5064 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5070 } else if (cmethod->klass == mono_defaults.monitor_class) {
5071 gboolean is_enter = FALSE;
5072 gboolean is_v4 = FALSE;
5074 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5078 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5083 * To make async stack traces work, icalls which can block should have a wrapper.
5084 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5086 MonoBasicBlock *end_bb;
5088 NEW_BBLOCK (cfg, end_bb);
5090 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5093 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5094 MONO_START_BB (cfg, end_bb);
5097 } else if (cmethod->klass == mono_defaults.thread_class) {
5098 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5099 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5100 MONO_ADD_INS (cfg->cbb, ins);
5102 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5103 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5104 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5106 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5108 if (fsig->params [0]->type == MONO_TYPE_I1)
5109 opcode = OP_LOADI1_MEMBASE;
5110 else if (fsig->params [0]->type == MONO_TYPE_U1)
5111 opcode = OP_LOADU1_MEMBASE;
5112 else if (fsig->params [0]->type == MONO_TYPE_I2)
5113 opcode = OP_LOADI2_MEMBASE;
5114 else if (fsig->params [0]->type == MONO_TYPE_U2)
5115 opcode = OP_LOADU2_MEMBASE;
5116 else if (fsig->params [0]->type == MONO_TYPE_I4)
5117 opcode = OP_LOADI4_MEMBASE;
5118 else if (fsig->params [0]->type == MONO_TYPE_U4)
5119 opcode = OP_LOADU4_MEMBASE;
5120 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5121 opcode = OP_LOADI8_MEMBASE;
5122 else if (fsig->params [0]->type == MONO_TYPE_R4)
5123 opcode = OP_LOADR4_MEMBASE;
5124 else if (fsig->params [0]->type == MONO_TYPE_R8)
5125 opcode = OP_LOADR8_MEMBASE;
5126 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5127 opcode = OP_LOAD_MEMBASE;
5130 MONO_INST_NEW (cfg, ins, opcode);
5131 ins->inst_basereg = args [0]->dreg;
5132 ins->inst_offset = 0;
5133 MONO_ADD_INS (cfg->cbb, ins);
5135 switch (fsig->params [0]->type) {
5142 ins->dreg = mono_alloc_ireg (cfg);
5143 ins->type = STACK_I4;
5147 ins->dreg = mono_alloc_lreg (cfg);
5148 ins->type = STACK_I8;
5152 ins->dreg = mono_alloc_ireg (cfg);
5153 #if SIZEOF_REGISTER == 8
5154 ins->type = STACK_I8;
5156 ins->type = STACK_I4;
5161 ins->dreg = mono_alloc_freg (cfg);
5162 ins->type = STACK_R8;
5165 g_assert (mini_type_is_reference (fsig->params [0]));
5166 ins->dreg = mono_alloc_ireg_ref (cfg);
5167 ins->type = STACK_OBJ;
5171 if (opcode == OP_LOADI8_MEMBASE)
5172 ins = mono_decompose_opcode (cfg, ins);
5174 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5178 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5180 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5182 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5183 opcode = OP_STOREI1_MEMBASE_REG;
5184 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5185 opcode = OP_STOREI2_MEMBASE_REG;
5186 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5187 opcode = OP_STOREI4_MEMBASE_REG;
5188 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5189 opcode = OP_STOREI8_MEMBASE_REG;
5190 else if (fsig->params [0]->type == MONO_TYPE_R4)
5191 opcode = OP_STORER4_MEMBASE_REG;
5192 else if (fsig->params [0]->type == MONO_TYPE_R8)
5193 opcode = OP_STORER8_MEMBASE_REG;
5194 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5195 opcode = OP_STORE_MEMBASE_REG;
5198 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5200 MONO_INST_NEW (cfg, ins, opcode);
5201 ins->sreg1 = args [1]->dreg;
5202 ins->inst_destbasereg = args [0]->dreg;
5203 ins->inst_offset = 0;
5204 MONO_ADD_INS (cfg->cbb, ins);
5206 if (opcode == OP_STOREI8_MEMBASE_REG)
5207 ins = mono_decompose_opcode (cfg, ins);
5212 } else if (cmethod->klass->image == mono_defaults.corlib &&
5213 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5214 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5217 #if SIZEOF_REGISTER == 8
5218 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5219 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5220 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5221 ins->dreg = mono_alloc_preg (cfg);
5222 ins->sreg1 = args [0]->dreg;
5223 ins->type = STACK_I8;
5224 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5225 MONO_ADD_INS (cfg->cbb, ins);
5229 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5231 /* 64 bit reads are already atomic */
5232 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5233 load_ins->dreg = mono_alloc_preg (cfg);
5234 load_ins->inst_basereg = args [0]->dreg;
5235 load_ins->inst_offset = 0;
5236 load_ins->type = STACK_I8;
5237 MONO_ADD_INS (cfg->cbb, load_ins);
5239 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5246 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5247 MonoInst *ins_iconst;
5250 if (fsig->params [0]->type == MONO_TYPE_I4) {
5251 opcode = OP_ATOMIC_ADD_I4;
5252 cfg->has_atomic_add_i4 = TRUE;
5254 #if SIZEOF_REGISTER == 8
5255 else if (fsig->params [0]->type == MONO_TYPE_I8)
5256 opcode = OP_ATOMIC_ADD_I8;
5259 if (!mono_arch_opcode_supported (opcode))
5261 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5262 ins_iconst->inst_c0 = 1;
5263 ins_iconst->dreg = mono_alloc_ireg (cfg);
5264 MONO_ADD_INS (cfg->cbb, ins_iconst);
5266 MONO_INST_NEW (cfg, ins, opcode);
5267 ins->dreg = mono_alloc_ireg (cfg);
5268 ins->inst_basereg = args [0]->dreg;
5269 ins->inst_offset = 0;
5270 ins->sreg2 = ins_iconst->dreg;
5271 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5272 MONO_ADD_INS (cfg->cbb, ins);
5274 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5275 MonoInst *ins_iconst;
5278 if (fsig->params [0]->type == MONO_TYPE_I4) {
5279 opcode = OP_ATOMIC_ADD_I4;
5280 cfg->has_atomic_add_i4 = TRUE;
5282 #if SIZEOF_REGISTER == 8
5283 else if (fsig->params [0]->type == MONO_TYPE_I8)
5284 opcode = OP_ATOMIC_ADD_I8;
5287 if (!mono_arch_opcode_supported (opcode))
5289 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5290 ins_iconst->inst_c0 = -1;
5291 ins_iconst->dreg = mono_alloc_ireg (cfg);
5292 MONO_ADD_INS (cfg->cbb, ins_iconst);
5294 MONO_INST_NEW (cfg, ins, opcode);
5295 ins->dreg = mono_alloc_ireg (cfg);
5296 ins->inst_basereg = args [0]->dreg;
5297 ins->inst_offset = 0;
5298 ins->sreg2 = ins_iconst->dreg;
5299 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5300 MONO_ADD_INS (cfg->cbb, ins);
5302 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5305 if (fsig->params [0]->type == MONO_TYPE_I4) {
5306 opcode = OP_ATOMIC_ADD_I4;
5307 cfg->has_atomic_add_i4 = TRUE;
5309 #if SIZEOF_REGISTER == 8
5310 else if (fsig->params [0]->type == MONO_TYPE_I8)
5311 opcode = OP_ATOMIC_ADD_I8;
5314 if (!mono_arch_opcode_supported (opcode))
5316 MONO_INST_NEW (cfg, ins, opcode);
5317 ins->dreg = mono_alloc_ireg (cfg);
5318 ins->inst_basereg = args [0]->dreg;
5319 ins->inst_offset = 0;
5320 ins->sreg2 = args [1]->dreg;
5321 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5322 MONO_ADD_INS (cfg->cbb, ins);
5325 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5326 MonoInst *f2i = NULL, *i2f;
5327 guint32 opcode, f2i_opcode, i2f_opcode;
5328 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5329 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5331 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5332 fsig->params [0]->type == MONO_TYPE_R4) {
5333 opcode = OP_ATOMIC_EXCHANGE_I4;
5334 f2i_opcode = OP_MOVE_F_TO_I4;
5335 i2f_opcode = OP_MOVE_I4_TO_F;
5336 cfg->has_atomic_exchange_i4 = TRUE;
5338 #if SIZEOF_REGISTER == 8
5340 fsig->params [0]->type == MONO_TYPE_I8 ||
5341 fsig->params [0]->type == MONO_TYPE_R8 ||
5342 fsig->params [0]->type == MONO_TYPE_I) {
5343 opcode = OP_ATOMIC_EXCHANGE_I8;
5344 f2i_opcode = OP_MOVE_F_TO_I8;
5345 i2f_opcode = OP_MOVE_I8_TO_F;
5348 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5349 opcode = OP_ATOMIC_EXCHANGE_I4;
5350 cfg->has_atomic_exchange_i4 = TRUE;
5356 if (!mono_arch_opcode_supported (opcode))
5360 /* TODO: Decompose these opcodes instead of bailing here. */
5361 if (COMPILE_SOFT_FLOAT (cfg))
5364 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5365 f2i->dreg = mono_alloc_ireg (cfg);
5366 f2i->sreg1 = args [1]->dreg;
5367 if (f2i_opcode == OP_MOVE_F_TO_I4)
5368 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5369 MONO_ADD_INS (cfg->cbb, f2i);
5372 MONO_INST_NEW (cfg, ins, opcode);
5373 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5374 ins->inst_basereg = args [0]->dreg;
5375 ins->inst_offset = 0;
5376 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5377 MONO_ADD_INS (cfg->cbb, ins);
5379 switch (fsig->params [0]->type) {
5381 ins->type = STACK_I4;
5384 ins->type = STACK_I8;
5387 #if SIZEOF_REGISTER == 8
5388 ins->type = STACK_I8;
5390 ins->type = STACK_I4;
5395 ins->type = STACK_R8;
5398 g_assert (mini_type_is_reference (fsig->params [0]));
5399 ins->type = STACK_OBJ;
5404 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5405 i2f->dreg = mono_alloc_freg (cfg);
5406 i2f->sreg1 = ins->dreg;
5407 i2f->type = STACK_R8;
5408 if (i2f_opcode == OP_MOVE_I4_TO_F)
5409 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5410 MONO_ADD_INS (cfg->cbb, i2f);
5415 if (cfg->gen_write_barriers && is_ref)
5416 mini_emit_write_barrier (cfg, args [0], args [1]);
5418 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5419 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5420 guint32 opcode, f2i_opcode, i2f_opcode;
5421 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5422 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5424 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5425 fsig->params [1]->type == MONO_TYPE_R4) {
5426 opcode = OP_ATOMIC_CAS_I4;
5427 f2i_opcode = OP_MOVE_F_TO_I4;
5428 i2f_opcode = OP_MOVE_I4_TO_F;
5429 cfg->has_atomic_cas_i4 = TRUE;
5431 #if SIZEOF_REGISTER == 8
5433 fsig->params [1]->type == MONO_TYPE_I8 ||
5434 fsig->params [1]->type == MONO_TYPE_R8 ||
5435 fsig->params [1]->type == MONO_TYPE_I) {
5436 opcode = OP_ATOMIC_CAS_I8;
5437 f2i_opcode = OP_MOVE_F_TO_I8;
5438 i2f_opcode = OP_MOVE_I8_TO_F;
5441 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5442 opcode = OP_ATOMIC_CAS_I4;
5443 cfg->has_atomic_cas_i4 = TRUE;
5449 if (!mono_arch_opcode_supported (opcode))
5453 /* TODO: Decompose these opcodes instead of bailing here. */
5454 if (COMPILE_SOFT_FLOAT (cfg))
5457 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5458 f2i_new->dreg = mono_alloc_ireg (cfg);
5459 f2i_new->sreg1 = args [1]->dreg;
5460 if (f2i_opcode == OP_MOVE_F_TO_I4)
5461 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5462 MONO_ADD_INS (cfg->cbb, f2i_new);
5464 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5465 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5466 f2i_cmp->sreg1 = args [2]->dreg;
5467 if (f2i_opcode == OP_MOVE_F_TO_I4)
5468 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5469 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5472 MONO_INST_NEW (cfg, ins, opcode);
5473 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5474 ins->sreg1 = args [0]->dreg;
5475 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5476 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5477 MONO_ADD_INS (cfg->cbb, ins);
5479 switch (fsig->params [1]->type) {
5481 ins->type = STACK_I4;
5484 ins->type = STACK_I8;
5487 #if SIZEOF_REGISTER == 8
5488 ins->type = STACK_I8;
5490 ins->type = STACK_I4;
5494 ins->type = cfg->r4_stack_type;
5497 ins->type = STACK_R8;
5500 g_assert (mini_type_is_reference (fsig->params [1]));
5501 ins->type = STACK_OBJ;
5506 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5507 i2f->dreg = mono_alloc_freg (cfg);
5508 i2f->sreg1 = ins->dreg;
5509 i2f->type = STACK_R8;
5510 if (i2f_opcode == OP_MOVE_I4_TO_F)
5511 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5512 MONO_ADD_INS (cfg->cbb, i2f);
5517 if (cfg->gen_write_barriers && is_ref)
5518 mini_emit_write_barrier (cfg, args [0], args [1]);
5520 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5521 fsig->params [1]->type == MONO_TYPE_I4) {
5522 MonoInst *cmp, *ceq;
5524 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5527 /* int32 r = CAS (location, value, comparand); */
5528 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5529 ins->dreg = alloc_ireg (cfg);
5530 ins->sreg1 = args [0]->dreg;
5531 ins->sreg2 = args [1]->dreg;
5532 ins->sreg3 = args [2]->dreg;
5533 ins->type = STACK_I4;
5534 MONO_ADD_INS (cfg->cbb, ins);
5536 /* bool result = r == comparand; */
5537 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5538 cmp->sreg1 = ins->dreg;
5539 cmp->sreg2 = args [2]->dreg;
5540 cmp->type = STACK_I4;
5541 MONO_ADD_INS (cfg->cbb, cmp);
5543 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5544 ceq->dreg = alloc_ireg (cfg);
5545 ceq->type = STACK_I4;
5546 MONO_ADD_INS (cfg->cbb, ceq);
5548 /* *success = result; */
5549 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5551 cfg->has_atomic_cas_i4 = TRUE;
5553 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5554 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5558 } else if (cmethod->klass->image == mono_defaults.corlib &&
5559 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5560 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5563 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5565 MonoType *t = fsig->params [0];
5567 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5569 g_assert (t->byref);
5570 /* t is a byref type, so the reference check is more complicated */
5571 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5572 if (t->type == MONO_TYPE_I1)
5573 opcode = OP_ATOMIC_LOAD_I1;
5574 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5575 opcode = OP_ATOMIC_LOAD_U1;
5576 else if (t->type == MONO_TYPE_I2)
5577 opcode = OP_ATOMIC_LOAD_I2;
5578 else if (t->type == MONO_TYPE_U2)
5579 opcode = OP_ATOMIC_LOAD_U2;
5580 else if (t->type == MONO_TYPE_I4)
5581 opcode = OP_ATOMIC_LOAD_I4;
5582 else if (t->type == MONO_TYPE_U4)
5583 opcode = OP_ATOMIC_LOAD_U4;
5584 else if (t->type == MONO_TYPE_R4)
5585 opcode = OP_ATOMIC_LOAD_R4;
5586 else if (t->type == MONO_TYPE_R8)
5587 opcode = OP_ATOMIC_LOAD_R8;
5588 #if SIZEOF_REGISTER == 8
5589 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5590 opcode = OP_ATOMIC_LOAD_I8;
5591 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5592 opcode = OP_ATOMIC_LOAD_U8;
5594 else if (t->type == MONO_TYPE_I)
5595 opcode = OP_ATOMIC_LOAD_I4;
5596 else if (is_ref || t->type == MONO_TYPE_U)
5597 opcode = OP_ATOMIC_LOAD_U4;
5601 if (!mono_arch_opcode_supported (opcode))
5604 MONO_INST_NEW (cfg, ins, opcode);
5605 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5606 ins->sreg1 = args [0]->dreg;
5607 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5608 MONO_ADD_INS (cfg->cbb, ins);
5611 case MONO_TYPE_BOOLEAN:
5618 ins->type = STACK_I4;
5622 ins->type = STACK_I8;
5626 #if SIZEOF_REGISTER == 8
5627 ins->type = STACK_I8;
5629 ins->type = STACK_I4;
5633 ins->type = cfg->r4_stack_type;
5636 ins->type = STACK_R8;
5640 ins->type = STACK_OBJ;
5646 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5648 MonoType *t = fsig->params [0];
5651 g_assert (t->byref);
5652 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5653 if (t->type == MONO_TYPE_I1)
5654 opcode = OP_ATOMIC_STORE_I1;
5655 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5656 opcode = OP_ATOMIC_STORE_U1;
5657 else if (t->type == MONO_TYPE_I2)
5658 opcode = OP_ATOMIC_STORE_I2;
5659 else if (t->type == MONO_TYPE_U2)
5660 opcode = OP_ATOMIC_STORE_U2;
5661 else if (t->type == MONO_TYPE_I4)
5662 opcode = OP_ATOMIC_STORE_I4;
5663 else if (t->type == MONO_TYPE_U4)
5664 opcode = OP_ATOMIC_STORE_U4;
5665 else if (t->type == MONO_TYPE_R4)
5666 opcode = OP_ATOMIC_STORE_R4;
5667 else if (t->type == MONO_TYPE_R8)
5668 opcode = OP_ATOMIC_STORE_R8;
5669 #if SIZEOF_REGISTER == 8
5670 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5671 opcode = OP_ATOMIC_STORE_I8;
5672 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5673 opcode = OP_ATOMIC_STORE_U8;
5675 else if (t->type == MONO_TYPE_I)
5676 opcode = OP_ATOMIC_STORE_I4;
5677 else if (is_ref || t->type == MONO_TYPE_U)
5678 opcode = OP_ATOMIC_STORE_U4;
5682 if (!mono_arch_opcode_supported (opcode))
5685 MONO_INST_NEW (cfg, ins, opcode);
5686 ins->dreg = args [0]->dreg;
5687 ins->sreg1 = args [1]->dreg;
5688 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5689 MONO_ADD_INS (cfg->cbb, ins);
5691 if (cfg->gen_write_barriers && is_ref)
5692 mini_emit_write_barrier (cfg, args [0], args [1]);
5698 } else if (cmethod->klass->image == mono_defaults.corlib &&
5699 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5700 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5701 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5702 if (mini_should_insert_breakpoint (cfg->method)) {
5703 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5705 MONO_INST_NEW (cfg, ins, OP_NOP);
5706 MONO_ADD_INS (cfg->cbb, ins);
5710 } else if (cmethod->klass->image == mono_defaults.corlib &&
5711 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5712 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5713 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5715 EMIT_NEW_ICONST (cfg, ins, 1);
5717 EMIT_NEW_ICONST (cfg, ins, 0);
5720 } else if (cmethod->klass->image == mono_defaults.corlib &&
5721 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5722 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5723 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5724 /* No stack walks are currently available, so implement this as an intrinsic */
5725 MonoInst *assembly_ins;
5727 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5728 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5731 } else if (cmethod->klass->image == mono_defaults.corlib &&
5732 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5733 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5734 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5735 /* No stack walks are currently available, so implement this as an intrinsic */
5736 MonoInst *method_ins;
5737 MonoMethod *declaring = cfg->method;
5739 /* This returns the declaring generic method */
5740 if (declaring->is_inflated)
5741 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5742 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5743 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5744 cfg->no_inline = TRUE;
5745 if (cfg->method != cfg->current_method)
5746 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5749 } else if (cmethod->klass == mono_defaults.math_class) {
5751 * There is general branchless code for Min/Max, but it does not work for
5753 * http://everything2.com/?node_id=1051618
5755 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5756 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5757 MONO_INST_NEW (cfg, ins, OP_PCEQ);
5758 ins->dreg = alloc_preg (cfg);
5759 ins->type = STACK_I4;
5760 MONO_ADD_INS (cfg->cbb, ins);
5762 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5763 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5764 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5765 !strcmp (cmethod->klass->name, "Selector")) ||
5766 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5767 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5768 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
5769 !strcmp (cmethod->klass->name, "Selector"))
5771 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
5772 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
5773 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
5776 MonoJumpInfoToken *ji;
5779 if (args [0]->opcode == OP_GOT_ENTRY) {
5780 pi = (MonoInst *)args [0]->inst_p1;
5781 g_assert (pi->opcode == OP_PATCH_INFO);
5782 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5783 ji = (MonoJumpInfoToken *)pi->inst_p0;
5785 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5786 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
5789 NULLIFY_INS (args [0]);
5791 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
5792 return_val_if_nok (&cfg->error, NULL);
5794 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5795 ins->dreg = mono_alloc_ireg (cfg);
5798 MONO_ADD_INS (cfg->cbb, ins);
5801 } else if (cmethod->klass->image == mono_defaults.corlib &&
5802 (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
5803 (strcmp (cmethod->klass->name, "Marshal") == 0)) {
5804 //Convert Marshal.PtrToStructure<T> of blittable T to direct loads
5805 if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
5806 cmethod->is_inflated &&
5807 fsig->param_count == 1 &&
5808 !mini_method_check_context_used (cfg, cmethod)) {
5810 MonoGenericContext *method_context = mono_method_get_context (cmethod);
5811 MonoType *arg0 = method_context->method_inst->type_argv [0];
5812 if (mono_type_is_native_blittable (arg0))
5813 return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
5817 #ifdef MONO_ARCH_SIMD_INTRINSICS
5818 if (cfg->opt & MONO_OPT_SIMD) {
5819 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5825 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5829 if (COMPILE_LLVM (cfg)) {
5830 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5835 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5839 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect a call to METHOD to a JIT-internal replacement when one exists.
 *   The only visible case: String.InternalAllocateStr is rerouted to the
 *   GC's managed allocator (skipped under MONO_OPT_SHARED, where the vtable
 *   cannot be embedded as a constant).
 */
5842 inline static MonoInst*
5843 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5844 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
5846 	if (method->klass == mono_defaults.string_class) {
5847 		/* managed string allocation support */
5848 		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
5849 			MonoInst *iargs [2];
5850 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5851 			MonoMethod *managed_alloc = NULL;
5853 			g_assert (vtable); /*Should not fail since it System.String*/
5854 #ifndef MONO_CROSS_COMPILE
/* NOTE(review): under cross-compile this presumably stays NULL and a fallback path (not visible here) is taken — confirm */
5855 			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5859 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5860 			iargs [1] = args [0];
5861 			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   Before inlining, create an OP_LOCAL variable for each argument (including
 *   the implicit 'this') and emit stores of the stack values SP into them, so
 *   the inlined body can address its arguments like ordinary variables.
 */
5868 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5870 	MonoInst *store, *temp;
5873 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static type comes from the stack entry, not the signature. */
5874 		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5877 		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5878 		 * would be different than the MonoInst's used to represent arguments, and
5879 		 * the ldelema implementation can't deal with that.
5880 		 * Solution: When ldelema is used on an inline argument, create a var for
5881 		 * it, emit ldelema on that var, and emit the saving code below in
5882 		 * inline_method () if needed.
5884 		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5885 		cfg->args [i] = temp;
5886 		/* This uses cfg->args [i] which is set by the preceeding line */
5887 		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5888 		store->cil_code = sp [0]->cil_code;
/* Debug aids: when enabled, the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables restrict inlining to methods whose full name starts
 * with the given prefix, to help bisect inlining-related miscompiles. */
5893 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5894 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5896 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Returns whether CALLED_METHOD's full name begins with the prefix cached
 *   from MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env var is read once and
 *   memoized in a function-static (not thread-safe; debug-only code).
 */
5898 check_inline_called_method_name_limit (MonoMethod *called_method)
5901 	static const char *limit = NULL;
5903 	if (limit == NULL) {
5904 		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5906 		if (limit_string != NULL)
5907 			limit = limit_string;
5912 	if (limit [0] != '\0') {
5913 		char *called_method_name = mono_method_full_name (called_method, TRUE);
5915 		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5916 		g_free (called_method_name);
5918 		//return (strncmp_result <= 0);
/* Prefix match only: inline solely when the name starts with the limit. */
5919 		return (strncmp_result == 0);
5926 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of the "called" check above, but keyed on the CALLER's name via
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Env var read once, memoized in a
 *   function-static (debug-only, not thread-safe).
 */
5928 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5931 	static const char *limit = NULL;
5933 	if (limit == NULL) {
5934 		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5935 		if (limit_string != NULL) {
5936 			limit = limit_string;
5942 	if (limit [0] != '\0') {
5943 		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5945 		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5946 		g_free (caller_method_name);
5948 		//return (strncmp_result <= 0);
/* Prefix match only, as in the called-method variant. */
5949 		return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes DREG to the zero/null/default value appropriate
 *   for RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats
 *   (via static r4_0/r8_0 constants), VZERO for value types. Used to give the
 *   inline return variable a defined value on paths that never set it.
 */
5957 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage so OP_R4CONST/OP_R8CONST can point at the constant. */
5959 	static double r8_0 = 0.0;
5960 	static float r4_0 = 0.0;
/* Strip enum/generic wrappers so the switch below sees the underlying type. */
5964 	rtype = mini_get_underlying_type (rtype);
5968 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5969 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5970 		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5971 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5972 		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 stays a distinct stack type instead of widening to R8. */
5973 	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
5974 		MONO_INST_NEW (cfg, ins, OP_R4CONST);
5975 		ins->type = STACK_R4;
5976 		ins->inst_p0 = (void*)&r4_0;
5978 		MONO_ADD_INS (cfg->cbb, ins);
5979 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5980 		MONO_INST_NEW (cfg, ins, OP_R8CONST);
5981 		ins->type = STACK_R8;
5982 		ins->inst_p0 = (void*)&r8_0;
5984 		MONO_ADD_INS (cfg->cbb, ins);
5985 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5986 			((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5987 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5988 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
5989 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (object refs, pointers) defaults to NULL. */
5991 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* initializations that keep the
 *   IR/SSA form valid without producing real code. Falls back to a real
 *   emit_init_rvar for types without a dummy opcode.
 */
5996 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6000 	rtype = mini_get_underlying_type (rtype);
6004 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6005 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6006 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6007 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6008 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6009 	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
6010 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6011 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6012 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6013 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6014 			((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6015 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6016 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6017 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type class: emit a real initialization. */
6019 		emit_init_rvar (cfg, dreg, rtype);
6023 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE. With INIT true a real zero
 *   value is emitted; otherwise only a dummy init keeps the IR well-formed.
 *   Soft-float targets go through a temporary register + LOCSTORE so the
 *   float decomposition pass can rewrite the store.
 */
6025 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6027 	MonoInst *var = cfg->locals [local];
6028 	if (COMPILE_SOFT_FLOAT (cfg)) {
6030 		int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6031 		emit_init_rvar (cfg, reg, type);
/* Store the value produced by the init (the bblock's last ins) into the local. */
6032 		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6035 		emit_init_rvar (cfg, var->dreg, type);
6037 		emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below. */
6042 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6044 	return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6050 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *   Attempt to inline CMETHOD at the current IR position. Recursively invokes
 *   mono_method_to_ir () on the callee between freshly allocated start/end
 *   bblocks, after saving every piece of per-method compile state in cfg
 *   (locals, args, cil offsets, current method, generic context, ...) in
 *   prev_* locals; the state is restored unconditionally afterwards, so the
 *   save/restore pairs below are strictly order-dependent.
 *   On success the new bblocks are linked into the caller's graph and merged
 *   where possible; on failure cfg->cbb is reset to drop them.
 */
6053 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6054 	       guchar *ip, guint real_offset, gboolean inline_always)
6057 	MonoInst *ins, *rvar = NULL;
6058 	MonoMethodHeader *cheader;
6059 	MonoBasicBlock *ebblock, *sbblock;
6061 	MonoMethod *prev_inlined_method;
6062 	MonoInst **prev_locals, **prev_args;
6063 	MonoType **prev_arg_types;
6064 	guint prev_real_offset;
6065 	GHashTable *prev_cbb_hash;
6066 	MonoBasicBlock **prev_cil_offset_to_bb;
6067 	MonoBasicBlock *prev_cbb;
6068 	const unsigned char *prev_ip;
6069 	unsigned char *prev_cil_start;
6070 	guint32 prev_cil_offset_to_bb_len;
6071 	MonoMethod *prev_current_method;
6072 	MonoGenericContext *prev_generic_context;
6073 	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6075 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on which methods may be inlined (see above). */
6077 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6078 	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6081 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6082 	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6087 		fsig = mono_method_signature (cmethod);
6089 	if (cfg->verbose_level > 2)
6090 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as inlineable only once. */
6092 	if (!cmethod->inline_info) {
6093 		cfg->stat_inlineable_methods++;
6094 		cmethod->inline_info = 1;
6097 	/* allocate local variables */
6098 	cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: a mandatory inline propagates the error; an
 * opportunistic one just swallows it and gives up. */
6100 		if (inline_always) {
6101 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6102 			mono_error_move (&cfg->error, &error);
6104 			mono_error_cleanup (&error);
6109 	/*Must verify before creating locals as it can cause the JIT to assert.*/
6110 	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6111 		mono_metadata_free_mh (cheader);
6115 	/* allocate space to store the return value */
6116 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6117 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals array for the duration of the inline. */
6120 	prev_locals = cfg->locals;
6121 	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6122 	for (i = 0; i < cheader->num_locals; ++i)
6123 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6125 	/* allocate start and end blocks */
6126 	/* This is needed so if the inline is aborted, we can clean up */
6127 	NEW_BBLOCK (cfg, sbblock);
6128 	sbblock->real_offset = real_offset;
6130 	NEW_BBLOCK (cfg, ebblock);
6131 	ebblock->block_num = cfg->num_bblocks++;
6132 	ebblock->real_offset = real_offset;
/* --- save per-method compile state --- */
6134 	prev_args = cfg->args;
6135 	prev_arg_types = cfg->arg_types;
6136 	prev_inlined_method = cfg->inlined_method;
6137 	cfg->inlined_method = cmethod;
6138 	cfg->ret_var_set = FALSE;
6139 	cfg->inline_depth ++;
6140 	prev_real_offset = cfg->real_offset;
6141 	prev_cbb_hash = cfg->cbb_hash;
6142 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6143 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6144 	prev_cil_start = cfg->cil_start;
6146 	prev_cbb = cfg->cbb;
6147 	prev_current_method = cfg->current_method;
6148 	prev_generic_context = cfg->generic_context;
6149 	prev_ret_var_set = cfg->ret_var_set;
6150 	prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method must keep its virtual semantics
 * (null check / devirtualization handling) inside the inlined body. */
6152 	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; costs < 0 means the inline failed. */
6155 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6157 	ret_var_set = cfg->ret_var_set;
/* --- restore per-method compile state (mirrors the saves above) --- */
6159 	cfg->inlined_method = prev_inlined_method;
6160 	cfg->real_offset = prev_real_offset;
6161 	cfg->cbb_hash = prev_cbb_hash;
6162 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6163 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6164 	cfg->cil_start = prev_cil_start;
6166 	cfg->locals = prev_locals;
6167 	cfg->args = prev_args;
6168 	cfg->arg_types = prev_arg_types;
6169 	cfg->current_method = prev_current_method;
6170 	cfg->generic_context = prev_generic_context;
6171 	cfg->ret_var_set = prev_ret_var_set;
6172 	cfg->disable_inline = prev_disable_inline;
6173 	cfg->inline_depth --;
/* Accept the inline when it is cheap enough (cost < 60), forced, or the
 * callee is marked AggressiveInlining. */
6175 	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6176 		if (cfg->verbose_level > 2)
6177 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6179 		cfg->stat_inlined_methods++;
6181 		/* always add some code to avoid block split failures */
6182 		MONO_INST_NEW (cfg, ins, OP_NOP);
6183 		MONO_ADD_INS (prev_cbb, ins);
6185 		prev_cbb->next_bb = sbblock;
6186 		link_bblock (cfg, prev_cbb, sbblock);
6189 		 * Get rid of the begin and end bblocks if possible to aid local
6192 		if (prev_cbb->out_count == 1)
6193 			mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6195 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6196 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6198 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6199 			MonoBasicBlock *prev = ebblock->in_bb [0];
6201 			if (prev->next_bb == ebblock) {
6202 				mono_merge_basic_blocks (cfg, prev, ebblock);
6204 				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6205 					mono_merge_basic_blocks (cfg, prev_cbb, prev);
6206 					cfg->cbb = prev_cbb;
6209 				/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6214 			 * Its possible that the rvar is set in some prev bblock, but not in others.
6220 			for (i = 0; i < ebblock->in_count; ++i) {
6221 				bb = ebblock->in_bb [i];
6223 				if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
/* Give the return var a defined value on unreachable-ending paths. */
6226 					emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6236 			 * If the inlined method contains only a throw, then the ret var is not
6237 			 * set, so set it to a dummy value.
6240 				emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6242 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6245 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6248 		if (cfg->verbose_level > 2)
6249 			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6250 		cfg->exception_type = MONO_EXCEPTION_NONE;
6252 		/* This gets rid of the newly added bblocks */
6253 		cfg->cbb = prev_cbb;
6255 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6260 * Some of these comments may well be out-of-date.
6261 * Design decisions: we do a single pass over the IL code (and we do bblock
6262 * splitting/merging in the few cases when it's required: a back jump to an IL
6263 * address that was not already seen as bblock starting point).
6264 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6265 * Complex operations are decomposed in simpler ones right away. We need to let the
6266 * arch-specific code peek and poke inside this process somehow (except when the
6267 * optimizations can take advantage of the full semantic info of coarse opcodes).
6268 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6269 * MonoInst->opcode initially is the IL opcode or some simplification of that
6270 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6271 * opcode with value bigger than OP_LAST.
6272 * At this point the IR can be handed over to an interpreter, a dumb code generator
6273 * or to the optimizing code generator that will translate it to SSA form.
6275 * Profiling directed optimizations.
6276 * We may compile by default with few or no optimizations and instrument the code
6277 * or the user may indicate what methods to optimize the most either in a config file
6278 * or through repeated runs where the compiler applies offline the optimizations to
6279 * each method and then decides if it was worth it.
/* IL-verification helper macros used throughout mono_method_to_ir ():
 * each one bails out via UNVERIFIED / TYPE_LOAD_ERROR when the check fails. */
6282 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6283 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6284 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6285 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6286 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6287 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6288 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6289 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6291 /* offset from br.s -> br like opcodes */
6292 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when IL address IP belongs to basic block BB, i.e. no other
 *   bblock was registered at that CIL offset in cfg->cil_offset_to_bb.
 */
6295 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6297 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6299 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode, and for every
 *   branch target (short/long branches and switch tables) register a basic
 *   block via GET_BBLOCK, so the main IR pass knows where blocks begin.
 *   Also marks bblocks containing a 'throw' as out-of-line (cold).
 */
6303 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6305 	unsigned char *ip = start;
6306 	unsigned char *target;
6309 	MonoBasicBlock *bblock;
6310 	const MonoOpcode *opcode;
6313 		cli_addr = ip - start;
6314 		i = mono_opcode_value ((const guint8 **)&ip, end);
6317 		opcode = &mono_opcodes [i];
/* Dispatch on the operand kind to find the instruction length and any
 * branch targets it encodes. */
6318 		switch (opcode->argument) {
6319 		case MonoInlineNone:
6322 		case MonoInlineString:
6323 		case MonoInlineType:
6324 		case MonoInlineField:
6325 		case MonoInlineMethod:
6328 		case MonoShortInlineR:
6335 		case MonoShortInlineVar:
6336 		case MonoShortInlineI:
6339 		case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte insn. */
6340 			target = start + cli_addr + 2 + (signed char)ip [1];
6341 			GET_BBLOCK (cfg, bblock, target);
6344 			GET_BBLOCK (cfg, bblock, ip);
6346 		case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte insn. */
6347 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6348 			GET_BBLOCK (cfg, bblock, target);
6351 			GET_BBLOCK (cfg, bblock, ip);
6353 		case MonoInlineSwitch: {
6354 			guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table. */
6357 			cli_addr += 5 + 4 * n;
6358 			target = start + cli_addr;
6359 			GET_BBLOCK (cfg, bblock, target);
6361 			for (j = 0; j < n; ++j) {
6362 				target = start + cli_addr + (gint32)read32 (ip);
6363 				GET_BBLOCK (cfg, bblock, target);
6373 			g_assert_not_reached ();
6376 		if (i == CEE_THROW) {
6377 			unsigned char *bb_start = ip - 1;
6379 			/* Find the start of the bblock containing the throw */
6381 			while ((bb_start >= start) && !bblock) {
6382 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: lay them out out-of-line. */
6386 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing open
 *   generic methods. Wrappers look the token up in their wrapper data and
 *   inflate with CONTEXT; normal methods go through mono_get_method_checked.
 */
6396 static inline MonoMethod *
6397 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6403 	if (m->wrapper_type != MONO_WRAPPER_NONE) {
6404 		method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6406 			method = mono_class_inflate_generic_method_checked (method, context, error);
6409 		method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but additionally rejects methods with
 *   open constructed types when not compiling gshared code, recording a
 *   bad-image error on the cfg. CFG may be NULL (errors are then swallowed).
 */
6415 static inline MonoMethod *
6416 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6419 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6421 	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6422 		mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6426 	if (!method && !cfg)
6427 		mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature: wrapper methods read it from
 *   wrapper data, otherwise it is parsed from metadata and inflated with
 *   CONTEXT. Returns NULL on error (reported through ERROR).
 */
6432 static inline MonoMethodSignature*
6433 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6435 	MonoMethodSignature *fsig;
6438 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
6439 		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6441 		fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6442 		return_val_if_nok (error, NULL);
6445 	fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *   Return (lazily looking up and caching) the SecurityManager.ThrowException
 *   managed method used to raise CoreCLR security exceptions.
 *   NOTE(review): the function-static cache is presumably protected by the
 *   JIT's single-threaded compilation of a method — confirm before reuse.
 */
6451 throw_exception (void)
6453 	static MonoMethod *method = NULL;
6456 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
6457 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current IR
 *   position, raising the preallocated exception object at run time.
 */
6464 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6466 	MonoMethod *thrower = throw_exception ();
6469 	EMIT_NEW_PCONST (cfg, args [0], ex);
6470 	mono_emit_method_call (cfg, thrower, args, NULL);
6474 * Return the original method is a wrapper is specified. We can only access
6475 * the custom attributes from the original method.
/* Used by the CoreCLR security checks below, which need the attribute-bearing
 * method rather than a synthesized wrapper. */
6478 get_original_method (MonoMethod *method)
6480 	if (method->wrapper_type == MONO_WRAPPER_NONE)
6483 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6484 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6487 	/* in other cases we need to find the original method */
6488 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: if the (unwrapped) caller is not
 *   allowed to touch FIELD, emit code that throws the returned exception.
 */
6492 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6494 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6495 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6497 		emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls: mirror of the field-access check above,
 *   emitting a throw when CALLER may not invoke CALLEE.
 */
6501 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6503 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6504 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6506 		emit_throw_exception (cfg, ex);
6510 * Check that the IL instructions at ip are the array initialization
6511 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray" IL sequence following a newarr, and on a
 *   match return a pointer to the field's RVA data so the JIT can replace the
 *   call with a direct memory initialization. OUT_SIZE/OUT_FIELD_TOKEN are
 *   filled in for the caller; under AOT the RVA itself is returned so the
 *   lookup happens at load time.
 */
6514 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6517 	 * newarr[System.Int32]
6519 	 * ldtoken field valuetype ...
6520 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand is a Field token (table 0x04). */
6522 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6524 		guint32 token = read32 (ip + 7);
6525 		guint32 field_token = read32 (ip + 2);
6526 		guint32 field_index = field_token & 0xffffff;
6528 		const char *data_ptr;
6530 		MonoMethod *cmethod;
6531 		MonoClass *dummy_class;
6532 		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6536 			mono_error_cleanup (&error); /* FIXME don't swallow the error */
6540 		*out_field_token = field_token;
6542 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real corlib RuntimeHelpers.InitializeArray. */
6545 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6547 		switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6551 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6552 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Refuse if the array needs more bytes than the RVA field provides. */
6569 		if (size > mono_type_size (field->type, &dummy_align))
6572 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6573 		if (!image_is_dynamic (method->klass->image)) {
6574 			field_index = read32 (ip + 2) & 0xffffff;
6575 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6576 			data_ptr = mono_image_rva_map (method->klass->image, rva);
6577 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6578 			/* for aot code we do the lookup on load */
6579 			if (aot && data_ptr)
6580 				return (const char *)GUINT_TO_POINTER (rva);
6582 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6584 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 *   in METHOD, including a disassembly of the offending instruction (or a
 *   note that the body could not be parsed / is empty).
 */
6592 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6595 	char *method_fname = mono_method_full_name (method, TRUE);
6597 	MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6600 		method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6601 		mono_error_cleanup (&error);
6602 	} else if (header->code_size == 0)
6603 		method_code = g_strdup ("method body is empty.");
6605 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6606 	mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6607 	g_free (method_fname);
6608 	g_free (method_code);
/* Header is freed later with the rest of the cfg's headers. */
6609 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * mono_type_to_stloc_coerce:
 *   Return the narrowing-conversion opcode (OP_ICONV_TO_*) that a stloc/starg
 *   into a variable of TYPE requires, or fall through (no coercion) for types
 *   stored at full register width. Enums recurse on their base type.
 */
6613 mono_type_to_stloc_coerce (MonoType *type)
6618 	type = mini_get_underlying_type (type);
6620 	switch (type->type) {
6622 		return OP_ICONV_TO_I1;
6624 		return OP_ICONV_TO_U1;
6626 		return OP_ICONV_TO_I2;
6628 		return OP_ICONV_TO_U2;
/* Pointer-sized and reference types need no coercion. */
6634 	case MONO_TYPE_FNPTR:
6635 	case MONO_TYPE_CLASS:
6636 	case MONO_TYPE_STRING:
6637 	case MONO_TYPE_OBJECT:
6638 	case MONO_TYPE_SZARRAY:
6639 	case MONO_TYPE_ARRAY:
6644 	case MONO_TYPE_TYPEDBYREF:
6645 	case MONO_TYPE_GENERICINST:
6647 	case MONO_TYPE_VALUETYPE:
6648 		if (type->data.klass->enumtype) {
6649 			type = mono_class_enum_basetype (type->data.klass);
6654 	case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32
6657 		g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
/*
 * emit_stloc_ir:
 *   Emit the store of stack value *SP into local N, inserting the narrowing
 *   coercion required by the local's declared type (CIL stores to small int
 *   locals truncate). Reuses an identical trailing coercion when present and
 *   optimizes constant reg-reg moves by retargeting the constant's dreg.
 */
6663 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6666 	guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
6669 		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
6670 			if (cfg->verbose_level > 2)
6671 				printf ("Found existing coercing is enough for stloc\n");
6673 			MONO_INST_NEW (cfg, ins, coerce_op);
6674 			ins->dreg = alloc_ireg (cfg);
6675 			ins->sreg1 = sp [0]->dreg;
6676 			ins->type = STACK_I4;
6677 			ins->klass = mono_class_from_mono_type (header->locals [n]);
6678 			MONO_ADD_INS (cfg->cbb, ins);
6679 			*sp = mono_decompose_opcode (cfg, ins);
6684 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6685 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6686 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6687 		/* Optimize reg-reg moves away */
6689 		 * Can't optimize other opcodes, since sp[0] might point to
6690 		 * the last ins of a decomposed opcode.
6692 		sp [0]->dreg = (cfg)->locals [n]->dreg;
6694 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_starg_ir:
 *   Emit the store of stack value *SP into argument N. Same coercion logic as
 *   emit_stloc_ir, but keyed on cfg->arg_types and ending in an ARGSTORE.
 */
6699 emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
6702 	guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
6705 		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
6706 			if (cfg->verbose_level > 2)
6707 				printf ("Found existing coercing is enough for starg\n");
6709 			MONO_INST_NEW (cfg, ins, coerce_op);
6710 			ins->dreg = alloc_ireg (cfg);
6711 			ins->sreg1 = sp [0]->dreg;
6712 			ins->type = STACK_I4;
6713 			ins->klass = mono_class_from_mono_type (cfg->arg_types [n]);
6714 			MONO_ADD_INS (cfg->cbb, ins);
6715 			*sp = mono_decompose_opcode (cfg, ins);
6719 	EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6723 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for CIL ldloca: when the address of a local is taken only to
 * be consumed immediately by a following INITOBJ in the same basic block,
 * emit a direct local initialization (emit_init_local) instead of
 * materializing the address. Returns the advanced IL pointer when the
 * pattern matched — presumably NULL/ip otherwise; the fallthrough path is
 * not visible in this chunk, so confirm against the full source.
 */
6726 static inline unsigned char *
6727 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* Local index encoded after the ldloca opcode. */
6737 local = read16 (ip + 2);
/* Match "0xFE (CEE_PREFIX1) CEE_INITOBJ <token>" right after the ldloca,
 * staying within the current basic block. */
6741 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6742 /* From the INITOBJ case */
6743 token = read32 (ip + 2);
6744 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6745 CHECK_TYPELOAD (klass);
6746 type = mini_get_underlying_type (&klass->byval_arg);
6747 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call for llvm-only mode, where vtable slots
 * hold function descriptors (addr + extra arg) instead of raw code
 * pointers. Four strategies, chosen from the callee's shape:
 *   1. plain virtual call        -> load ftndesc from the vtable slot,
 *      lazily initializing it via the mono_init_vtable_slot icall;
 *   2. simple interface call     -> call through the IMT thunk descriptor
 *      stored in the negative-offset IMT slot;
 *   3. generic virtual / variant-iface / special-array-iface call
 *      -> try the IMT thunk, fall back to a resolver icall on miss;
 *   4. everything else (gsharedvt) -> resolve via
 *      mono_resolve_{iface_call,vcall}_gsharedvt icalls.
 * NOTE(review): interior lines (closing braces, some else-branches) are
 * elided in this chunk; comments describe only the visible statements.
 */
6755 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6757 MonoInst *icall_args [16];
6758 MonoInst *call_target, *ins, *vtable_ins;
6759 int arg_reg, this_reg, vtable_reg;
6760 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6761 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6762 gboolean variant_iface = FALSE;
6765 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
6768 * In llvm-only mode, vtables contain function descriptors instead of
6769 * method addresses/trampolines.
/* Explicit null check on the receiver before any vtable load. */
6771 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods use an IMT slot, class methods a vtable index. */
6774 slot = mono_method_get_imt_slot (cmethod);
6776 slot = mono_method_get_vtable_index (cmethod);
6778 this_reg = sp [0]->dreg;
6780 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6781 variant_iface = TRUE;
/* Case 1: a normal (non-generic, non-interface, non-gsharedvt) virtual call. */
6783 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6785 * The simplest case, a normal virtual call.
6787 int slot_reg = alloc_preg (cfg);
6788 int addr_reg = alloc_preg (cfg);
6789 int arg_reg = alloc_preg (cfg);
6790 MonoBasicBlock *non_null_bb;
6792 vtable_reg = alloc_preg (cfg);
6793 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6794 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6796 /* Load the vtable slot, which contains a function descriptor. */
6797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* Slot already initialized (non-null) -> skip the init icall. */
6799 NEW_BBLOCK (cfg, non_null_bb);
6801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6802 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
6803 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
6806 // FIXME: Make the wrapper use the preserveall cconv
6807 // FIXME: Use one icall per slot for small slot numbers ?
6808 icall_args [0] = vtable_ins;
6809 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6810 /* Make the icall return the vtable slot value to save some code space */
6811 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
6812 ins->dreg = slot_reg;
6813 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
6816 MONO_START_BB (cfg, non_null_bb);
6817 /* Load the address + arg from the vtable slot */
6818 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
6821 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: a simple (non-variant, non-generic) interface call through IMT. */
6824 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
6826 * A simple interface call
6828 * We make a call through an imt slot to obtain the function descriptor we need to call.
6829 * The imt slot contains a function descriptor for a runtime function + arg.
6831 int slot_reg = alloc_preg (cfg);
6832 int addr_reg = alloc_preg (cfg);
6833 int arg_reg = alloc_preg (cfg);
6834 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6836 vtable_reg = alloc_preg (cfg);
6837 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
6838 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6841 * The slot is already initialized when the vtable is created so there is no need
6845 /* Load the imt slot, which contains a function descriptor. */
6846 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6848 /* Load the address + arg of the imt thunk from the imt slot */
6849 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6850 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6852 * IMT thunks in llvm-only mode are C functions which take an info argument
6853 * plus the imt method and return the ftndesc to call.
6855 icall_args [0] = thunk_arg_ins;
6856 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6857 cmethod, MONO_RGCTX_INFO_METHOD);
6858 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6860 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant interface / special array interface. */
6863 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
6865 * This is similar to the interface case, the vtable slot points to an imt thunk which is
6866 * dynamically extended as more instantiations are discovered.
6867 * This handles generic virtual methods both on classes and interfaces.
6869 int slot_reg = alloc_preg (cfg);
6870 int addr_reg = alloc_preg (cfg);
6871 int arg_reg = alloc_preg (cfg);
6872 int ftndesc_reg = alloc_preg (cfg);
6873 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6874 MonoBasicBlock *slowpath_bb, *end_bb;
6876 NEW_BBLOCK (cfg, slowpath_bb);
6877 NEW_BBLOCK (cfg, end_bb);
6879 vtable_reg = alloc_preg (cfg);
6880 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slot (interfaces) vs. regular vtable slot (classes); the selecting
 * condition between these two offsets is elided in this chunk. */
6882 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6884 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6886 /* Load the slot, which contains a function descriptor. */
6887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6889 /* These slots are not initialized, so fall back to the slow path until they are initialized */
6890 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
6891 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6892 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6895 /* Same as with iface calls */
6896 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6897 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6898 icall_args [0] = thunk_arg_ins;
6899 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6900 cmethod, MONO_RGCTX_INFO_METHOD);
6901 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6902 ftndesc_ins->dreg = ftndesc_reg;
6904 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
6905 * they don't know about yet. Fall back to the slowpath in that case.
6907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
6908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: ask the runtime to resolve the target ftndesc. */
6913 MONO_START_BB (cfg, slowpath_bb);
6914 icall_args [0] = vtable_ins;
6915 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6916 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6917 cmethod, MONO_RGCTX_INFO_METHOD);
6919 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
6921 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
6922 ftndesc_ins->dreg = ftndesc_reg;
6923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6926 MONO_START_BB (cfg, end_bb);
6927 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
6931 * Non-optimized cases
/* Case 4: gsharedvt — resolve target + out-arg via a runtime icall. */
6933 icall_args [0] = sp [0];
6934 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6936 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6937 cmethod, MONO_RGCTX_INFO_METHOD);
/* The icall writes the extra arg back through this stack slot's address. */
6939 arg_reg = alloc_preg (cfg);
6940 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
6941 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
6943 g_assert (is_gsharedvt);
6945 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
6947 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
6950 * Pass the extra argument even if the callee doesn't receive it, most
6951 * calling conventions allow this.
6953 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 * walking the parent chain (the enclosing loop is elided in this chunk).
 */
6957 is_exception_class (MonoClass *klass)
6960 if (klass == mono_defaults.exception_class)
6962 klass = klass->parent;
6968 * is_jit_optimizer_disabled:
6970 * Determine whenever M's assembly has a DebuggableAttribute with the
6971 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly in ass->jit_optimizer_disabled,
 * published with a memory barrier before the _inited flag is set. */
6974 is_jit_optimizer_disabled (MonoMethod *m)
6977 MonoAssembly *ass = m->klass->image->assembly;
6978 MonoCustomAttrInfo* attrs;
6981 gboolean val = FALSE;
/* Fast path: already computed for this assembly. */
6984 if (ass->jit_optimizer_disabled_inited)
6985 return ass->jit_optimizer_disabled;
6987 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class unavailable: cache FALSE and return. */
6991 ass->jit_optimizer_disabled = FALSE;
6992 mono_memory_barrier ();
6993 ass->jit_optimizer_disabled_inited = TRUE;
6997 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
6998 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan assembly-level attributes for DebuggableAttribute. */
7000 for (i = 0; i < attrs->num_attrs; ++i) {
7001 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7003 MonoMethodSignature *sig;
7005 if (!attr->ctor || attr->ctor->klass != klass)
7007 /* Decode the attribute. See reflection.c */
7008 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3). */
7009 g_assert (read16 (p) == 0x0001);
7012 // FIXME: Support named parameters
7013 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
7014 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7016 /* Two boolean arguments */
7020 mono_custom_attrs_free (attrs);
/* Publish the computed value before marking the cache valid. */
7023 ass->jit_optimizer_disabled = val;
7024 mono_memory_barrier ();
7025 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call from METHOD to CMETHOD (signature FSIG, made by
 * CALL_OPCODE) may be compiled as a tail call. Starts from the
 * architecture's verdict and then vetoes any case where the callee could
 * observe the caller's (about to be destroyed) stack frame, plus cases
 * the runtime cannot handle (pinvokes, most wrappers, LMF-saving callers,
 * non-CEE_CALL opcodes).
 */
7031 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7033 gboolean supported_tail_call;
7036 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7038 for (i = 0; i < fsig->param_count; ++i) {
7039 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7040 /* These can point to the current method's stack */
7041 supported_tail_call = FALSE;
7043 if (fsig->hasthis && cmethod->klass->valuetype)
7044 /* this might point to the current method's stack */
7045 supported_tail_call = FALSE;
7046 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7047 supported_tail_call = FALSE;
7048 if (cfg->method->save_lmf)
7049 supported_tail_call = FALSE;
7050 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7051 supported_tail_call = FALSE;
7052 if (call_opcode != CEE_CALL)
7053 supported_tail_call = FALSE;
7055 /* Debugging support */
/* mono_debug_count () lets tail calls be bisected away when debugging. */
7057 if (supported_tail_call) {
7058 if (!mono_debug_count ())
7059 supported_tail_call = FALSE;
7063 return supported_tail_call;
7069 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ: pick between an
 * intrinsic, inlining, a gsharedvt indirect call, a generic-sharing
 * indirect call, or a plain direct call. SP holds the already-allocated
 * "this" plus the ctor arguments; INLINE_COSTS is updated when inlining
 * succeeds. NOTE(review): interior lines are elided in this chunk;
 * comments describe only the visible statements.
 */
7072 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7073 MonoInst **sp, guint8 *ip, int *inline_costs)
7075 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an explicit vtable/mrgctx argument. */
7077 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7078 mono_method_is_generic_sharable (cmethod, TRUE)) {
7079 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7080 mono_class_vtable (cfg->domain, cmethod->klass);
7081 CHECK_TYPELOAD (cmethod->klass);
7083 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7084 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7087 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7088 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7090 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7092 CHECK_TYPELOAD (cmethod->klass);
7093 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7098 /* Avoid virtual calls to ctors if possible */
7099 if (mono_class_is_marshalbyref (cmethod->klass))
7100 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first. */
7102 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7103 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7104 CHECK_CFG_EXCEPTION;
/* Then try inlining (skipped for exception subclasses and shared code). */
7105 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7106 mono_method_check_inlining (cfg, cmethod) &&
7107 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7110 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7111 cfg->real_offset += 5;
/* Subtract the cost of the call the inline replaced. */
7113 *inline_costs += costs - 5;
7115 INLINE_FAILURE ("inline failure");
7116 // FIXME-VT: Clean this up
7117 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7118 GSHAREDVT_FAILURE(*ip);
7119 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt: call through an out trampoline fetched from the rgctx. */
7121 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7124 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7126 if (cfg->llvm_only) {
7127 // FIXME: Avoid initializing vtable_arg
7128 emit_llvmonly_calli (cfg, fsig, sp, addr);
7130 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* Shared-generic context: the callee address must come from the rgctx. */
7132 } else if (context_used &&
7133 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7134 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7135 MonoInst *cmethod_addr;
7137 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7139 if (cfg->llvm_only) {
7140 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7141 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7142 emit_llvmonly_calli (cfg, fsig, sp, addr);
7144 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7145 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7147 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a plain direct call to the ctor. */
7150 INLINE_FAILURE ("ctor call");
7151 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7152 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR placing VAL into the current method's return location.
 * Valuetype returns (mini_type_to_stind == CEE_STOBJ) are stored either
 * into the cfg->ret variable or through the hidden vret address; on
 * soft-float targets an R4 return is first converted via the
 * mono_fload_r4_arg icall; everything else goes through
 * mono_arch_emit_setret. NOTE(review): some branches' closing lines are
 * elided in this chunk.
 */
7159 emit_setret (MonoCompile *cfg, MonoInst *val)
7161 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: store the whole value, not a scalar. */
7164 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7167 if (!cfg->vret_addr) {
7168 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden return-buffer address: store through it. */
7170 EMIT_NEW_RETLOADA (cfg, ret_addr);
7172 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7173 ins->klass = mono_class_from_mono_type (ret_type);
7176 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7177 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7178 MonoInst *iargs [1];
/* Convert the soft-float R4 representation before the arch setret. */
7182 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7183 mono_arch_emit_setret (cfg, cfg->method, conv);
7185 mono_arch_emit_setret (cfg, cfg->method, val);
7188 mono_arch_emit_setret (cfg, cfg->method, val);
7194 * mono_method_to_ir:
7196 * Translate the .net IL into linear IR.
7198 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7199 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7200 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7201 * @inline_args: if not NULL, contains the arguments to the inline call
7202 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7203 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7205 * This method is used to turn ECMA IL into Mono's internal Linear IR
7206 * reprensetation. It is used both for entire methods, as well as
7207 * inlining existing methods. In the former case, the @start_bblock,
7208 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7209 * inline_offset is set to zero.
7211 * Returns: the inline cost, or -1 if there was an error processing this method.
7214 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7215 MonoInst *return_var, MonoInst **inline_args,
7216 guint inline_offset, gboolean is_virtual_call)
7219 MonoInst *ins, **sp, **stack_start;
7220 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7221 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7222 MonoMethod *cmethod, *method_definition;
7223 MonoInst **arg_array;
7224 MonoMethodHeader *header;
7226 guint32 token, ins_flag;
7228 MonoClass *constrained_class = NULL;
7229 unsigned char *ip, *end, *target, *err_pos;
7230 MonoMethodSignature *sig;
7231 MonoGenericContext *generic_context = NULL;
7232 MonoGenericContainer *generic_container = NULL;
7233 MonoType **param_types;
7234 int i, n, start_new_bblock, dreg;
7235 int num_calls = 0, inline_costs = 0;
7236 int breakpoint_id = 0;
7238 GSList *class_inits = NULL;
7239 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7241 gboolean init_locals, seq_points, skip_dead_blocks;
7242 gboolean sym_seq_points = FALSE;
7243 MonoDebugMethodInfo *minfo;
7244 MonoBitSet *seq_point_locs = NULL;
7245 MonoBitSet *seq_point_set_locs = NULL;
7247 cfg->disable_inline = is_jit_optimizer_disabled (method);
7249 /* serialization and xdomain stuff may need access to private fields and methods */
7250 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7251 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7252 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7253 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7254 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7255 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7257 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7258 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7259 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7260 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7261 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7263 image = method->klass->image;
7264 header = mono_method_get_header_checked (method, &cfg->error);
7266 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7267 goto exception_exit;
7269 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7272 generic_container = mono_method_get_generic_container (method);
7273 sig = mono_method_signature (method);
7274 num_args = sig->hasthis + sig->param_count;
7275 ip = (unsigned char*)header->code;
7276 cfg->cil_start = ip;
7277 end = ip + header->code_size;
7278 cfg->stat_cil_code_size += header->code_size;
7280 seq_points = cfg->gen_seq_points && cfg->method == method;
7282 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7283 /* We could hit a seq point before attaching to the JIT (#8338) */
7287 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7288 minfo = mono_debug_lookup_method (method);
7290 MonoSymSeqPoint *sps;
7291 int i, n_il_offsets;
7293 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7294 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7295 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7296 sym_seq_points = TRUE;
7297 for (i = 0; i < n_il_offsets; ++i) {
7298 if (sps [i].il_offset < header->code_size)
7299 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7303 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7305 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7307 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7308 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7310 mono_debug_free_method_async_debug_info (asyncMethod);
7312 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7313 /* Methods without line number info like auto-generated property accessors */
7314 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7315 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7316 sym_seq_points = TRUE;
7321 * Methods without init_locals set could cause asserts in various passes
7322 * (#497220). To work around this, we emit dummy initialization opcodes
7323 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7324 * on some platforms.
7326 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7327 init_locals = header->init_locals;
7331 method_definition = method;
7332 while (method_definition->is_inflated) {
7333 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7334 method_definition = imethod->declaring;
7337 /* SkipVerification is not allowed if core-clr is enabled */
7338 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7340 dont_verify_stloc = TRUE;
7343 if (sig->is_inflated)
7344 generic_context = mono_method_get_context (method);
7345 else if (generic_container)
7346 generic_context = &generic_container->context;
7347 cfg->generic_context = generic_context;
7350 g_assert (!sig->has_type_parameters);
7352 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7353 g_assert (method->is_inflated);
7354 g_assert (mono_method_get_context (method)->method_inst);
7356 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7357 g_assert (sig->generic_param_count);
7359 if (cfg->method == method) {
7360 cfg->real_offset = 0;
7362 cfg->real_offset = inline_offset;
7365 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7366 cfg->cil_offset_to_bb_len = header->code_size;
7368 cfg->current_method = method;
7370 if (cfg->verbose_level > 2)
7371 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7373 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7375 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7376 for (n = 0; n < sig->param_count; ++n)
7377 param_types [n + sig->hasthis] = sig->params [n];
7378 cfg->arg_types = param_types;
7380 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7381 if (cfg->method == method) {
7383 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7386 NEW_BBLOCK (cfg, start_bblock);
7387 cfg->bb_entry = start_bblock;
7388 start_bblock->cil_code = NULL;
7389 start_bblock->cil_length = 0;
7392 NEW_BBLOCK (cfg, end_bblock);
7393 cfg->bb_exit = end_bblock;
7394 end_bblock->cil_code = NULL;
7395 end_bblock->cil_length = 0;
7396 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7397 g_assert (cfg->num_bblocks == 2);
7399 arg_array = cfg->args;
7401 if (header->num_clauses) {
7402 cfg->spvars = g_hash_table_new (NULL, NULL);
7403 cfg->exvars = g_hash_table_new (NULL, NULL);
7405 /* handle exception clauses */
7406 for (i = 0; i < header->num_clauses; ++i) {
7407 MonoBasicBlock *try_bb;
7408 MonoExceptionClause *clause = &header->clauses [i];
7409 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7411 try_bb->real_offset = clause->try_offset;
7412 try_bb->try_start = TRUE;
7413 try_bb->region = ((i + 1) << 8) | clause->flags;
7414 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7415 tblock->real_offset = clause->handler_offset;
7416 tblock->flags |= BB_EXCEPTION_HANDLER;
7418 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
7419 mono_create_exvar_for_offset (cfg, clause->handler_offset);
7421 * Linking the try block with the EH block hinders inlining as we won't be able to
7422 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7424 if (COMPILE_LLVM (cfg))
7425 link_bblock (cfg, try_bb, tblock);
7427 if (*(ip + clause->handler_offset) == CEE_POP)
7428 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7430 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7431 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7432 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7433 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7434 MONO_ADD_INS (tblock, ins);
7436 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7437 /* finally clauses already have a seq point */
7438 /* seq points for filter clauses are emitted below */
7439 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7440 MONO_ADD_INS (tblock, ins);
7443 /* todo: is a fault block unsafe to optimize? */
7444 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7445 tblock->flags |= BB_EXCEPTION_UNSAFE;
7448 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7450 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7452 /* catch and filter blocks get the exception object on the stack */
7453 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7454 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7456 /* mostly like handle_stack_args (), but just sets the input args */
7457 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7458 tblock->in_scount = 1;
7459 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7460 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7464 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7465 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7466 if (!cfg->compile_llvm) {
7467 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7468 ins->dreg = tblock->in_stack [0]->dreg;
7469 MONO_ADD_INS (tblock, ins);
7472 MonoInst *dummy_use;
7475 * Add a dummy use for the exvar so its liveness info will be
7478 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7481 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7482 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7483 MONO_ADD_INS (tblock, ins);
7486 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7487 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7488 tblock->flags |= BB_EXCEPTION_HANDLER;
7489 tblock->real_offset = clause->data.filter_offset;
7490 tblock->in_scount = 1;
7491 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7492 /* The filter block shares the exvar with the handler block */
7493 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7494 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7495 MONO_ADD_INS (tblock, ins);
7499 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7500 clause->data.catch_class &&
7502 mono_class_check_context_used (clause->data.catch_class)) {
7504 * In shared generic code with catch
7505 * clauses containing type variables
7506 * the exception handling code has to
7507 * be able to get to the rgctx.
7508 * Therefore we have to make sure that
7509 * the vtable/mrgctx argument (for
7510 * static or generic methods) or the
7511 * "this" argument (for non-static
7512 * methods) are live.
7514 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7515 mini_method_get_context (method)->method_inst ||
7516 method->klass->valuetype) {
7517 mono_get_vtable_var (cfg);
7519 MonoInst *dummy_use;
7521 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7526 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7527 cfg->cbb = start_bblock;
7528 cfg->args = arg_array;
7529 mono_save_args (cfg, sig, inline_args);
7532 /* FIRST CODE BLOCK */
7533 NEW_BBLOCK (cfg, tblock);
7534 tblock->cil_code = ip;
7538 ADD_BBLOCK (cfg, tblock);
7540 if (cfg->method == method) {
7541 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7542 if (breakpoint_id) {
7543 MONO_INST_NEW (cfg, ins, OP_BREAK);
7544 MONO_ADD_INS (cfg->cbb, ins);
7548 /* we use a separate basic block for the initialization code */
7549 NEW_BBLOCK (cfg, init_localsbb);
7550 if (cfg->method == method)
7551 cfg->bb_init = init_localsbb;
7552 init_localsbb->real_offset = cfg->real_offset;
7553 start_bblock->next_bb = init_localsbb;
7554 init_localsbb->next_bb = cfg->cbb;
7555 link_bblock (cfg, start_bblock, init_localsbb);
7556 link_bblock (cfg, init_localsbb, cfg->cbb);
7558 cfg->cbb = init_localsbb;
7560 if (cfg->gsharedvt && cfg->method == method) {
7561 MonoGSharedVtMethodInfo *info;
7562 MonoInst *var, *locals_var;
7565 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7566 info->method = cfg->method;
7567 info->count_entries = 16;
7568 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7569 cfg->gsharedvt_info = info;
7571 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7572 /* prevent it from being register allocated */
7573 //var->flags |= MONO_INST_VOLATILE;
7574 cfg->gsharedvt_info_var = var;
7576 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7577 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7579 /* Allocate locals */
7580 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7581 /* prevent it from being register allocated */
7582 //locals_var->flags |= MONO_INST_VOLATILE;
7583 cfg->gsharedvt_locals_var = locals_var;
7585 dreg = alloc_ireg (cfg);
7586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7588 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7589 ins->dreg = locals_var->dreg;
7591 MONO_ADD_INS (cfg->cbb, ins);
7592 cfg->gsharedvt_locals_var_ins = ins;
7594 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7597 ins->flags |= MONO_INST_INIT;
7601 if (mono_security_core_clr_enabled ()) {
7602 /* check if this is native code, e.g. an icall or a p/invoke */
7603 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7604 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7606 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7607 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7609 /* if this ia a native call then it can only be JITted from platform code */
7610 if ((icall || pinvk) && method->klass && method->klass->image) {
7611 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7612 MonoException *ex = icall ? mono_get_exception_security () :
7613 mono_get_exception_method_access ();
7614 emit_throw_exception (cfg, ex);
7621 CHECK_CFG_EXCEPTION;
7623 if (header->code_size == 0)
7626 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7631 if (cfg->method == method)
7632 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7634 for (n = 0; n < header->num_locals; ++n) {
7635 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7640 /* We force the vtable variable here for all shared methods
7641 for the possibility that they might show up in a stack
7642 trace where their exact instantiation is needed. */
7643 if (cfg->gshared && method == cfg->method) {
7644 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7645 mini_method_get_context (method)->method_inst ||
7646 method->klass->valuetype) {
7647 mono_get_vtable_var (cfg);
7649 /* FIXME: Is there a better way to do this?
7650 We need the variable live for the duration
7651 of the whole method. */
7652 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7656 /* add a check for this != NULL to inlined methods */
7657 if (is_virtual_call) {
7660 NEW_ARGLOAD (cfg, arg_ins, 0);
7661 MONO_ADD_INS (cfg->cbb, arg_ins);
7662 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7665 skip_dead_blocks = !dont_verify;
7666 if (skip_dead_blocks) {
7667 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7672 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7673 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7676 start_new_bblock = 0;
7678 if (cfg->method == method)
7679 cfg->real_offset = ip - header->code;
7681 cfg->real_offset = inline_offset;
7686 if (start_new_bblock) {
7687 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7688 if (start_new_bblock == 2) {
7689 g_assert (ip == tblock->cil_code);
7691 GET_BBLOCK (cfg, tblock, ip);
7693 cfg->cbb->next_bb = tblock;
7695 start_new_bblock = 0;
7696 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7697 if (cfg->verbose_level > 3)
7698 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7699 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7703 g_slist_free (class_inits);
7706 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7707 link_bblock (cfg, cfg->cbb, tblock);
7708 if (sp != stack_start) {
7709 handle_stack_args (cfg, stack_start, sp - stack_start);
7711 CHECK_UNVERIFIABLE (cfg);
7713 cfg->cbb->next_bb = tblock;
7715 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7716 if (cfg->verbose_level > 3)
7717 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7718 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7721 g_slist_free (class_inits);
7726 if (skip_dead_blocks) {
7727 int ip_offset = ip - header->code;
7729 if (ip_offset == bb->end)
7733 int op_size = mono_opcode_size (ip, end);
7734 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7736 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7738 if (ip_offset + op_size == bb->end) {
7739 MONO_INST_NEW (cfg, ins, OP_NOP);
7740 MONO_ADD_INS (cfg->cbb, ins);
7741 start_new_bblock = 1;
7749 * Sequence points are points where the debugger can place a breakpoint.
7750 * Currently, we generate these automatically at points where the IL
7753 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7755 * Make methods interruptable at the beginning, and at the targets of
7756 * backward branches.
7757 * Also, do this at the start of every bblock in methods with clauses too,
7758 * to be able to handle instructions with imprecise control flow like
7760 * Backward branches are handled at the end of method-to-ir ().
7762 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7763 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7765 /* Avoid sequence points on empty IL like .volatile */
7766 // FIXME: Enable this
7767 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7768 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7769 if ((sp != stack_start) && !sym_seq_point)
7770 ins->flags |= MONO_INST_NONEMPTY_STACK;
7771 MONO_ADD_INS (cfg->cbb, ins);
7774 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7777 cfg->cbb->real_offset = cfg->real_offset;
7779 if ((cfg->method == method) && cfg->coverage_info) {
7780 guint32 cil_offset = ip - header->code;
7781 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
7782 cfg->coverage_info->data [cil_offset].cil_code = ip;
7784 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
7785 MonoInst *one_ins, *load_ins;
7787 EMIT_NEW_PCONST (cfg, load_ins, counter);
7788 EMIT_NEW_ICONST (cfg, one_ins, 1);
7789 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
7790 ins->dreg = mono_alloc_ireg (cfg);
7791 ins->inst_basereg = load_ins->dreg;
7792 ins->inst_offset = 0;
7793 ins->sreg2 = one_ins->dreg;
7794 ins->type = STACK_I4;
7795 MONO_ADD_INS (cfg->cbb, ins);
7797 EMIT_NEW_PCONST (cfg, ins, counter);
7798 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7802 if (cfg->verbose_level > 3)
7803 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7807 if (seq_points && !sym_seq_points && sp != stack_start) {
7809 * The C# compiler uses these nops to notify the JIT that it should
7810 * insert seq points.
7812 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7813 MONO_ADD_INS (cfg->cbb, ins);
7815 if (cfg->keep_cil_nops)
7816 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7818 MONO_INST_NEW (cfg, ins, OP_NOP);
7820 MONO_ADD_INS (cfg->cbb, ins);
7823 if (mini_should_insert_breakpoint (cfg->method)) {
7824 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7826 MONO_INST_NEW (cfg, ins, OP_NOP);
7829 MONO_ADD_INS (cfg->cbb, ins);
7835 CHECK_STACK_OVF (1);
7836 n = (*ip)-CEE_LDARG_0;
7838 EMIT_NEW_ARGLOAD (cfg, ins, n);
7846 CHECK_STACK_OVF (1);
7847 n = (*ip)-CEE_LDLOC_0;
7849 EMIT_NEW_LOCLOAD (cfg, ins, n);
7858 n = (*ip)-CEE_STLOC_0;
7861 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7863 emit_stloc_ir (cfg, sp, header, n);
7870 CHECK_STACK_OVF (1);
7873 EMIT_NEW_ARGLOAD (cfg, ins, n);
7879 CHECK_STACK_OVF (1);
7882 NEW_ARGLOADA (cfg, ins, n);
7883 MONO_ADD_INS (cfg->cbb, ins);
7893 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7895 emit_starg_ir (cfg, sp, n);
7900 CHECK_STACK_OVF (1);
7903 if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
7904 /* Avoid loading a struct just to load one of its fields */
7905 EMIT_NEW_LOCLOADA (cfg, ins, n);
7907 EMIT_NEW_LOCLOAD (cfg, ins, n);
7912 case CEE_LDLOCA_S: {
7913 unsigned char *tmp_ip;
7915 CHECK_STACK_OVF (1);
7916 CHECK_LOCAL (ip [1]);
7918 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7924 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7933 CHECK_LOCAL (ip [1]);
7934 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7936 emit_stloc_ir (cfg, sp, header, ip [1]);
7941 CHECK_STACK_OVF (1);
7942 EMIT_NEW_PCONST (cfg, ins, NULL);
7943 ins->type = STACK_OBJ;
7948 CHECK_STACK_OVF (1);
7949 EMIT_NEW_ICONST (cfg, ins, -1);
7962 CHECK_STACK_OVF (1);
7963 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7969 CHECK_STACK_OVF (1);
7971 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7977 CHECK_STACK_OVF (1);
7978 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7984 CHECK_STACK_OVF (1);
7985 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7986 ins->type = STACK_I8;
7987 ins->dreg = alloc_dreg (cfg, STACK_I8);
7989 ins->inst_l = (gint64)read64 (ip);
7990 MONO_ADD_INS (cfg->cbb, ins);
7996 gboolean use_aotconst = FALSE;
7998 #ifdef TARGET_POWERPC
7999 /* FIXME: Clean this up */
8000 if (cfg->compile_aot)
8001 use_aotconst = TRUE;
8004 /* FIXME: we should really allocate this only late in the compilation process */
8005 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8007 CHECK_STACK_OVF (1);
8013 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8015 dreg = alloc_freg (cfg);
8016 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8017 ins->type = cfg->r4_stack_type;
8019 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8020 ins->type = cfg->r4_stack_type;
8021 ins->dreg = alloc_dreg (cfg, STACK_R8);
8023 MONO_ADD_INS (cfg->cbb, ins);
8033 gboolean use_aotconst = FALSE;
8035 #ifdef TARGET_POWERPC
8036 /* FIXME: Clean this up */
8037 if (cfg->compile_aot)
8038 use_aotconst = TRUE;
8041 /* FIXME: we should really allocate this only late in the compilation process */
8042 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8044 CHECK_STACK_OVF (1);
8050 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8052 dreg = alloc_freg (cfg);
8053 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8054 ins->type = STACK_R8;
8056 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8057 ins->type = STACK_R8;
8058 ins->dreg = alloc_dreg (cfg, STACK_R8);
8060 MONO_ADD_INS (cfg->cbb, ins);
8069 MonoInst *temp, *store;
8071 CHECK_STACK_OVF (1);
8075 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8076 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8078 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8081 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8094 if (sp [0]->type == STACK_R8)
8095 /* we need to pop the value from the x86 FP stack */
8096 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8101 MonoMethodSignature *fsig;
8104 INLINE_FAILURE ("jmp");
8105 GSHAREDVT_FAILURE (*ip);
8108 if (stack_start != sp)
8110 token = read32 (ip + 1);
8111 /* FIXME: check the signature matches */
8112 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8115 if (cfg->gshared && mono_method_check_context_used (cmethod))
8116 GENERIC_SHARING_FAILURE (CEE_JMP);
8118 mini_profiler_emit_tail_call (cfg, cmethod);
8120 fsig = mono_method_signature (cmethod);
8121 n = fsig->param_count + fsig->hasthis;
8122 if (cfg->llvm_only) {
8125 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8126 for (i = 0; i < n; ++i)
8127 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8128 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8130 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8131 * have to emit a normal return since llvm expects it.
8134 emit_setret (cfg, ins);
8135 MONO_INST_NEW (cfg, ins, OP_BR);
8136 ins->inst_target_bb = end_bblock;
8137 MONO_ADD_INS (cfg->cbb, ins);
8138 link_bblock (cfg, cfg->cbb, end_bblock);
8141 } else if (cfg->backend->have_op_tail_call) {
8142 /* Handle tail calls similarly to calls */
8145 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8146 call->method = cmethod;
8147 call->tail_call = TRUE;
8148 call->signature = mono_method_signature (cmethod);
8149 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8150 call->inst.inst_p0 = cmethod;
8151 for (i = 0; i < n; ++i)
8152 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8154 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8155 call->vret_var = cfg->vret_addr;
8157 mono_arch_emit_call (cfg, call);
8158 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8159 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8161 for (i = 0; i < num_args; ++i)
8162 /* Prevent arguments from being optimized away */
8163 arg_array [i]->flags |= MONO_INST_VOLATILE;
8165 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8166 ins = (MonoInst*)call;
8167 ins->inst_p0 = cmethod;
8168 MONO_ADD_INS (cfg->cbb, ins);
8172 start_new_bblock = 1;
8177 MonoMethodSignature *fsig;
8180 token = read32 (ip + 1);
8184 //GSHAREDVT_FAILURE (*ip);
8189 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8192 if (method->dynamic && fsig->pinvoke) {
8196 * This is a call through a function pointer using a pinvoke
8197 * signature. Have to create a wrapper and call that instead.
8198 * FIXME: This is very slow, need to create a wrapper at JIT time
8199 * instead based on the signature.
8201 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8202 EMIT_NEW_PCONST (cfg, args [1], fsig);
8204 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8207 n = fsig->param_count + fsig->hasthis;
8211 //g_assert (!virtual_ || fsig->hasthis);
8215 inline_costs += 10 * num_calls++;
8218 * Making generic calls out of gsharedvt methods.
8219 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8220 * patching gshared method addresses into a gsharedvt method.
8222 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8224 * We pass the address to the gsharedvt trampoline in the rgctx reg
8226 MonoInst *callee = addr;
8228 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8230 GSHAREDVT_FAILURE (*ip);
8234 GSHAREDVT_FAILURE (*ip);
8236 addr = emit_get_rgctx_sig (cfg, context_used,
8237 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8238 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8242 /* Prevent inlining of methods with indirect calls */
8243 INLINE_FAILURE ("indirect call");
8245 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8246 MonoJumpInfoType info_type;
8250 * Instead of emitting an indirect call, emit a direct call
8251 * with the contents of the aotconst as the patch info.
8253 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8254 info_type = (MonoJumpInfoType)addr->inst_c1;
8255 info_data = addr->inst_p0;
8257 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8258 info_data = addr->inst_right->inst_left;
8261 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8262 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8265 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8266 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8271 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8275 /* End of call, INS should contain the result of the call, if any */
8277 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8279 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8282 CHECK_CFG_EXCEPTION;
8286 constrained_class = NULL;
8290 case CEE_CALLVIRT: {
8291 MonoInst *addr = NULL;
8292 MonoMethodSignature *fsig = NULL;
8294 int virtual_ = *ip == CEE_CALLVIRT;
8295 gboolean pass_imt_from_rgctx = FALSE;
8296 MonoInst *imt_arg = NULL;
8297 MonoInst *keep_this_alive = NULL;
8298 gboolean pass_vtable = FALSE;
8299 gboolean pass_mrgctx = FALSE;
8300 MonoInst *vtable_arg = NULL;
8301 gboolean check_this = FALSE;
8302 gboolean supported_tail_call = FALSE;
8303 gboolean tail_call = FALSE;
8304 gboolean need_seq_point = FALSE;
8305 guint32 call_opcode = *ip;
8306 gboolean emit_widen = TRUE;
8307 gboolean push_res = TRUE;
8308 gboolean skip_ret = FALSE;
8309 gboolean delegate_invoke = FALSE;
8310 gboolean direct_icall = FALSE;
8311 gboolean constrained_partial_call = FALSE;
8312 MonoMethod *cil_method;
8315 token = read32 (ip + 1);
8319 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8322 cil_method = cmethod;
8324 if (constrained_class) {
8325 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8326 if (!mini_is_gsharedvt_klass (constrained_class)) {
8327 g_assert (!cmethod->klass->valuetype);
8328 if (!mini_type_is_reference (&constrained_class->byval_arg))
8329 constrained_partial_call = TRUE;
8333 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8334 if (cfg->verbose_level > 2)
8335 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8336 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8337 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8339 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8343 if (cfg->verbose_level > 2)
8344 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8346 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8348 * This is needed since get_method_constrained can't find
8349 * the method in klass representing a type var.
8350 * The type var is guaranteed to be a reference type in this
8353 if (!mini_is_gsharedvt_klass (constrained_class))
8354 g_assert (!cmethod->klass->valuetype);
8356 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8361 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8362 /* Use the corresponding method from the base type to avoid boxing */
8363 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8364 g_assert (base_type);
8365 constrained_class = mono_class_from_mono_type (base_type);
8366 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8371 if (!dont_verify && !cfg->skip_visibility) {
8372 MonoMethod *target_method = cil_method;
8373 if (method->is_inflated) {
8374 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8377 if (!mono_method_can_access_method (method_definition, target_method) &&
8378 !mono_method_can_access_method (method, cil_method))
8379 emit_method_access_failure (cfg, method, cil_method);
8382 if (mono_security_core_clr_enabled ())
8383 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8385 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8386 /* MS.NET seems to silently convert this to a callvirt */
8391 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8392 * converts to a callvirt.
8394 * tests/bug-515884.il is an example of this behavior
8396 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8397 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8398 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8402 if (!cmethod->klass->inited)
8403 if (!mono_class_init (cmethod->klass))
8404 TYPE_LOAD_ERROR (cmethod->klass);
8406 fsig = mono_method_signature (cmethod);
8409 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8410 mini_class_is_system_array (cmethod->klass)) {
8411 array_rank = cmethod->klass->rank;
8412 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8413 direct_icall = TRUE;
8414 } else if (fsig->pinvoke) {
8415 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8416 fsig = mono_method_signature (wrapper);
8417 } else if (constrained_class) {
8419 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8423 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8424 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8426 /* See code below */
8427 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8428 MonoBasicBlock *tbb;
8430 GET_BBLOCK (cfg, tbb, ip + 5);
8431 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8433 * We want to extend the try block to cover the call, but we can't do it if the
8434 * call is made directly since it's followed by an exception check.
8436 direct_icall = FALSE;
8440 mono_save_token_info (cfg, image, token, cil_method);
8442 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8443 need_seq_point = TRUE;
8445 /* Don't support calls made using type arguments for now */
8447 if (cfg->gsharedvt) {
8448 if (mini_is_gsharedvt_signature (fsig))
8449 GSHAREDVT_FAILURE (*ip);
8453 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8454 g_assert_not_reached ();
8456 n = fsig->param_count + fsig->hasthis;
8458 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8462 g_assert (!mono_method_check_context_used (cmethod));
8466 //g_assert (!virtual_ || fsig->hasthis);
8470 if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
8471 cfg->cbb->out_of_line = TRUE;
8474 * We have the `constrained.' prefix opcode.
8476 if (constrained_class) {
8477 if (mini_is_gsharedvt_klass (constrained_class)) {
8478 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8479 /* The 'Own method' case below */
8480 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8481 /* 'The type parameter is instantiated as a reference type' case below. */
8483 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8484 CHECK_CFG_EXCEPTION;
8490 if (constrained_partial_call) {
8491 gboolean need_box = TRUE;
8494 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8495 * called method is not known at compile time either. The called method could end up being
8496 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8497 * to box the receiver.
8498 * A simple solution would be to box always and make a normal virtual call, but that would
8499 * be bad performance wise.
8501 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8503 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8508 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8509 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8510 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8511 ins->klass = constrained_class;
8512 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8513 CHECK_CFG_EXCEPTION;
8514 } else if (need_box) {
8516 MonoBasicBlock *is_ref_bb, *end_bb;
8517 MonoInst *nonbox_call;
8520 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8522 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8523 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8525 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8527 NEW_BBLOCK (cfg, is_ref_bb);
8528 NEW_BBLOCK (cfg, end_bb);
8530 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8532 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8535 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8540 MONO_START_BB (cfg, is_ref_bb);
8541 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8542 ins->klass = constrained_class;
8543 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8544 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8548 MONO_START_BB (cfg, end_bb);
8551 nonbox_call->dreg = ins->dreg;
8554 g_assert (mono_class_is_interface (cmethod->klass));
8555 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8556 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8559 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8561 * The type parameter is instantiated as a valuetype,
8562 * but that type doesn't override the method we're
8563 * calling, so we need to box `this'.
8565 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8566 ins->klass = constrained_class;
8567 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8568 CHECK_CFG_EXCEPTION;
8569 } else if (!constrained_class->valuetype) {
8570 int dreg = alloc_ireg_ref (cfg);
8573 * The type parameter is instantiated as a reference
8574 * type. We have a managed pointer on the stack, so
8575 * we need to dereference it here.
8577 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8578 ins->type = STACK_OBJ;
8581 if (cmethod->klass->valuetype) {
8584 /* Interface method */
8587 mono_class_setup_vtable (constrained_class);
8588 CHECK_TYPELOAD (constrained_class);
8589 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8591 TYPE_LOAD_ERROR (constrained_class);
8592 slot = mono_method_get_vtable_slot (cmethod);
8594 TYPE_LOAD_ERROR (cmethod->klass);
8595 cmethod = constrained_class->vtable [ioffset + slot];
8597 if (cmethod->klass == mono_defaults.enum_class) {
8598 /* Enum implements some interfaces, so treat this as the first case */
8599 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8600 ins->klass = constrained_class;
8601 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8602 CHECK_CFG_EXCEPTION;
8607 constrained_class = NULL;
8610 if (check_call_signature (cfg, fsig, sp))
8613 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8614 delegate_invoke = TRUE;
8616 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8617 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8618 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8626 * If the callee is a shared method, then its static cctor
8627 * might not get called after the call was patched.
8629 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8630 emit_class_init (cfg, cmethod->klass);
8631 CHECK_TYPELOAD (cmethod->klass);
8634 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8637 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8639 context_used = mini_method_check_context_used (cfg, cmethod);
8641 if (context_used && mono_class_is_interface (cmethod->klass)) {
8642 /* Generic method interface
8643 calls are resolved via a
8644 helper function and don't
8646 if (!cmethod_context || !cmethod_context->method_inst)
8647 pass_imt_from_rgctx = TRUE;
8651 * If a shared method calls another
8652 * shared method then the caller must
8653 * have a generic sharing context
8654 * because the magic trampoline
8655 * requires it. FIXME: We shouldn't
8656 * have to force the vtable/mrgctx
8657 * variable here. Instead there
8658 * should be a flag in the cfg to
8659 * request a generic sharing context.
8662 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8663 mono_get_vtable_var (cfg);
8668 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8670 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8672 CHECK_TYPELOAD (cmethod->klass);
8673 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8678 g_assert (!vtable_arg);
8680 if (!cfg->compile_aot) {
8682 * emit_get_rgctx_method () calls mono_class_vtable () so check
8683 * for type load errors before.
8685 mono_class_setup_vtable (cmethod->klass);
8686 CHECK_TYPELOAD (cmethod->klass);
8689 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8691 /* !marshalbyref is needed to properly handle generic methods + remoting */
8692 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8693 MONO_METHOD_IS_FINAL (cmethod)) &&
8694 !mono_class_is_marshalbyref (cmethod->klass)) {
8701 if (pass_imt_from_rgctx) {
8702 g_assert (!pass_vtable);
8704 imt_arg = emit_get_rgctx_method (cfg, context_used,
8705 cmethod, MONO_RGCTX_INFO_METHOD);
8709 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8711 /* Calling virtual generic methods */
8712 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8713 !(MONO_METHOD_IS_FINAL (cmethod) &&
8714 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8715 fsig->generic_param_count &&
8716 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8718 MonoInst *this_temp, *this_arg_temp, *store;
8719 MonoInst *iargs [4];
8721 g_assert (fsig->is_inflated);
8723 /* Prevent inlining of methods that contain indirect calls */
8724 INLINE_FAILURE ("virtual generic call");
8726 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8727 GSHAREDVT_FAILURE (*ip);
8729 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8730 g_assert (!imt_arg);
8732 g_assert (cmethod->is_inflated);
8733 imt_arg = emit_get_rgctx_method (cfg, context_used,
8734 cmethod, MONO_RGCTX_INFO_METHOD);
8735 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8737 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8738 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8739 MONO_ADD_INS (cfg->cbb, store);
8741 /* FIXME: This should be a managed pointer */
8742 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8744 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8745 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8746 cmethod, MONO_RGCTX_INFO_METHOD);
8747 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8748 addr = mono_emit_jit_icall (cfg,
8749 mono_helper_compile_generic_method, iargs);
8751 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8753 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8760 * Implement a workaround for the inherent races involved in locking:
8766 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8767 * try block, the Exit () won't be executed, see:
8768 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8769 * To work around this, we extend such try blocks to include the last x bytes
8770 * of the Monitor.Enter () call.
8772 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8773 MonoBasicBlock *tbb;
8775 GET_BBLOCK (cfg, tbb, ip + 5);
8777 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8778 * from Monitor.Enter like ArgumentNullException.
8780 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8781 /* Mark this bblock as needing to be extended */
8782 tbb->extend_try_block = TRUE;
8786 /* Conversion to a JIT intrinsic */
8787 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8788 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8789 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8797 if ((cfg->opt & MONO_OPT_INLINE) &&
8798 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8799 mono_method_check_inlining (cfg, cmethod)) {
8801 gboolean always = FALSE;
8803 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8804 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8805 /* Prevent inlining of methods that call wrappers */
8806 INLINE_FAILURE ("wrapper call");
8807 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8811 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8813 cfg->real_offset += 5;
8815 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8816 /* *sp is already set by inline_method */
8821 inline_costs += costs;
8827 /* Tail recursion elimination */
8828 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8829 gboolean has_vtargs = FALSE;
8832 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8833 INLINE_FAILURE ("tail call");
8835 /* keep it simple */
8836 for (i = fsig->param_count - 1; i >= 0; i--) {
8837 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8842 if (need_seq_point) {
8843 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8844 need_seq_point = FALSE;
8846 for (i = 0; i < n; ++i)
8847 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8848 MONO_INST_NEW (cfg, ins, OP_BR);
8849 MONO_ADD_INS (cfg->cbb, ins);
8850 tblock = start_bblock->out_bb [0];
8851 link_bblock (cfg, cfg->cbb, tblock);
8852 ins->inst_target_bb = tblock;
8853 start_new_bblock = 1;
8855 /* skip the CEE_RET, too */
8856 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
8863 inline_costs += 10 * num_calls++;
8866 * Synchronized wrappers.
8867 * It's hard to determine where to replace a method with its synchronized
8868 * wrapper without causing an infinite recursion. The current solution is
8869 * to add the synchronized wrapper in the trampolines, and to
8870 * change the called method to a dummy wrapper, and resolve that wrapper
8871 * to the real method in mono_jit_compile_method ().
8873 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8874 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8875 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8876 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8880 * Making generic calls out of gsharedvt methods.
8881 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8882 * patching gshared method addresses into a gsharedvt method.
8884 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
8885 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
8886 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
8887 MonoRgctxInfoType info_type;
8890 //if (mono_class_is_interface (cmethod->klass))
8891 //GSHAREDVT_FAILURE (*ip);
8892 // disable for possible remoting calls
8893 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8894 GSHAREDVT_FAILURE (*ip);
8895 if (fsig->generic_param_count) {
8896 /* virtual generic call */
8897 g_assert (!imt_arg);
8898 /* Same as the virtual generic case above */
8899 imt_arg = emit_get_rgctx_method (cfg, context_used,
8900 cmethod, MONO_RGCTX_INFO_METHOD);
8901 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8903 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
8904 /* This can happen when we call a fully instantiated iface method */
8905 imt_arg = emit_get_rgctx_method (cfg, context_used,
8906 cmethod, MONO_RGCTX_INFO_METHOD);
8911 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8912 keep_this_alive = sp [0];
8914 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8915 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8917 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8918 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8920 if (cfg->llvm_only) {
8921 // FIXME: Avoid initializing vtable_arg
8922 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8924 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8929 /* Generic sharing */
8932 * Use this if the callee is gsharedvt sharable too, since
8933 * at runtime we might find an instantiation so the call cannot
8934 * be patched (the 'no_patch' code path in mini-trampolines.c).
8936 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8937 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8938 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8939 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
8940 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8941 INLINE_FAILURE ("gshared");
8943 g_assert (cfg->gshared && cmethod);
8947 * We are compiling a call to a
8948 * generic method from shared code,
8949 * which means that we have to look up
8950 * the method in the rgctx and do an
8954 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8956 if (cfg->llvm_only) {
8957 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
8958 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
8960 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8961 // FIXME: Avoid initializing imt_arg/vtable_arg
8962 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8964 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8965 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8970 /* Direct calls to icalls */
8972 MonoMethod *wrapper;
8975 /* Inline the wrapper */
8976 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8978 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
8979 g_assert (costs > 0);
8980 cfg->real_offset += 5;
8982 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8983 /* *sp is already set by inline_method */
8988 inline_costs += costs;
8997 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8998 MonoInst *val = sp [fsig->param_count];
9000 if (val->type == STACK_OBJ) {
9001 MonoInst *iargs [2];
9006 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9009 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9010 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9011 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9012 mini_emit_write_barrier (cfg, addr, val);
9013 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9014 GSHAREDVT_FAILURE (*ip);
9015 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9016 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9018 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9019 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9020 if (!cmethod->klass->element_class->valuetype && !readonly)
9021 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9022 CHECK_TYPELOAD (cmethod->klass);
9025 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9028 g_assert_not_reached ();
9035 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9039 /* Tail prefix / tail call optimization */
9041 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9042 /* FIXME: runtime generic context pointer for jumps? */
9043 /* FIXME: handle this for generic sharing eventually */
9044 if ((ins_flag & MONO_INST_TAILCALL) &&
9045 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9046 supported_tail_call = TRUE;
9048 if (supported_tail_call) {
9051 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9052 INLINE_FAILURE ("tail call");
9054 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9056 if (cfg->backend->have_op_tail_call) {
9057 /* Handle tail calls similarly to normal calls */
9060 mini_profiler_emit_tail_call (cfg, cmethod);
9062 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9063 call->tail_call = TRUE;
9064 call->method = cmethod;
9065 call->signature = mono_method_signature (cmethod);
9068 * We implement tail calls by storing the actual arguments into the
9069 * argument variables, then emitting a CEE_JMP.
9071 for (i = 0; i < n; ++i) {
9072 /* Prevent argument from being register allocated */
9073 arg_array [i]->flags |= MONO_INST_VOLATILE;
9074 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9076 ins = (MonoInst*)call;
9077 ins->inst_p0 = cmethod;
9078 ins->inst_p1 = arg_array [0];
9079 MONO_ADD_INS (cfg->cbb, ins);
9080 link_bblock (cfg, cfg->cbb, end_bblock);
9081 start_new_bblock = 1;
9083 // FIXME: Eliminate unreachable epilogs
9086 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9087 * only reachable from this call.
9089 GET_BBLOCK (cfg, tblock, ip + 5);
9090 if (tblock == cfg->cbb || tblock->in_count == 0)
9099 * Virtual calls in llvm-only mode.
9101 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9102 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9107 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9108 INLINE_FAILURE ("call");
9109 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9110 imt_arg, vtable_arg);
9112 if (tail_call && !cfg->llvm_only) {
9113 link_bblock (cfg, cfg->cbb, end_bblock);
9114 start_new_bblock = 1;
9116 // FIXME: Eliminate unreachable epilogs
9119 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9120 * only reachable from this call.
9122 GET_BBLOCK (cfg, tblock, ip + 5);
9123 if (tblock == cfg->cbb || tblock->in_count == 0)
9130 /* End of call, INS should contain the result of the call, if any */
9132 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9135 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9140 if (keep_this_alive) {
9141 MonoInst *dummy_use;
9143 /* See mono_emit_method_call_full () */
9144 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9147 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9149 * Clang can convert these calls to tail calls which screw up the stack
9150 * walk. This happens even when the -fno-optimize-sibling-calls
9151 * option is passed to clang.
9152 * Work around this by emitting a dummy call.
9154 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9157 CHECK_CFG_EXCEPTION;
9161 g_assert (*ip == CEE_RET);
9165 constrained_class = NULL;
9167 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9171 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
9173 if (cfg->method != method) {
9174 /* return from inlined method */
9176 * If in_count == 0, that means the ret is unreachable due to
9177 * being preceded by a throw. In that case, inline_method () will
9178 * handle setting the return value
9179 * (test case: test_0_inline_throw ()).
9181 if (return_var && cfg->cbb->in_count) {
9182 MonoType *ret_type = mono_method_signature (method)->ret;
9188 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9191 //g_assert (returnvar != -1);
9192 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9193 cfg->ret_var_set = TRUE;
9196 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9200 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9202 if (seq_points && !sym_seq_points) {
9204 * Place a seq point here too even though the IL stack is not
9205 * empty, so a step over on
9208 * will work correctly.
9210 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9211 MONO_ADD_INS (cfg->cbb, ins);
9214 g_assert (!return_var);
9218 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9221 emit_setret (cfg, *sp);
9224 if (sp != stack_start)
9226 MONO_INST_NEW (cfg, ins, OP_BR);
9228 ins->inst_target_bb = end_bblock;
9229 MONO_ADD_INS (cfg->cbb, ins);
9230 link_bblock (cfg, cfg->cbb, end_bblock);
9231 start_new_bblock = 1;
9235 MONO_INST_NEW (cfg, ins, OP_BR);
9237 target = ip + 1 + (signed char)(*ip);
9239 GET_BBLOCK (cfg, tblock, target);
9240 link_bblock (cfg, cfg->cbb, tblock);
9241 ins->inst_target_bb = tblock;
9242 if (sp != stack_start) {
9243 handle_stack_args (cfg, stack_start, sp - stack_start);
9245 CHECK_UNVERIFIABLE (cfg);
9247 MONO_ADD_INS (cfg->cbb, ins);
9248 start_new_bblock = 1;
9249 inline_costs += BRANCH_COST;
9263 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9265 target = ip + 1 + *(signed char*)ip;
9271 inline_costs += BRANCH_COST;
9275 MONO_INST_NEW (cfg, ins, OP_BR);
9278 target = ip + 4 + (gint32)read32(ip);
9280 GET_BBLOCK (cfg, tblock, target);
9281 link_bblock (cfg, cfg->cbb, tblock);
9282 ins->inst_target_bb = tblock;
9283 if (sp != stack_start) {
9284 handle_stack_args (cfg, stack_start, sp - stack_start);
9286 CHECK_UNVERIFIABLE (cfg);
9289 MONO_ADD_INS (cfg->cbb, ins);
9291 start_new_bblock = 1;
9292 inline_costs += BRANCH_COST;
9299 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9300 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9301 guint32 opsize = is_short ? 1 : 4;
9303 CHECK_OPSIZE (opsize);
9305 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9308 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9313 GET_BBLOCK (cfg, tblock, target);
9314 link_bblock (cfg, cfg->cbb, tblock);
9315 GET_BBLOCK (cfg, tblock, ip);
9316 link_bblock (cfg, cfg->cbb, tblock);
9318 if (sp != stack_start) {
9319 handle_stack_args (cfg, stack_start, sp - stack_start);
9320 CHECK_UNVERIFIABLE (cfg);
9323 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9324 cmp->sreg1 = sp [0]->dreg;
9325 type_from_op (cfg, cmp, sp [0], NULL);
9328 #if SIZEOF_REGISTER == 4
9329 if (cmp->opcode == OP_LCOMPARE_IMM) {
9330 /* Convert it to OP_LCOMPARE */
9331 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9332 ins->type = STACK_I8;
9333 ins->dreg = alloc_dreg (cfg, STACK_I8);
9335 MONO_ADD_INS (cfg->cbb, ins);
9336 cmp->opcode = OP_LCOMPARE;
9337 cmp->sreg2 = ins->dreg;
9340 MONO_ADD_INS (cfg->cbb, cmp);
9342 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9343 type_from_op (cfg, ins, sp [0], NULL);
9344 MONO_ADD_INS (cfg->cbb, ins);
9345 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9346 GET_BBLOCK (cfg, tblock, target);
9347 ins->inst_true_bb = tblock;
9348 GET_BBLOCK (cfg, tblock, ip);
9349 ins->inst_false_bb = tblock;
9350 start_new_bblock = 2;
9353 inline_costs += BRANCH_COST;
9368 MONO_INST_NEW (cfg, ins, *ip);
9370 target = ip + 4 + (gint32)read32(ip);
9376 inline_costs += BRANCH_COST;
9380 MonoBasicBlock **targets;
9381 MonoBasicBlock *default_bblock;
9382 MonoJumpInfoBBTable *table;
9383 int offset_reg = alloc_preg (cfg);
9384 int target_reg = alloc_preg (cfg);
9385 int table_reg = alloc_preg (cfg);
9386 int sum_reg = alloc_preg (cfg);
9387 gboolean use_op_switch;
9391 n = read32 (ip + 1);
9394 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9398 CHECK_OPSIZE (n * sizeof (guint32));
9399 target = ip + n * sizeof (guint32);
9401 GET_BBLOCK (cfg, default_bblock, target);
9402 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9404 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9405 for (i = 0; i < n; ++i) {
9406 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9407 targets [i] = tblock;
9408 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9412 if (sp != stack_start) {
9414 * Link the current bb with the targets as well, so handle_stack_args
9415 * will set their in_stack correctly.
9417 link_bblock (cfg, cfg->cbb, default_bblock);
9418 for (i = 0; i < n; ++i)
9419 link_bblock (cfg, cfg->cbb, targets [i]);
9421 handle_stack_args (cfg, stack_start, sp - stack_start);
9423 CHECK_UNVERIFIABLE (cfg);
9425 /* Undo the links */
9426 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9427 for (i = 0; i < n; ++i)
9428 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9431 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9432 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9434 for (i = 0; i < n; ++i)
9435 link_bblock (cfg, cfg->cbb, targets [i]);
9437 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9438 table->table = targets;
9439 table->table_size = n;
9441 use_op_switch = FALSE;
9443 /* ARM implements SWITCH statements differently */
9444 /* FIXME: Make it use the generic implementation */
9445 if (!cfg->compile_aot)
9446 use_op_switch = TRUE;
9449 if (COMPILE_LLVM (cfg))
9450 use_op_switch = TRUE;
9452 cfg->cbb->has_jump_table = 1;
9454 if (use_op_switch) {
9455 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9456 ins->sreg1 = src1->dreg;
9457 ins->inst_p0 = table;
9458 ins->inst_many_bb = targets;
9459 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9460 MONO_ADD_INS (cfg->cbb, ins);
9462 if (sizeof (gpointer) == 8)
9463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9467 #if SIZEOF_REGISTER == 8
9468 /* The upper word might not be zero, and we add it to a 64 bit address later */
9469 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9472 if (cfg->compile_aot) {
9473 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9475 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9476 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9477 ins->inst_p0 = table;
9478 ins->dreg = table_reg;
9479 MONO_ADD_INS (cfg->cbb, ins);
9482 /* FIXME: Use load_memindex */
9483 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9485 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9487 start_new_bblock = 1;
9488 inline_costs += (BRANCH_COST * 2);
9505 ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
9521 if (ins_flag & MONO_INST_VOLATILE) {
9522 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9523 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9526 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9527 ins->flags |= ins_flag;
9530 MONO_ADD_INS (cfg->cbb, ins);
9532 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9533 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9542 MONO_INST_NEW (cfg, ins, (*ip));
9544 ins->sreg1 = sp [0]->dreg;
9545 ins->sreg2 = sp [1]->dreg;
9546 type_from_op (cfg, ins, sp [0], sp [1]);
9548 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9550 /* Use the immediate opcodes if possible */
9551 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9552 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9553 if (imm_opcode != -1) {
9554 ins->opcode = imm_opcode;
9555 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9558 NULLIFY_INS (sp [1]);
9562 MONO_ADD_INS ((cfg)->cbb, (ins));
9564 *sp++ = mono_decompose_opcode (cfg, ins);
9581 MONO_INST_NEW (cfg, ins, (*ip));
9583 ins->sreg1 = sp [0]->dreg;
9584 ins->sreg2 = sp [1]->dreg;
9585 type_from_op (cfg, ins, sp [0], sp [1]);
9587 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9588 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9590 /* FIXME: Pass opcode to is_inst_imm */
9592 /* Use the immediate opcodes if possible */
9593 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9594 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9595 if (imm_opcode != -1) {
9596 ins->opcode = imm_opcode;
9597 if (sp [1]->opcode == OP_I8CONST) {
9598 #if SIZEOF_REGISTER == 8
9599 ins->inst_imm = sp [1]->inst_l;
9601 ins->inst_ls_word = sp [1]->inst_ls_word;
9602 ins->inst_ms_word = sp [1]->inst_ms_word;
9606 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9609 /* Might be followed by an instruction added by add_widen_op */
9610 if (sp [1]->next == NULL)
9611 NULLIFY_INS (sp [1]);
9614 MONO_ADD_INS ((cfg)->cbb, (ins));
9616 *sp++ = mono_decompose_opcode (cfg, ins);
9629 case CEE_CONV_OVF_I8:
9630 case CEE_CONV_OVF_U8:
9634 /* Special case this earlier so we have long constants in the IR */
9635 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9636 int data = sp [-1]->inst_c0;
9637 sp [-1]->opcode = OP_I8CONST;
9638 sp [-1]->type = STACK_I8;
9639 #if SIZEOF_REGISTER == 8
9640 if ((*ip) == CEE_CONV_U8)
9641 sp [-1]->inst_c0 = (guint32)data;
9643 sp [-1]->inst_c0 = data;
9645 sp [-1]->inst_ls_word = data;
9646 if ((*ip) == CEE_CONV_U8)
9647 sp [-1]->inst_ms_word = 0;
9649 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9651 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9658 case CEE_CONV_OVF_I4:
9659 case CEE_CONV_OVF_I1:
9660 case CEE_CONV_OVF_I2:
9661 case CEE_CONV_OVF_I:
9662 case CEE_CONV_OVF_U:
9665 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9666 ADD_UNOP (CEE_CONV_OVF_I8);
9673 case CEE_CONV_OVF_U1:
9674 case CEE_CONV_OVF_U2:
9675 case CEE_CONV_OVF_U4:
9678 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9679 ADD_UNOP (CEE_CONV_OVF_U8);
9686 case CEE_CONV_OVF_I1_UN:
9687 case CEE_CONV_OVF_I2_UN:
9688 case CEE_CONV_OVF_I4_UN:
9689 case CEE_CONV_OVF_I8_UN:
9690 case CEE_CONV_OVF_U1_UN:
9691 case CEE_CONV_OVF_U2_UN:
9692 case CEE_CONV_OVF_U4_UN:
9693 case CEE_CONV_OVF_U8_UN:
9694 case CEE_CONV_OVF_I_UN:
9695 case CEE_CONV_OVF_U_UN:
9702 CHECK_CFG_EXCEPTION;
9706 case CEE_ADD_OVF_UN:
9708 case CEE_MUL_OVF_UN:
9710 case CEE_SUB_OVF_UN:
9716 GSHAREDVT_FAILURE (*ip);
9719 token = read32 (ip + 1);
9720 klass = mini_get_class (method, token, generic_context);
9721 CHECK_TYPELOAD (klass);
9723 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9734 token = read32 (ip + 1);
9735 klass = mini_get_class (method, token, generic_context);
9736 CHECK_TYPELOAD (klass);
9738 /* Optimize the common ldobj+stloc combination */
9748 loc_index = ip [5] - CEE_STLOC_0;
9755 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9756 CHECK_LOCAL (loc_index);
9758 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9759 ins->dreg = cfg->locals [loc_index]->dreg;
9760 ins->flags |= ins_flag;
9763 if (ins_flag & MONO_INST_VOLATILE) {
9764 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9765 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9771 /* Optimize the ldobj+stobj combination */
9772 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) {
9777 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9784 ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
9793 CHECK_STACK_OVF (1);
9795 n = read32 (ip + 1);
9797 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9798 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9799 ins->type = STACK_OBJ;
9802 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9803 MonoInst *iargs [1];
9804 char *str = (char *)mono_method_get_wrapper_data (method, n);
9806 if (cfg->compile_aot)
9807 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9809 EMIT_NEW_PCONST (cfg, iargs [0], str);
9810 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9812 if (cfg->opt & MONO_OPT_SHARED) {
9813 MonoInst *iargs [3];
9815 if (cfg->compile_aot) {
9816 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9818 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9819 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9820 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9821 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
9822 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9825 if (cfg->cbb->out_of_line) {
9826 MonoInst *iargs [2];
9828 if (image == mono_defaults.corlib) {
9830 * Avoid relocations in AOT and save some space by using a
9831 * version of helper_ldstr specialized to mscorlib.
9833 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9834 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9836 /* Avoid creating the string object */
9837 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9838 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9839 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9843 if (cfg->compile_aot) {
9844 NEW_LDSTRCONST (cfg, ins, image, n);
9846 MONO_ADD_INS (cfg->cbb, ins);
9849 NEW_PCONST (cfg, ins, NULL);
9850 ins->type = STACK_OBJ;
9851 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9855 OUT_OF_MEMORY_FAILURE;
9858 MONO_ADD_INS (cfg->cbb, ins);
9867 MonoInst *iargs [2];
9868 MonoMethodSignature *fsig;
9871 MonoInst *vtable_arg = NULL;
9874 token = read32 (ip + 1);
9875 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9878 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9881 mono_save_token_info (cfg, image, token, cmethod);
9883 if (!mono_class_init (cmethod->klass))
9884 TYPE_LOAD_ERROR (cmethod->klass);
9886 context_used = mini_method_check_context_used (cfg, cmethod);
9888 if (!dont_verify && !cfg->skip_visibility) {
9889 MonoMethod *cil_method = cmethod;
9890 MonoMethod *target_method = cil_method;
9892 if (method->is_inflated) {
9893 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9897 if (!mono_method_can_access_method (method_definition, target_method) &&
9898 !mono_method_can_access_method (method, cil_method))
9899 emit_method_access_failure (cfg, method, cil_method);
9902 if (mono_security_core_clr_enabled ())
9903 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
9905 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9906 emit_class_init (cfg, cmethod->klass);
9907 CHECK_TYPELOAD (cmethod->klass);
9911 if (cfg->gsharedvt) {
9912 if (mini_is_gsharedvt_variable_signature (sig))
9913 GSHAREDVT_FAILURE (*ip);
9917 n = fsig->param_count;
9921 * Generate smaller code for the common newobj <exception> instruction in
9922 * argument checking code.
9924 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9925 is_exception_class (cmethod->klass) && n <= 2 &&
9926 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9927 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9928 MonoInst *iargs [3];
9932 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9935 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9939 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9944 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9947 g_assert_not_reached ();
9955 /* move the args to allow room for 'this' in the first position */
9961 /* check_call_signature () requires sp[0] to be set */
9962 this_ins.type = STACK_OBJ;
9964 if (check_call_signature (cfg, fsig, sp))
9969 if (mini_class_is_system_array (cmethod->klass)) {
9970 *sp = emit_get_rgctx_method (cfg, context_used,
9971 cmethod, MONO_RGCTX_INFO_METHOD);
9973 /* Avoid varargs in the common case */
9974 if (fsig->param_count == 1)
9975 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9976 else if (fsig->param_count == 2)
9977 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9978 else if (fsig->param_count == 3)
9979 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9980 else if (fsig->param_count == 4)
9981 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9983 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9984 } else if (cmethod->string_ctor) {
9985 g_assert (!context_used);
9986 g_assert (!vtable_arg);
9987 /* we simply pass a null pointer */
9988 EMIT_NEW_PCONST (cfg, *sp, NULL);
9989 /* now call the string ctor */
9990 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9992 if (cmethod->klass->valuetype) {
9993 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9994 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9995 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10000 * The code generated by mini_emit_virtual_call () expects
10001 * iargs [0] to be a boxed instance, but luckily the vcall
10002 * will be transformed into a normal call there.
10004 } else if (context_used) {
10005 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10008 MonoVTable *vtable = NULL;
10010 if (!cfg->compile_aot)
10011 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10012 CHECK_TYPELOAD (cmethod->klass);
10015 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10016 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10017 * As a workaround, we call class cctors before allocating objects.
10019 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10020 emit_class_init (cfg, cmethod->klass);
10021 if (cfg->verbose_level > 2)
10022 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10023 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10026 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10029 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10032 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10034 /* Now call the actual ctor */
10035 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10036 CHECK_CFG_EXCEPTION;
10039 if (alloc == NULL) {
10041 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10042 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10050 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10051 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10054 case CEE_CASTCLASS:
10059 token = read32 (ip + 1);
10060 klass = mini_get_class (method, token, generic_context);
10061 CHECK_TYPELOAD (klass);
10062 if (sp [0]->type != STACK_OBJ)
10065 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10066 ins->dreg = alloc_preg (cfg);
10067 ins->sreg1 = (*sp)->dreg;
10068 ins->klass = klass;
10069 ins->type = STACK_OBJ;
10070 MONO_ADD_INS (cfg->cbb, ins);
10072 CHECK_CFG_EXCEPTION;
10076 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10079 case CEE_UNBOX_ANY: {
10080 MonoInst *res, *addr;
10085 token = read32 (ip + 1);
10086 klass = mini_get_class (method, token, generic_context);
10087 CHECK_TYPELOAD (klass);
10089 mono_save_token_info (cfg, image, token, klass);
10091 context_used = mini_class_check_context_used (cfg, klass);
10093 if (mini_is_gsharedvt_klass (klass)) {
10094 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10096 } else if (generic_class_is_reference_type (cfg, klass)) {
10097 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10098 EMIT_NEW_PCONST (cfg, res, NULL);
10099 res->type = STACK_OBJ;
10101 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10102 res->dreg = alloc_preg (cfg);
10103 res->sreg1 = (*sp)->dreg;
10104 res->klass = klass;
10105 res->type = STACK_OBJ;
10106 MONO_ADD_INS (cfg->cbb, res);
10107 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10109 } else if (mono_class_is_nullable (klass)) {
10110 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10112 addr = handle_unbox (cfg, klass, sp, context_used);
10114 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10125 MonoClass *enum_class;
10126 MonoMethod *has_flag;
10132 token = read32 (ip + 1);
10133 klass = mini_get_class (method, token, generic_context);
10134 CHECK_TYPELOAD (klass);
10136 mono_save_token_info (cfg, image, token, klass);
10138 context_used = mini_class_check_context_used (cfg, klass);
10140 if (generic_class_is_reference_type (cfg, klass)) {
10146 if (klass == mono_defaults.void_class)
10148 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10150 /* frequent check in generic code: box (struct), brtrue */
10155 * <push int/long ptr>
10158 * constrained. MyFlags
10159 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10161 * If we find this sequence and the operand types on box and constrained
10162 * are equal, we can emit a specialized instruction sequence instead of
10163 * the very slow HasFlag () call.
10165 if ((cfg->opt & MONO_OPT_INTRINS) &&
10166 /* Cheap checks first. */
10167 ip + 5 + 6 + 5 < end &&
10168 ip [5] == CEE_PREFIX1 &&
10169 ip [6] == CEE_CONSTRAINED_ &&
10170 ip [11] == CEE_CALLVIRT &&
10171 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10172 mono_class_is_enum (klass) &&
10173 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10174 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10175 has_flag->klass == mono_defaults.enum_class &&
10176 !strcmp (has_flag->name, "HasFlag") &&
10177 has_flag->signature->hasthis &&
10178 has_flag->signature->param_count == 1) {
10179 CHECK_TYPELOAD (enum_class);
10181 if (enum_class == klass) {
10182 MonoInst *enum_this, *enum_flag;
10187 enum_this = sp [0];
10188 enum_flag = sp [1];
10190 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10195 // FIXME: LLVM can't handle the inconsistent bb linking
10196 if (!mono_class_is_nullable (klass) &&
10197 !mini_is_gsharedvt_klass (klass) &&
10198 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10199 (ip [5] == CEE_BRTRUE ||
10200 ip [5] == CEE_BRTRUE_S ||
10201 ip [5] == CEE_BRFALSE ||
10202 ip [5] == CEE_BRFALSE_S)) {
10203 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10205 MonoBasicBlock *true_bb, *false_bb;
10209 if (cfg->verbose_level > 3) {
10210 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10211 printf ("<box+brtrue opt>\n");
10216 case CEE_BRFALSE_S:
10219 target = ip + 1 + (signed char)(*ip);
10226 target = ip + 4 + (gint)(read32 (ip));
10230 g_assert_not_reached ();
10234 * We need to link both bblocks, since it is needed for handling stack
10235 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10236 * Branching to only one of them would lead to inconsistencies, so
10237 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10239 GET_BBLOCK (cfg, true_bb, target);
10240 GET_BBLOCK (cfg, false_bb, ip);
10242 mono_link_bblock (cfg, cfg->cbb, true_bb);
10243 mono_link_bblock (cfg, cfg->cbb, false_bb);
10245 if (sp != stack_start) {
10246 handle_stack_args (cfg, stack_start, sp - stack_start);
10248 CHECK_UNVERIFIABLE (cfg);
10251 if (COMPILE_LLVM (cfg)) {
10252 dreg = alloc_ireg (cfg);
10253 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10254 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10256 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10258 /* The JIT can't eliminate the iconst+compare */
10259 MONO_INST_NEW (cfg, ins, OP_BR);
10260 ins->inst_target_bb = is_true ? true_bb : false_bb;
10261 MONO_ADD_INS (cfg->cbb, ins);
10264 start_new_bblock = 1;
10268 *sp++ = handle_box (cfg, val, klass, context_used);
10270 CHECK_CFG_EXCEPTION;
10279 token = read32 (ip + 1);
10280 klass = mini_get_class (method, token, generic_context);
10281 CHECK_TYPELOAD (klass);
10283 mono_save_token_info (cfg, image, token, klass);
10285 context_used = mini_class_check_context_used (cfg, klass);
10287 if (mono_class_is_nullable (klass)) {
10290 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10291 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10295 ins = handle_unbox (cfg, klass, sp, context_used);
10308 MonoClassField *field;
10309 #ifndef DISABLE_REMOTING
10313 gboolean is_instance;
10315 gpointer addr = NULL;
10316 gboolean is_special_static;
10318 MonoInst *store_val = NULL;
10319 MonoInst *thread_ins;
10322 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10324 if (op == CEE_STFLD) {
10327 store_val = sp [1];
10332 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10334 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10337 if (op == CEE_STSFLD) {
10340 store_val = sp [0];
10345 token = read32 (ip + 1);
10346 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10347 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10348 klass = field->parent;
10351 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10354 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10355 FIELD_ACCESS_FAILURE (method, field);
10356 mono_class_init (klass);
10358 /* if the class is Critical then transparent code cannot access its fields */
10359 if (!is_instance && mono_security_core_clr_enabled ())
10360 ensure_method_is_allowed_to_access_field (cfg, method, field);
10362 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10363 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10364 if (mono_security_core_clr_enabled ())
10365 ensure_method_is_allowed_to_access_field (cfg, method, field);
10368 ftype = mono_field_get_type (field);
10371 * LDFLD etc. is usable on static fields as well, so convert those cases to
10374 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10386 g_assert_not_reached ();
10388 is_instance = FALSE;
10391 context_used = mini_class_check_context_used (cfg, klass);
10393 /* INSTANCE CASE */
10395 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10396 if (op == CEE_STFLD) {
10397 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10399 #ifndef DISABLE_REMOTING
10400 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10401 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10402 MonoInst *iargs [5];
10404 GSHAREDVT_FAILURE (op);
10406 iargs [0] = sp [0];
10407 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10408 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10409 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10411 iargs [4] = sp [1];
10413 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10414 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10415 iargs, ip, cfg->real_offset, TRUE);
10416 CHECK_CFG_EXCEPTION;
10417 g_assert (costs > 0);
10419 cfg->real_offset += 5;
10421 inline_costs += costs;
10423 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10428 MonoInst *store, *wbarrier_ptr_ins = NULL;
10430 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10432 if (ins_flag & MONO_INST_VOLATILE) {
10433 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10434 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10437 if (mini_is_gsharedvt_klass (klass)) {
10438 MonoInst *offset_ins;
10440 context_used = mini_class_check_context_used (cfg, klass);
10442 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10443 /* The value is offset by 1 */
10444 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10445 dreg = alloc_ireg_mp (cfg);
10446 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10447 wbarrier_ptr_ins = ins;
10448 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
10449 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10451 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10453 if (sp [0]->opcode != OP_LDADDR)
10454 store->flags |= MONO_INST_FAULT;
10456 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10457 if (mini_is_gsharedvt_klass (klass)) {
10458 g_assert (wbarrier_ptr_ins);
10459 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10461 /* insert call to write barrier */
10465 dreg = alloc_ireg_mp (cfg);
10466 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10467 mini_emit_write_barrier (cfg, ptr, sp [1]);
10471 store->flags |= ins_flag;
10478 #ifndef DISABLE_REMOTING
10479 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10480 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10481 MonoInst *iargs [4];
10483 GSHAREDVT_FAILURE (op);
10485 iargs [0] = sp [0];
10486 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10487 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10488 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10489 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10490 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10491 iargs, ip, cfg->real_offset, TRUE);
10492 CHECK_CFG_EXCEPTION;
10493 g_assert (costs > 0);
10495 cfg->real_offset += 5;
10499 inline_costs += costs;
10501 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10507 if (sp [0]->type == STACK_VTYPE) {
10510 /* Have to compute the address of the variable */
10512 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10514 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10516 g_assert (var->klass == klass);
10518 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10522 if (op == CEE_LDFLDA) {
10523 if (sp [0]->type == STACK_OBJ) {
10524 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10525 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10528 dreg = alloc_ireg_mp (cfg);
10530 if (mini_is_gsharedvt_klass (klass)) {
10531 MonoInst *offset_ins;
10533 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10534 /* The value is offset by 1 */
10535 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10536 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10538 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10540 ins->klass = mono_class_from_mono_type (field->type);
10541 ins->type = STACK_MP;
10546 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10548 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10549 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10558 MonoInst *field_add_inst = sp [0];
10559 if (mini_is_gsharedvt_klass (klass)) {
10560 MonoInst *offset_ins;
10562 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10563 /* The value is offset by 1 */
10564 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10565 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
10569 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
10571 if (sp [0]->opcode != OP_LDADDR)
10572 load->flags |= MONO_INST_FAULT;
10584 context_used = mini_class_check_context_used (cfg, klass);
10586 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10587 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10591 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10592 * to be called here.
10594 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10595 mono_class_vtable (cfg->domain, klass);
10596 CHECK_TYPELOAD (klass);
10598 mono_domain_lock (cfg->domain);
10599 if (cfg->domain->special_static_fields)
10600 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10601 mono_domain_unlock (cfg->domain);
10603 is_special_static = mono_class_field_is_special_static (field);
10605 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10606 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10610 /* Generate IR to compute the field address */
10611 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10613 * Fast access to TLS data
10614 * Inline version of get_thread_static_data () in
10618 int idx, static_data_reg, array_reg, dreg;
10620 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10621 GSHAREDVT_FAILURE (op);
10623 static_data_reg = alloc_ireg (cfg);
10624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10626 if (cfg->compile_aot) {
10627 int offset_reg, offset2_reg, idx_reg;
10629 /* For TLS variables, this will return the TLS offset */
10630 EMIT_NEW_SFLDACONST (cfg, ins, field);
10631 offset_reg = ins->dreg;
10632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10633 idx_reg = alloc_ireg (cfg);
10634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10635 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10636 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10637 array_reg = alloc_ireg (cfg);
10638 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10639 offset2_reg = alloc_ireg (cfg);
10640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10642 dreg = alloc_ireg (cfg);
10643 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10645 offset = (gsize)addr & 0x7fffffff;
10646 idx = offset & 0x3f;
10648 array_reg = alloc_ireg (cfg);
10649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10650 dreg = alloc_ireg (cfg);
10651 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10653 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10654 (cfg->compile_aot && is_special_static) ||
10655 (context_used && is_special_static)) {
10656 MonoInst *iargs [2];
10658 g_assert (field->parent);
10659 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10660 if (context_used) {
10661 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10662 field, MONO_RGCTX_INFO_CLASS_FIELD);
10664 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10666 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10667 } else if (context_used) {
10668 MonoInst *static_data;
10671 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10672 method->klass->name_space, method->klass->name, method->name,
10673 depth, field->offset);
10676 if (mono_class_needs_cctor_run (klass, method))
10677 emit_class_init (cfg, klass);
10680 * The pointer we're computing here is
10682 * super_info.static_data + field->offset
10684 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10685 klass, MONO_RGCTX_INFO_STATIC_DATA);
10687 if (mini_is_gsharedvt_klass (klass)) {
10688 MonoInst *offset_ins;
10690 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10691 /* The value is offset by 1 */
10692 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10693 dreg = alloc_ireg_mp (cfg);
10694 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10695 } else if (field->offset == 0) {
10698 int addr_reg = mono_alloc_preg (cfg);
10699 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10701 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10702 MonoInst *iargs [2];
10704 g_assert (field->parent);
10705 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10706 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10707 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10709 MonoVTable *vtable = NULL;
10711 if (!cfg->compile_aot)
10712 vtable = mono_class_vtable (cfg->domain, klass);
10713 CHECK_TYPELOAD (klass);
10716 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10717 if (!(g_slist_find (class_inits, klass))) {
10718 emit_class_init (cfg, klass);
10719 if (cfg->verbose_level > 2)
10720 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10721 class_inits = g_slist_prepend (class_inits, klass);
10724 if (cfg->run_cctors) {
10725 /* This makes it so that inlining cannot trigger */
10726 /* .cctors: too many apps depend on them */
10727 /* running with a specific order... */
10729 if (! vtable->initialized)
10730 INLINE_FAILURE ("class init");
10731 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10732 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10733 goto exception_exit;
10737 if (cfg->compile_aot)
10738 EMIT_NEW_SFLDACONST (cfg, ins, field);
10741 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10743 EMIT_NEW_PCONST (cfg, ins, addr);
10746 MonoInst *iargs [1];
10747 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10748 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10752 /* Generate IR to do the actual load/store operation */
10754 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10755 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10756 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10759 if (op == CEE_LDSFLDA) {
10760 ins->klass = mono_class_from_mono_type (ftype);
10761 ins->type = STACK_PTR;
10763 } else if (op == CEE_STSFLD) {
10766 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10767 store->flags |= ins_flag;
10769 gboolean is_const = FALSE;
10770 MonoVTable *vtable = NULL;
10771 gpointer addr = NULL;
10773 if (!context_used) {
10774 vtable = mono_class_vtable (cfg->domain, klass);
10775 CHECK_TYPELOAD (klass);
10777 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10778 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10779 int ro_type = ftype->type;
10781 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10782 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10783 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10786 GSHAREDVT_FAILURE (op);
10788 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10791 case MONO_TYPE_BOOLEAN:
10793 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10797 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10800 case MONO_TYPE_CHAR:
10802 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10806 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10811 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10815 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10820 case MONO_TYPE_PTR:
10821 case MONO_TYPE_FNPTR:
10822 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10823 type_to_eval_stack_type ((cfg), field->type, *sp);
10826 case MONO_TYPE_STRING:
10827 case MONO_TYPE_OBJECT:
10828 case MONO_TYPE_CLASS:
10829 case MONO_TYPE_SZARRAY:
10830 case MONO_TYPE_ARRAY:
10831 if (!mono_gc_is_moving ()) {
10832 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10833 type_to_eval_stack_type ((cfg), field->type, *sp);
10841 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10846 case MONO_TYPE_VALUETYPE:
10856 CHECK_STACK_OVF (1);
10858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10859 load->flags |= ins_flag;
10865 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10866 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10867 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10878 token = read32 (ip + 1);
10879 klass = mini_get_class (method, token, generic_context);
10880 CHECK_TYPELOAD (klass);
10882 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10883 mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
10894 const char *data_ptr;
10896 guint32 field_token;
10902 token = read32 (ip + 1);
10904 klass = mini_get_class (method, token, generic_context);
10905 CHECK_TYPELOAD (klass);
10906 if (klass->byval_arg.type == MONO_TYPE_VOID)
10909 context_used = mini_class_check_context_used (cfg, klass);
10911 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10912 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10913 ins->sreg1 = sp [0]->dreg;
10914 ins->type = STACK_I4;
10915 ins->dreg = alloc_ireg (cfg);
10916 MONO_ADD_INS (cfg->cbb, ins);
10917 *sp = mono_decompose_opcode (cfg, ins);
10920 if (context_used) {
10921 MonoInst *args [3];
10922 MonoClass *array_class = mono_array_class_get (klass, 1);
10923 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10925 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10928 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
10929 array_class, MONO_RGCTX_INFO_VTABLE);
10934 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10936 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
10938 if (cfg->opt & MONO_OPT_SHARED) {
10939 /* Decompose now to avoid problems with references to the domainvar */
10940 MonoInst *iargs [3];
10942 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10943 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10944 iargs [2] = sp [0];
10946 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
10948 /* Decompose later since it is needed by abcrem */
10949 MonoClass *array_type = mono_array_class_get (klass, 1);
10950 mono_class_vtable (cfg->domain, array_type);
10951 CHECK_TYPELOAD (array_type);
10953 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10954 ins->dreg = alloc_ireg_ref (cfg);
10955 ins->sreg1 = sp [0]->dreg;
10956 ins->inst_newa_class = klass;
10957 ins->type = STACK_OBJ;
10958 ins->klass = array_type;
10959 MONO_ADD_INS (cfg->cbb, ins);
10960 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10961 cfg->cbb->has_array_access = TRUE;
10963 /* Needed so mono_emit_load_get_addr () gets called */
10964 mono_get_got_var (cfg);
10974 * we inline/optimize the initialization sequence if possible.
10975 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10976 * for small sizes open code the memcpy
10977 * ensure the rva field is big enough
10979 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10980 MonoMethod *memcpy_method = mini_get_memcpy_method ();
10981 MonoInst *iargs [3];
10982 int add_reg = alloc_ireg_mp (cfg);
10984 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10985 if (cfg->compile_aot) {
10986 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10988 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10990 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10991 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11000 if (sp [0]->type != STACK_OBJ)
11003 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11004 ins->dreg = alloc_preg (cfg);
11005 ins->sreg1 = sp [0]->dreg;
11006 ins->type = STACK_I4;
11007 /* This flag will be inherited by the decomposition */
11008 ins->flags |= MONO_INST_FAULT;
11009 MONO_ADD_INS (cfg->cbb, ins);
11010 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11011 cfg->cbb->has_array_access = TRUE;
11019 if (sp [0]->type != STACK_OBJ)
11022 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11024 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11025 CHECK_TYPELOAD (klass);
11026 /* we need to make sure that this array is exactly the type it needs
11027 * to be for correctness. the wrappers are lax with their usage
11028 * so we need to ignore them here
11030 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11031 MonoClass *array_class = mono_array_class_get (klass, 1);
11032 mini_emit_check_array_type (cfg, sp [0], array_class);
11033 CHECK_TYPELOAD (array_class);
11037 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11042 case CEE_LDELEM_I1:
11043 case CEE_LDELEM_U1:
11044 case CEE_LDELEM_I2:
11045 case CEE_LDELEM_U2:
11046 case CEE_LDELEM_I4:
11047 case CEE_LDELEM_U4:
11048 case CEE_LDELEM_I8:
11050 case CEE_LDELEM_R4:
11051 case CEE_LDELEM_R8:
11052 case CEE_LDELEM_REF: {
11058 if (*ip == CEE_LDELEM) {
11060 token = read32 (ip + 1);
11061 klass = mini_get_class (method, token, generic_context);
11062 CHECK_TYPELOAD (klass);
11063 mono_class_init (klass);
11066 klass = array_access_to_klass (*ip);
11068 if (sp [0]->type != STACK_OBJ)
11071 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11073 if (mini_is_gsharedvt_variable_klass (klass)) {
11074 // FIXME-VT: OP_ICONST optimization
11075 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11076 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11077 ins->opcode = OP_LOADV_MEMBASE;
11078 } else if (sp [1]->opcode == OP_ICONST) {
11079 int array_reg = sp [0]->dreg;
11080 int index_reg = sp [1]->dreg;
11081 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11083 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11084 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11086 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11087 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11089 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11090 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11093 if (*ip == CEE_LDELEM)
11100 case CEE_STELEM_I1:
11101 case CEE_STELEM_I2:
11102 case CEE_STELEM_I4:
11103 case CEE_STELEM_I8:
11104 case CEE_STELEM_R4:
11105 case CEE_STELEM_R8:
11106 case CEE_STELEM_REF:
11111 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11113 if (*ip == CEE_STELEM) {
11115 token = read32 (ip + 1);
11116 klass = mini_get_class (method, token, generic_context);
11117 CHECK_TYPELOAD (klass);
11118 mono_class_init (klass);
11121 klass = array_access_to_klass (*ip);
11123 if (sp [0]->type != STACK_OBJ)
11126 emit_array_store (cfg, klass, sp, TRUE);
11128 if (*ip == CEE_STELEM)
11135 case CEE_CKFINITE: {
11139 if (cfg->llvm_only) {
11140 MonoInst *iargs [1];
11142 iargs [0] = sp [0];
11143 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11145 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11146 ins->sreg1 = sp [0]->dreg;
11147 ins->dreg = alloc_freg (cfg);
11148 ins->type = STACK_R8;
11149 MONO_ADD_INS (cfg->cbb, ins);
11151 *sp++ = mono_decompose_opcode (cfg, ins);
11157 case CEE_REFANYVAL: {
11158 MonoInst *src_var, *src;
11160 int klass_reg = alloc_preg (cfg);
11161 int dreg = alloc_preg (cfg);
11163 GSHAREDVT_FAILURE (*ip);
11166 MONO_INST_NEW (cfg, ins, *ip);
11169 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11170 CHECK_TYPELOAD (klass);
11172 context_used = mini_class_check_context_used (cfg, klass);
11175 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11177 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11178 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11179 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11181 if (context_used) {
11182 MonoInst *klass_ins;
11184 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11185 klass, MONO_RGCTX_INFO_KLASS);
11188 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11189 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11191 mini_emit_class_check (cfg, klass_reg, klass);
11193 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11194 ins->type = STACK_MP;
11195 ins->klass = klass;
11200 case CEE_MKREFANY: {
11201 MonoInst *loc, *addr;
11203 GSHAREDVT_FAILURE (*ip);
11206 MONO_INST_NEW (cfg, ins, *ip);
11209 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11210 CHECK_TYPELOAD (klass);
11212 context_used = mini_class_check_context_used (cfg, klass);
11214 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11215 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11217 if (context_used) {
11218 MonoInst *const_ins;
11219 int type_reg = alloc_preg (cfg);
11221 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11224 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11226 int const_reg = alloc_preg (cfg);
11227 int type_reg = alloc_preg (cfg);
11229 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11231 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11232 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11234 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11236 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11237 ins->type = STACK_VTYPE;
11238 ins->klass = mono_defaults.typed_reference_class;
11243 case CEE_LDTOKEN: {
11245 MonoClass *handle_class;
11247 CHECK_STACK_OVF (1);
11250 n = read32 (ip + 1);
11252 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11253 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11254 handle = mono_method_get_wrapper_data (method, n);
11255 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11256 if (handle_class == mono_defaults.typehandle_class)
11257 handle = &((MonoClass*)handle)->byval_arg;
11260 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11265 mono_class_init (handle_class);
11266 if (cfg->gshared) {
11267 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11268 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11269 /* This case handles ldtoken
11270 of an open type, like for
11273 } else if (handle_class == mono_defaults.typehandle_class) {
11274 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11275 } else if (handle_class == mono_defaults.fieldhandle_class)
11276 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11277 else if (handle_class == mono_defaults.methodhandle_class)
11278 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11280 g_assert_not_reached ();
11283 if ((cfg->opt & MONO_OPT_SHARED) &&
11284 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11285 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11286 MonoInst *addr, *vtvar, *iargs [3];
11287 int method_context_used;
11289 method_context_used = mini_method_check_context_used (cfg, method);
11291 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11293 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11294 EMIT_NEW_ICONST (cfg, iargs [1], n);
11295 if (method_context_used) {
11296 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11297 method, MONO_RGCTX_INFO_METHOD);
11298 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11300 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11301 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11303 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11307 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11309 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11310 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11311 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11312 (cmethod->klass == mono_defaults.systemtype_class) &&
11313 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11314 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11316 mono_class_init (tclass);
11317 if (context_used) {
11318 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11319 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11320 } else if (cfg->compile_aot) {
11321 if (method->wrapper_type) {
11322 error_init (&error); //got to do it since there are multiple conditionals below
11323 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11324 /* Special case for static synchronized wrappers */
11325 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11327 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11328 /* FIXME: n is not a normal token */
11330 EMIT_NEW_PCONST (cfg, ins, NULL);
11333 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11336 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11338 EMIT_NEW_PCONST (cfg, ins, rt);
11340 ins->type = STACK_OBJ;
11341 ins->klass = cmethod->klass;
11344 MonoInst *addr, *vtvar;
11346 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11348 if (context_used) {
11349 if (handle_class == mono_defaults.typehandle_class) {
11350 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11351 mono_class_from_mono_type ((MonoType *)handle),
11352 MONO_RGCTX_INFO_TYPE);
11353 } else if (handle_class == mono_defaults.methodhandle_class) {
11354 ins = emit_get_rgctx_method (cfg, context_used,
11355 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11356 } else if (handle_class == mono_defaults.fieldhandle_class) {
11357 ins = emit_get_rgctx_field (cfg, context_used,
11358 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11360 g_assert_not_reached ();
11362 } else if (cfg->compile_aot) {
11363 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11365 EMIT_NEW_PCONST (cfg, ins, handle);
11367 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11369 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11379 if (sp [-1]->type != STACK_OBJ)
11382 MONO_INST_NEW (cfg, ins, OP_THROW);
11384 ins->sreg1 = sp [0]->dreg;
11386 cfg->cbb->out_of_line = TRUE;
11387 MONO_ADD_INS (cfg->cbb, ins);
11388 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11389 MONO_ADD_INS (cfg->cbb, ins);
11392 link_bblock (cfg, cfg->cbb, end_bblock);
11393 start_new_bblock = 1;
11394 /* This can complicate code generation for llvm since the return value might not be defined */
11395 if (COMPILE_LLVM (cfg))
11396 INLINE_FAILURE ("throw");
11398 case CEE_ENDFINALLY:
11399 if (!ip_in_finally_clause (cfg, ip - header->code))
11401 /* mono_save_seq_point_info () depends on this */
11402 if (sp != stack_start)
11403 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11404 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11405 MONO_ADD_INS (cfg->cbb, ins);
11407 start_new_bblock = 1;
11410 * Control will leave the method so empty the stack, otherwise
11411 * the next basic block will start with a nonempty stack.
11413 while (sp != stack_start) {
11418 case CEE_LEAVE_S: {
11421 if (*ip == CEE_LEAVE) {
11423 target = ip + 5 + (gint32)read32(ip + 1);
11426 target = ip + 2 + (signed char)(ip [1]);
11429 /* empty the stack */
11430 while (sp != stack_start) {
11435 * If this leave statement is in a catch block, check for a
11436 * pending exception, and rethrow it if necessary.
11437 * We avoid doing this in runtime invoke wrappers, since those are called
11438 	 * by native code which expects the wrapper to catch all exceptions.
11440 for (i = 0; i < header->num_clauses; ++i) {
11441 MonoExceptionClause *clause = &header->clauses [i];
11444 * Use <= in the final comparison to handle clauses with multiple
11445 * leave statements, like in bug #78024.
11446 * The ordering of the exception clauses guarantees that we find the
11447 * innermost clause.
11449 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11451 MonoBasicBlock *dont_throw;
11456 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11459 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11461 NEW_BBLOCK (cfg, dont_throw);
11464 * Currently, we always rethrow the abort exception, despite the
11465 * fact that this is not correct. See thread6.cs for an example.
11466 * But propagating the abort exception is more important than
11467 	 * getting the semantics right.
11469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11471 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11473 MONO_START_BB (cfg, dont_throw);
11478 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11481 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11484 for (tmp = handlers; tmp; tmp = tmp->next) {
11485 MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
11486 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
11487 MonoBasicBlock *dont_throw;
11489 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11491 link_bblock (cfg, cfg->cbb, tblock);
11493 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
11495 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11496 ins->inst_target_bb = tblock;
11497 ins->inst_eh_block = clause;
11498 MONO_ADD_INS (cfg->cbb, ins);
11499 cfg->cbb->has_call_handler = 1;
11501 /* Throw exception if exvar is set */
11502 /* FIXME Do we need this for calls from catch/filter ? */
11503 NEW_BBLOCK (cfg, dont_throw);
11504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
11505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11506 mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
11507 cfg->cbb->clause_hole = clause;
11509 MONO_START_BB (cfg, dont_throw);
11510 cfg->cbb->clause_hole = clause;
11512 if (COMPILE_LLVM (cfg)) {
11513 MonoBasicBlock *target_bb;
11516 * Link the finally bblock with the target, since it will
11517 * conceptually branch there.
11519 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11520 GET_BBLOCK (cfg, target_bb, target);
11521 link_bblock (cfg, tblock, target_bb);
11524 g_list_free (handlers);
11527 MONO_INST_NEW (cfg, ins, OP_BR);
11528 MONO_ADD_INS (cfg->cbb, ins);
11529 GET_BBLOCK (cfg, tblock, target);
11530 link_bblock (cfg, cfg->cbb, tblock);
11531 ins->inst_target_bb = tblock;
11533 start_new_bblock = 1;
11535 if (*ip == CEE_LEAVE)
11544 * Mono specific opcodes
11546 case MONO_CUSTOM_PREFIX: {
11548 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11552 case CEE_MONO_ICALL: {
11554 MonoJitICallInfo *info;
11556 token = read32 (ip + 2);
11557 func = mono_method_get_wrapper_data (method, token);
11558 info = mono_find_jit_icall_by_addr (func);
11560 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11563 CHECK_STACK (info->sig->param_count);
11564 sp -= info->sig->param_count;
11566 if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) {
11570 * This is called on unattached threads, so it cannot go through the trampoline
11571 * infrastructure. Use an indirect call through a got slot initialized at load time
11574 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
11575 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
11577 ins = mono_emit_jit_icall (cfg, info->func, sp);
11580 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11584 inline_costs += 10 * num_calls++;
11588 case CEE_MONO_LDPTR_CARD_TABLE:
11589 case CEE_MONO_LDPTR_NURSERY_START:
11590 case CEE_MONO_LDPTR_NURSERY_BITS:
11591 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11592 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
11593 CHECK_STACK_OVF (1);
11596 case CEE_MONO_LDPTR_CARD_TABLE:
11597 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11599 case CEE_MONO_LDPTR_NURSERY_START:
11600 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11602 case CEE_MONO_LDPTR_NURSERY_BITS:
11603 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11605 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11606 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11608 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
11609 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
11612 g_assert_not_reached ();
11618 inline_costs += 10 * num_calls++;
11621 case CEE_MONO_LDPTR: {
11624 CHECK_STACK_OVF (1);
11626 token = read32 (ip + 2);
11628 ptr = mono_method_get_wrapper_data (method, token);
11629 EMIT_NEW_PCONST (cfg, ins, ptr);
11632 inline_costs += 10 * num_calls++;
11633 /* Can't embed random pointers into AOT code */
11637 case CEE_MONO_JIT_ICALL_ADDR: {
11638 MonoJitICallInfo *callinfo;
11641 CHECK_STACK_OVF (1);
11643 token = read32 (ip + 2);
11645 ptr = mono_method_get_wrapper_data (method, token);
11646 callinfo = mono_find_jit_icall_by_addr (ptr);
11647 g_assert (callinfo);
11648 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11651 inline_costs += 10 * num_calls++;
11654 case CEE_MONO_ICALL_ADDR: {
11655 MonoMethod *cmethod;
11658 CHECK_STACK_OVF (1);
11660 token = read32 (ip + 2);
11662 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11664 if (cfg->compile_aot) {
11665 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11667 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11668 					 * before the call, it's not needed when using direct pinvoke.
11669 					 * This is not an optimization, but it's used to avoid looking up pinvokes
11670 * on platforms which don't support dlopen ().
11672 EMIT_NEW_PCONST (cfg, ins, NULL);
11674 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11677 ptr = mono_lookup_internal_call (cmethod);
11679 EMIT_NEW_PCONST (cfg, ins, ptr);
11685 case CEE_MONO_VTADDR: {
11686 MonoInst *src_var, *src;
11692 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11693 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11698 case CEE_MONO_NEWOBJ: {
11699 MonoInst *iargs [2];
11701 CHECK_STACK_OVF (1);
11703 token = read32 (ip + 2);
11704 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11705 mono_class_init (klass);
11706 NEW_DOMAINCONST (cfg, iargs [0]);
11707 MONO_ADD_INS (cfg->cbb, iargs [0]);
11708 NEW_CLASSCONST (cfg, iargs [1], klass);
11709 MONO_ADD_INS (cfg->cbb, iargs [1]);
11710 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11712 inline_costs += 10 * num_calls++;
11715 case CEE_MONO_OBJADDR:
11718 MONO_INST_NEW (cfg, ins, OP_MOVE);
11719 ins->dreg = alloc_ireg_mp (cfg);
11720 ins->sreg1 = sp [0]->dreg;
11721 ins->type = STACK_MP;
11722 MONO_ADD_INS (cfg->cbb, ins);
11726 case CEE_MONO_LDNATIVEOBJ:
11728 * Similar to LDOBJ, but instead load the unmanaged
11729 * representation of the vtype to the stack.
11734 token = read32 (ip + 2);
11735 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11736 g_assert (klass->valuetype);
11737 mono_class_init (klass);
11740 MonoInst *src, *dest, *temp;
11743 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11744 temp->backend.is_pinvoke = 1;
11745 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11746 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
11748 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11749 dest->type = STACK_VTYPE;
11750 dest->klass = klass;
11756 case CEE_MONO_RETOBJ: {
11758 * Same as RET, but return the native representation of a vtype
11761 g_assert (cfg->ret);
11762 g_assert (mono_method_signature (method)->pinvoke);
11767 token = read32 (ip + 2);
11768 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11770 if (!cfg->vret_addr) {
11771 g_assert (cfg->ret_var_is_local);
11773 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11775 EMIT_NEW_RETLOADA (cfg, ins);
11777 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
11779 if (sp != stack_start)
11782 MONO_INST_NEW (cfg, ins, OP_BR);
11783 ins->inst_target_bb = end_bblock;
11784 MONO_ADD_INS (cfg->cbb, ins);
11785 link_bblock (cfg, cfg->cbb, end_bblock);
11786 start_new_bblock = 1;
11790 case CEE_MONO_SAVE_LMF:
11791 case CEE_MONO_RESTORE_LMF:
11794 case CEE_MONO_CLASSCONST:
11795 CHECK_STACK_OVF (1);
11797 token = read32 (ip + 2);
11798 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11801 inline_costs += 10 * num_calls++;
11803 case CEE_MONO_NOT_TAKEN:
11804 cfg->cbb->out_of_line = TRUE;
11807 case CEE_MONO_TLS: {
11810 CHECK_STACK_OVF (1);
11812 key = (MonoTlsKey)read32 (ip + 2);
11813 g_assert (key < TLS_KEY_NUM);
11815 ins = mono_create_tls_get (cfg, key);
11817 ins->type = STACK_PTR;
11822 case CEE_MONO_DYN_CALL: {
11823 MonoCallInst *call;
11825 /* It would be easier to call a trampoline, but that would put an
11826 * extra frame on the stack, confusing exception handling. So
11827 * implement it inline using an opcode for now.
11830 if (!cfg->dyn_call_var) {
11831 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11832 /* prevent it from being register allocated */
11833 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11836 			/* Has to use a call inst since the local regalloc expects it */
11837 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11838 ins = (MonoInst*)call;
11840 ins->sreg1 = sp [0]->dreg;
11841 ins->sreg2 = sp [1]->dreg;
11842 MONO_ADD_INS (cfg->cbb, ins);
11844 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
11847 inline_costs += 10 * num_calls++;
11851 case CEE_MONO_MEMORY_BARRIER: {
11853 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
11857 case CEE_MONO_ATOMIC_STORE_I4: {
11858 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
11864 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
11865 ins->dreg = sp [0]->dreg;
11866 ins->sreg1 = sp [1]->dreg;
11867 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
11868 MONO_ADD_INS (cfg->cbb, ins);
11873 case CEE_MONO_JIT_ATTACH: {
11874 MonoInst *args [16], *domain_ins;
11875 MonoInst *ad_ins, *jit_tls_ins;
11876 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11878 g_assert (!mono_threads_is_blocking_transition_enabled ());
11880 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11882 EMIT_NEW_PCONST (cfg, ins, NULL);
11883 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11885 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11886 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
11888 if (ad_ins && jit_tls_ins) {
11889 NEW_BBLOCK (cfg, next_bb);
11890 NEW_BBLOCK (cfg, call_bb);
11892 if (cfg->compile_aot) {
11893 /* AOT code is only used in the root domain */
11894 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11896 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11898 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11899 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11905 MONO_START_BB (cfg, call_bb);
11908 /* AOT code is only used in the root domain */
11909 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
11910 if (cfg->compile_aot) {
11914 * This is called on unattached threads, so it cannot go through the trampoline
11915 * infrastructure. Use an indirect call through a got slot initialized at load time
11918 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
11919 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
11921 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11923 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11926 MONO_START_BB (cfg, next_bb);
11931 case CEE_MONO_JIT_DETACH: {
11932 MonoInst *args [16];
11934 /* Restore the original domain */
11935 dreg = alloc_ireg (cfg);
11936 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11937 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11941 case CEE_MONO_CALLI_EXTRA_ARG: {
11943 MonoMethodSignature *fsig;
11947 * This is the same as CEE_CALLI, but passes an additional argument
11948 * to the called method in llvmonly mode.
11949 * This is only used by delegate invoke wrappers to call the
11950 * actual delegate method.
11952 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
11955 token = read32 (ip + 2);
11963 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
11966 if (cfg->llvm_only)
11967 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
11969 n = fsig->param_count + fsig->hasthis + 1;
11976 if (cfg->llvm_only) {
11978 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
11979 * cconv. This is set by mono_init_delegate ().
11981 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
11982 MonoInst *callee = addr;
11983 MonoInst *call, *localloc_ins;
11984 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
11985 int low_bit_reg = alloc_preg (cfg);
11987 NEW_BBLOCK (cfg, is_gsharedvt_bb);
11988 NEW_BBLOCK (cfg, end_bb);
11990 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
11991 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
11992 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
11994 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
11995 addr = emit_get_rgctx_sig (cfg, context_used,
11996 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
11998 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12000 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12001 ins->dreg = alloc_preg (cfg);
12002 ins->inst_imm = 2 * SIZEOF_VOID_P;
12003 MONO_ADD_INS (cfg->cbb, ins);
12004 localloc_ins = ins;
12005 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12006 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12007 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12009 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12010 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12012 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12013 MONO_START_BB (cfg, is_gsharedvt_bb);
12014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12015 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12016 ins->dreg = call->dreg;
12018 MONO_START_BB (cfg, end_bb);
12020 /* Caller uses a normal calling conv */
12022 MonoInst *callee = addr;
12023 MonoInst *call, *localloc_ins;
12024 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12025 int low_bit_reg = alloc_preg (cfg);
12027 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12028 NEW_BBLOCK (cfg, end_bb);
12030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12032 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12034 /* Normal case: callee uses a normal cconv, no conversion is needed */
12035 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12036 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12037 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12038 MONO_START_BB (cfg, is_gsharedvt_bb);
12039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12040 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12041 MONO_ADD_INS (cfg->cbb, addr);
12043 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12045 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12046 ins->dreg = alloc_preg (cfg);
12047 ins->inst_imm = 2 * SIZEOF_VOID_P;
12048 MONO_ADD_INS (cfg->cbb, ins);
12049 localloc_ins = ins;
12050 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12052 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12054 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12055 ins->dreg = call->dreg;
12056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12058 MONO_START_BB (cfg, end_bb);
12061 /* Same as CEE_CALLI */
12062 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12064 * We pass the address to the gsharedvt trampoline in the rgctx reg
12066 MonoInst *callee = addr;
12068 addr = emit_get_rgctx_sig (cfg, context_used,
12069 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12070 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12072 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12076 if (!MONO_TYPE_IS_VOID (fsig->ret))
12077 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12079 CHECK_CFG_EXCEPTION;
12083 constrained_class = NULL;
12086 case CEE_MONO_LDDOMAIN:
12087 CHECK_STACK_OVF (1);
12088 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12092 case CEE_MONO_GET_LAST_ERROR:
12094 CHECK_STACK_OVF (1);
12096 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12097 ins->dreg = alloc_dreg (cfg, STACK_I4);
12098 ins->type = STACK_I4;
12099 MONO_ADD_INS (cfg->cbb, ins);
12104 case CEE_MONO_GET_RGCTX_ARG:
12106 CHECK_STACK_OVF (1);
12108 mono_create_rgctx_var (cfg);
12110 MONO_INST_NEW (cfg, ins, OP_MOVE);
12111 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12112 ins->sreg1 = cfg->rgctx_var->dreg;
12113 ins->type = STACK_PTR;
12114 MONO_ADD_INS (cfg->cbb, ins);
12120 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12126 case CEE_PREFIX1: {
12129 case CEE_ARGLIST: {
12130 /* somewhat similar to LDTOKEN */
12131 MonoInst *addr, *vtvar;
12132 CHECK_STACK_OVF (1);
12133 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12135 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12136 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12138 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12139 ins->type = STACK_VTYPE;
12140 ins->klass = mono_defaults.argumenthandle_class;
12150 MonoInst *cmp, *arg1, *arg2;
12158 * The following transforms:
12159 * CEE_CEQ into OP_CEQ
12160 * CEE_CGT into OP_CGT
12161 * CEE_CGT_UN into OP_CGT_UN
12162 * CEE_CLT into OP_CLT
12163 * CEE_CLT_UN into OP_CLT_UN
12165 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12167 MONO_INST_NEW (cfg, ins, cmp->opcode);
12168 cmp->sreg1 = arg1->dreg;
12169 cmp->sreg2 = arg2->dreg;
12170 type_from_op (cfg, cmp, arg1, arg2);
12172 add_widen_op (cfg, cmp, &arg1, &arg2);
12173 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12174 cmp->opcode = OP_LCOMPARE;
12175 else if (arg1->type == STACK_R4)
12176 cmp->opcode = OP_RCOMPARE;
12177 else if (arg1->type == STACK_R8)
12178 cmp->opcode = OP_FCOMPARE;
12180 cmp->opcode = OP_ICOMPARE;
12181 MONO_ADD_INS (cfg->cbb, cmp);
12182 ins->type = STACK_I4;
12183 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12184 type_from_op (cfg, ins, arg1, arg2);
12186 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12188 * The backends expect the fceq opcodes to do the
12191 ins->sreg1 = cmp->sreg1;
12192 ins->sreg2 = cmp->sreg2;
12195 MONO_ADD_INS (cfg->cbb, ins);
12201 MonoInst *argconst;
12202 MonoMethod *cil_method;
12204 CHECK_STACK_OVF (1);
12206 n = read32 (ip + 2);
12207 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12210 mono_class_init (cmethod->klass);
12212 mono_save_token_info (cfg, image, n, cmethod);
12214 context_used = mini_method_check_context_used (cfg, cmethod);
12216 cil_method = cmethod;
12217 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12218 emit_method_access_failure (cfg, method, cil_method);
12220 if (mono_security_core_clr_enabled ())
12221 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12224 * Optimize the common case of ldftn+delegate creation
12226 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12227 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12228 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12229 MonoInst *target_ins, *handle_ins;
12230 MonoMethod *invoke;
12231 int invoke_context_used;
12233 invoke = mono_get_delegate_invoke (ctor_method->klass);
12234 if (!invoke || !mono_method_signature (invoke))
12237 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12239 target_ins = sp [-1];
12241 if (mono_security_core_clr_enabled ())
12242 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12244 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12245 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12246 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12248 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12252 /* FIXME: SGEN support */
12253 if (invoke_context_used == 0 || cfg->llvm_only) {
12255 if (cfg->verbose_level > 3)
12256 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12257 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12260 CHECK_CFG_EXCEPTION;
12270 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12271 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12275 inline_costs += 10 * num_calls++;
12278 case CEE_LDVIRTFTN: {
12279 MonoInst *args [2];
12283 n = read32 (ip + 2);
12284 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12287 mono_class_init (cmethod->klass);
12289 context_used = mini_method_check_context_used (cfg, cmethod);
12291 if (mono_security_core_clr_enabled ())
12292 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12295 * Optimize the common case of ldvirtftn+delegate creation
12297 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12298 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12299 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12300 MonoInst *target_ins, *handle_ins;
12301 MonoMethod *invoke;
12302 int invoke_context_used;
12303 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12305 invoke = mono_get_delegate_invoke (ctor_method->klass);
12306 if (!invoke || !mono_method_signature (invoke))
12309 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12311 target_ins = sp [-1];
12313 if (mono_security_core_clr_enabled ())
12314 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12316 /* FIXME: SGEN support */
12317 if (invoke_context_used == 0 || cfg->llvm_only) {
12319 if (cfg->verbose_level > 3)
12320 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12321 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12324 CHECK_CFG_EXCEPTION;
12337 args [1] = emit_get_rgctx_method (cfg, context_used,
12338 cmethod, MONO_RGCTX_INFO_METHOD);
12341 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12343 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12346 inline_costs += 10 * num_calls++;
12350 CHECK_STACK_OVF (1);
12352 n = read16 (ip + 2);
12354 EMIT_NEW_ARGLOAD (cfg, ins, n);
12359 CHECK_STACK_OVF (1);
12361 n = read16 (ip + 2);
12363 NEW_ARGLOADA (cfg, ins, n);
12364 MONO_ADD_INS (cfg->cbb, ins);
12372 n = read16 (ip + 2);
12374 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12376 emit_starg_ir (cfg, sp, n);
12380 CHECK_STACK_OVF (1);
12382 n = read16 (ip + 2);
12384 if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
12385 /* Avoid loading a struct just to load one of its fields */
12386 EMIT_NEW_LOCLOADA (cfg, ins, n);
12388 EMIT_NEW_LOCLOAD (cfg, ins, n);
12394 unsigned char *tmp_ip;
12395 CHECK_STACK_OVF (1);
12397 n = read16 (ip + 2);
12400 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12406 EMIT_NEW_LOCLOADA (cfg, ins, n);
12415 n = read16 (ip + 2);
12417 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12419 emit_stloc_ir (cfg, sp, header, n);
12423 case CEE_LOCALLOC: {
12425 MonoBasicBlock *non_zero_bb, *end_bb;
12426 int alloc_ptr = alloc_preg (cfg);
12428 if (sp != stack_start)
12430 if (cfg->method != method)
12432 * Inlining this into a loop in a parent could lead to
12433 * stack overflows which is different behavior than the
12434 * non-inlined case, thus disable inlining in this case.
12436 INLINE_FAILURE("localloc");
12438 NEW_BBLOCK (cfg, non_zero_bb);
12439 NEW_BBLOCK (cfg, end_bb);
12441 /* if size != zero */
12442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12445 //size is zero, so result is NULL
12446 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12449 MONO_START_BB (cfg, non_zero_bb);
12450 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12451 ins->dreg = alloc_ptr;
12452 ins->sreg1 = sp [0]->dreg;
12453 ins->type = STACK_PTR;
12454 MONO_ADD_INS (cfg->cbb, ins);
12456 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12458 ins->flags |= MONO_INST_INIT;
12460 MONO_START_BB (cfg, end_bb);
12461 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12462 ins->type = STACK_PTR;
12468 case CEE_ENDFILTER: {
12469 MonoExceptionClause *clause, *nearest;
12474 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12476 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12477 ins->sreg1 = (*sp)->dreg;
12478 MONO_ADD_INS (cfg->cbb, ins);
12479 start_new_bblock = 1;
12483 for (cc = 0; cc < header->num_clauses; ++cc) {
12484 clause = &header->clauses [cc];
12485 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12486 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12487 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12490 g_assert (nearest);
12491 if ((ip - header->code) != nearest->handler_offset)
12496 case CEE_UNALIGNED_:
12497 ins_flag |= MONO_INST_UNALIGNED;
12498 /* FIXME: record alignment? we can assume 1 for now */
12502 case CEE_VOLATILE_:
12503 ins_flag |= MONO_INST_VOLATILE;
12507 ins_flag |= MONO_INST_TAILCALL;
12508 cfg->flags |= MONO_CFG_HAS_TAIL;
12509 /* Can't inline tail calls at this time */
12510 inline_costs += 100000;
12517 token = read32 (ip + 2);
12518 klass = mini_get_class (method, token, generic_context);
12519 CHECK_TYPELOAD (klass);
12520 if (generic_class_is_reference_type (cfg, klass))
12521 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12523 mini_emit_initobj (cfg, *sp, NULL, klass);
12527 case CEE_CONSTRAINED_:
12529 token = read32 (ip + 2);
12530 constrained_class = mini_get_class (method, token, generic_context);
12531 CHECK_TYPELOAD (constrained_class);
12537 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12545 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12553 ins_flag |= MONO_INST_NOTYPECHECK;
12555 ins_flag |= MONO_INST_NORANGECHECK;
12556 /* we ignore the no-nullcheck for now since we
12557 * really do it explicitly only when doing callvirt->call
12561 case CEE_RETHROW: {
12563 int handler_offset = -1;
12565 for (i = 0; i < header->num_clauses; ++i) {
12566 MonoExceptionClause *clause = &header->clauses [i];
12567 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12568 handler_offset = clause->handler_offset;
12573 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12575 if (handler_offset == -1)
12578 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12579 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12580 ins->sreg1 = load->dreg;
12581 MONO_ADD_INS (cfg->cbb, ins);
12583 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12584 MONO_ADD_INS (cfg->cbb, ins);
12587 link_bblock (cfg, cfg->cbb, end_bblock);
12588 start_new_bblock = 1;
12596 CHECK_STACK_OVF (1);
12598 token = read32 (ip + 2);
12599 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12600 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12603 val = mono_type_size (type, &ialign);
12605 MonoClass *klass = mini_get_class (method, token, generic_context);
12606 CHECK_TYPELOAD (klass);
12608 val = mono_type_size (&klass->byval_arg, &ialign);
12610 if (mini_is_gsharedvt_klass (klass))
12611 GSHAREDVT_FAILURE (*ip);
12613 EMIT_NEW_ICONST (cfg, ins, val);
12618 case CEE_REFANYTYPE: {
12619 MonoInst *src_var, *src;
12621 GSHAREDVT_FAILURE (*ip);
12627 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12629 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12630 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12631 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12636 case CEE_READONLY_:
12649 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12659 g_warning ("opcode 0x%02x not handled", *ip);
12663 if (start_new_bblock != 1)
12666 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12667 if (cfg->cbb->next_bb) {
12668 /* This could already be set because of inlining, #693905 */
12669 MonoBasicBlock *bb = cfg->cbb;
12671 while (bb->next_bb)
12673 bb->next_bb = end_bblock;
12675 cfg->cbb->next_bb = end_bblock;
12678 if (cfg->method == method && cfg->domainvar) {
12680 MonoInst *get_domain;
12682 cfg->cbb = init_localsbb;
12684 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12685 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12686 MONO_ADD_INS (cfg->cbb, store);
12689 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12690 if (cfg->compile_aot)
12691 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12692 mono_get_got_var (cfg);
12695 if (cfg->method == method && cfg->got_var)
12696 mono_emit_load_got_addr (cfg);
12698 if (init_localsbb) {
12699 cfg->cbb = init_localsbb;
12701 for (i = 0; i < header->num_locals; ++i) {
12702 emit_init_local (cfg, i, header->locals [i], init_locals);
12706 if (cfg->init_ref_vars && cfg->method == method) {
12707 /* Emit initialization for ref vars */
12708 // FIXME: Avoid duplication initialization for IL locals.
12709 for (i = 0; i < cfg->num_varinfo; ++i) {
12710 MonoInst *ins = cfg->varinfo [i];
12712 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12713 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12717 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12718 cfg->cbb = init_localsbb;
12719 emit_push_lmf (cfg);
12722 cfg->cbb = init_localsbb;
12723 mini_profiler_emit_enter (cfg);
12726 MonoBasicBlock *bb;
12729 * Make seq points at backward branch targets interruptable.
12731 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12732 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12733 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12736 /* Add a sequence point for method entry/exit events */
12737 if (seq_points && cfg->gen_sdb_seq_points) {
12738 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12739 MONO_ADD_INS (init_localsbb, ins);
12740 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12741 MONO_ADD_INS (cfg->bb_exit, ins);
12745 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12746 * the code they refer to was dead (#11880).
12748 if (sym_seq_points) {
12749 for (i = 0; i < header->code_size; ++i) {
12750 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12753 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12754 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12761 if (cfg->method == method) {
12762 MonoBasicBlock *bb;
12763 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12764 if (bb == cfg->bb_init)
12767 bb->region = mono_find_block_region (cfg, bb->real_offset);
12769 mono_create_spvar_for_region (cfg, bb->region);
12770 if (cfg->verbose_level > 2)
12771 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12774 MonoBasicBlock *bb;
12775 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12776 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12777 bb->real_offset = inline_offset;
12781 if (inline_costs < 0) {
12784 /* Method is too large */
12785 mname = mono_method_full_name (method, TRUE);
12786 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12790 if ((cfg->verbose_level > 2) && (cfg->method == method))
12791 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12796 g_assert (!mono_error_ok (&cfg->error));
12800 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12804 set_exception_type_from_invalid_il (cfg, method, ip);
12808 g_slist_free (class_inits);
12809 mono_basic_block_free (original_bb);
12810 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12811 if (cfg->exception_type)
12814 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-register-to-memory opcode (*_MEMBASE_REG) to the variant
 * which stores an immediate value instead (*_MEMBASE_IMM).  Only the store
 * opcodes listed below have an immediate form; any other opcode trips the
 * assert.
 * NOTE(review): the surrounding switch scaffolding (return type,
 * `switch (opcode) {`, `default:`) is elided in this extract -- confirm
 * against the full file.
 */
store_membase_reg_to_store_membase_imm (int opcode)
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	case OP_STOREI8_MEMBASE_REG:
		return OP_STOREI8_MEMBASE_IMM;
	g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant taking an
 * immediate second operand (e.g. integer add -> OP_IADD_IMM).  The matching
 * `case` labels are elided in this extract; each `return` below corresponds
 * to the non-immediate form of the opcode it returns.  Presumably returns a
 * sentinel (-1) for opcodes with no immediate form -- confirm against the
 * full file.
 */
mono_op_to_op_imm (int opcode)
		return OP_IADD_IMM;
		return OP_ISUB_IMM;
		return OP_IDIV_IMM;
		return OP_IDIV_UN_IMM;
		return OP_IREM_IMM;
		return OP_IREM_UN_IMM;
		return OP_IMUL_IMM;
		return OP_IAND_IMM;
		return OP_IXOR_IMM;
		return OP_ISHL_IMM;
		return OP_ISHR_IMM;
		return OP_ISHR_UN_IMM;
		return OP_LADD_IMM;
		return OP_LSUB_IMM;
		return OP_LAND_IMM;
		return OP_LXOR_IMM;
		return OP_LSHL_IMM;
		return OP_LSHR_IMM;
		return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
		/* Only available when registers are 64 bit wide */
		return OP_LREM_IMM;
		return OP_COMPARE_IMM;
		return OP_ICOMPARE_IMM;
		return OP_LCOMPARE_IMM;
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
#if defined(TARGET_X86) || defined (TARGET_AMD64)
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	case OP_VOIDCALL_REG:
		/* An indirect call whose target became a constant needs no target register */
		return OP_VOIDCALL;
		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR load opcode (the case labels other than CEE_LDIND_REF
 * are elided in this extract).  Asserts on unknown opcodes.
 */
ldind_to_load_membase (int opcode)
		return OP_LOADI1_MEMBASE;
		return OP_LOADU1_MEMBASE;
		return OP_LOADI2_MEMBASE;
		return OP_LOADU2_MEMBASE;
		return OP_LOADI4_MEMBASE;
		return OP_LOADU4_MEMBASE;
		return OP_LOAD_MEMBASE;
	case CEE_LDIND_REF:
		/* Object references load as a full pointer-sized word */
		return OP_LOAD_MEMBASE;
		return OP_LOADI8_MEMBASE;
		return OP_LOADR4_MEMBASE;
		return OP_LOADR8_MEMBASE;
	g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR store opcode (case labels other than
 * CEE_STIND_REF are elided in this extract).  Asserts on unknown opcodes.
 */
stind_to_store_membase (int opcode)
		return OP_STOREI1_MEMBASE_REG;
		return OP_STOREI2_MEMBASE_REG;
		return OP_STOREI4_MEMBASE_REG;
	case CEE_STIND_REF:
		/* Object references store as a full pointer-sized word */
		return OP_STORE_MEMBASE_REG;
		return OP_STOREI8_MEMBASE_REG;
		return OP_STORER4_MEMBASE_REG;
		return OP_STORER8_MEMBASE_REG;
	g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a *_MEMBASE load (base register + displacement) to the *_MEM form
 * which loads from an absolute address.  Only x86/amd64 provide such
 * opcodes; the fallthrough result for other targets is elided in this
 * extract (presumably -1 -- confirm).
 */
mono_load_membase_to_load_mem (int opcode)
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 8-byte absolute loads only exist on 64 bit */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return the read-modify-write memory form of OPCODE, used to fold
 * "load var; op; store var" into a single x86/amd64 <op>_MEMBASE
 * instruction when the destination variable lives at [basereg + offset].
 * STORE_OPCODE is the store which would otherwise write back the result;
 * only full-word stores (plus 8-byte stores on amd64) are eligible.
 * Callers treat -1 as "no memory form available".  The `case` labels for
 * the arithmetic opcodes are elided in this extract.
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(TARGET_X86)
	/* Only 32-bit-wide stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		/* 32 bit operations reuse the x86 opcodes */
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
		/* 64 bit operations use the amd64-specific opcodes */
		return OP_AMD64_ADD_MEMBASE_REG;
		return OP_AMD64_SUB_MEMBASE_REG;
		return OP_AMD64_AND_MEMBASE_REG;
		return OP_AMD64_OR_MEMBASE_REG;
		return OP_AMD64_XOR_MEMBASE_REG;
		return OP_AMD64_ADD_MEMBASE_IMM;
		return OP_AMD64_SUB_MEMBASE_IMM;
		return OP_AMD64_AND_MEMBASE_IMM;
		return OP_AMD64_OR_MEMBASE_IMM;
		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode directly into a one-byte store: on
 * x86/amd64 a SETcc can write straight to memory when the pending store is
 * OP_STOREI1_MEMBASE_REG.  The elided `case` labels presumably select the
 * integer equal / not-equal compare opcodes -- confirm against the full
 * file.  Callers presumably treat -1 as "no fusion possible".
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (store_opcode == OP_STOREI1_MEMBASE_REG)
		return OP_X86_SETEQ_MEMBASE;
	if (store_opcode == OP_STOREI1_MEMBASE_REG)
		return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding OPCODE's first source operand into a memory-operand
 * form of the instruction (x86/amd64 only): instead of "load; op reg", emit
 * "op [mem]".  LOAD_OPCODE is the load being folded; its width must match
 * what the fused instruction reads.  Callers presumably treat -1 as "no
 * fusion possible".
 */
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* Other fusions require a full-word load */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
		return OP_X86_COMPARE_MEMBASE_REG;
#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
		return OP_X86_PUSH_MEMBASE;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		/* Under ILP32 (x32) pointers are 4 bytes, so compare as 32 bit */
		if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding OPCODE's second source operand into a
 * register-with-memory-operand form ("op reg, [mem]") on x86/amd64.
 * LOAD_OPCODE is the load being folded; the amd64 path picks 32 vs 64 bit
 * variants from the load width (and from ilp32/x32 pointer size for
 * OP_LOAD_MEMBASE).  The arithmetic `case` labels are elided in this
 * extract; callers presumably treat -1 as "no fusion possible".
 */
op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
#ifdef TARGET_AMD64
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
		/* 32-bit-wide loads: the x86 forms operate on 32 bit */
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
			return OP_X86_ADD_REG_MEMBASE;
			return OP_X86_SUB_REG_MEMBASE;
			return OP_X86_AND_REG_MEMBASE;
			return OP_X86_OR_REG_MEMBASE;
			return OP_X86_XOR_REG_MEMBASE;
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
		/* 64-bit-wide loads need the amd64-specific forms */
			return OP_AMD64_COMPARE_REG_MEMBASE;
			return OP_AMD64_ADD_REG_MEMBASE;
			return OP_AMD64_SUB_REG_MEMBASE;
			return OP_AMD64_AND_REG_MEMBASE;
			return OP_AMD64_OR_REG_MEMBASE;
			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes which
 * are emulated in software on this architecture: long shifts on 32 bit
 * targets without native long-shift support, and mul/div/rem when the
 * MONO_ARCH_EMULATE_* macros are set.  The per-opcode `case` labels inside
 * each #if block are elided in this extract; presumably they return a
 * sentinel (-1) before reaching the default conversion below -- confirm.
 */
mono_op_to_op_imm_noemul (int opcode)
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
		return mono_op_to_op_imm (opcode);
13282 * mono_handle_global_vregs:
13284 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
mono_handle_global_vregs (MonoCompile *cfg)
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;

	/*
	 * vreg_to_bb maps a vreg to (block_num + 1) of the single bblock using
	 * it, or -1 once it has been seen in a second bblock.
	 * NOTE(review): sizeof (gint32*) over-allocates on 64 bit (the elements
	 * are gint32) and the "+ 1" binds outside the multiply -- presumably
	 * intended as sizeof (gint32) * (cfg->next_vreg + 1).  Harmless
	 * over-allocation, but worth confirming/fixing in the full file.
	 */
	vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			/* By this point all IL-level opcodes must have been lowered */
			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* Visit dreg, sreg1, sreg2 and sreg3 of the instruction in turn,
			 * skipping unused slots (spec char ' ') */
			for (regindex = 0; regindex < 4; regindex ++) {
				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Low vreg numbers denote physical registers
					 * (vreg < MONO_MAX_IREGS / MONO_MAX_FREGS) */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						/* Create a global variable typed after the register class;
						 * the selector lines (presumably a switch on regtype) are
						 * elided in this extract -- confirm against the full file */
						if (vreg_is_ref (cfg, vreg))
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							g_assert_not_reached ();

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
#if SIZEOF_REGISTER == 8
#if !defined(TARGET_X86)
		/* Enabling this screws up the fp stack on x86 */
		if (mono_arch_is_soft_float ())

		if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))

		/* Arguments are implicitly global */
		/* Putting R4 vars into registers doesn't work currently */
		/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
		if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
			/*
			 * Make that the variable's liveness interval doesn't contain a call, since
			 * that would cause the lvreg to be spilled, making the whole optimization
			 * useless.
			 */
			/* This is too slow for JIT compilation */
			if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
				int def_index, call_index, ins_index;
				gboolean spilled = FALSE;

				for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
					const char *spec = INS_INFO (ins->opcode);

					if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
						def_index = ins_index;

					/* NOTE(review): both clauses below test SRC1/sreg1 -- the
					 * second is a duplicate and almost certainly should test
					 * MONO_INST_SRC2 / ins->sreg2, so uses through the second
					 * source register are missed here.  Confirm and fix in the
					 * full file. */
					if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
						((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
						if (call_index > def_index) {

					if (MONO_IS_CALL (ins))
						call_index = ins_index;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
			/* Demote: kill the variable and keep only the raw vreg */
			var->flags |= MONO_INST_IS_DEAD;
			cfg->vreg_to_inst [var->dreg] = NULL;

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			cfg->varinfo [pos] = cfg->varinfo [i];
			cfg->varinfo [pos]->inst_c0 = pos;
			memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
			cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
			if (cfg->varinfo [pos]->type == STACK_I8) {
				/* Modify the two component vars too */
				var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
				var1->inst_c0 = pos;
				var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
				var1->inst_c0 = pos;

	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
13527 * mono_allocate_gsharedvt_vars:
13529 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13530 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
mono_allocate_gsharedvt_vars (MonoCompile *cfg)
	/* One entry per vreg: 0 = not gsharedvt, idx + 1 = local slot in the
	 * runtime-info entries array, -1 = gsharedvt argument */
	cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);

	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *ins = cfg->varinfo [i];

		if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
			if (i >= cfg->locals_start) {
				/* Local: allocate an offset slot resolved at runtime */
				idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
				cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
				ins->opcode = OP_GSHAREDVT_LOCAL;
				ins->inst_imm = idx;
				/* Argument: lives at a register + offset picked by the backend */
				cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
				ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13560 * mono_spill_global_vars:
13562 * Generate spill code for variables which are not allocated to registers,
13563 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13564 * code is generated which could be optimized by the local optimization passes.
13567 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13569 MonoBasicBlock *bb;
13571 int orig_next_vreg;
13572 guint32 *vreg_to_lvreg;
13574 guint32 i, lvregs_len, lvregs_size;
13575 gboolean dest_has_lvreg = FALSE;
13576 MonoStackType stacktypes [128];
13577 MonoInst **live_range_start, **live_range_end;
13578 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13580 *need_local_opts = FALSE;
13582 memset (spec2, 0, sizeof (spec2));
13584 /* FIXME: Move this function to mini.c */
13585 stacktypes ['i'] = STACK_PTR;
13586 stacktypes ['l'] = STACK_I8;
13587 stacktypes ['f'] = STACK_R8;
13588 #ifdef MONO_ARCH_SIMD_INTRINSICS
13589 stacktypes ['x'] = STACK_VTYPE;
13592 #if SIZEOF_REGISTER == 4
13593 /* Create MonoInsts for longs */
13594 for (i = 0; i < cfg->num_varinfo; i++) {
13595 MonoInst *ins = cfg->varinfo [i];
13597 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13598 switch (ins->type) {
13603 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13606 g_assert (ins->opcode == OP_REGOFFSET);
13608 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13610 tree->opcode = OP_REGOFFSET;
13611 tree->inst_basereg = ins->inst_basereg;
13612 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13614 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13616 tree->opcode = OP_REGOFFSET;
13617 tree->inst_basereg = ins->inst_basereg;
13618 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13628 if (cfg->compute_gc_maps) {
13629 /* registers need liveness info even for !non refs */
13630 for (i = 0; i < cfg->num_varinfo; i++) {
13631 MonoInst *ins = cfg->varinfo [i];
13633 if (ins->opcode == OP_REGVAR)
13634 ins->flags |= MONO_INST_GC_TRACK;
13638 /* FIXME: widening and truncation */
13641 * As an optimization, when a variable allocated to the stack is first loaded into
13642 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13643 * the variable again.
13645 orig_next_vreg = cfg->next_vreg;
13646 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13647 lvregs_size = 1024;
13648 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13652 * These arrays contain the first and last instructions accessing a given
13654 * Since we emit bblocks in the same order we process them here, and we
13655 * don't split live ranges, these will precisely describe the live range of
13656 * the variable, i.e. the instruction range where a valid value can be found
13657 * in the variables location.
13658 * The live range is computed using the liveness info computed by the liveness pass.
13659 * We can't use vmv->range, since that is an abstract live range, and we need
13660 * one which is instruction precise.
13661 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13663 /* FIXME: Only do this if debugging info is requested */
13664 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13665 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13666 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13667 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13669 /* Add spill loads/stores */
13670 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13673 if (cfg->verbose_level > 2)
13674 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13676 /* Clear vreg_to_lvreg array */
13677 for (i = 0; i < lvregs_len; i++)
13678 vreg_to_lvreg [lvregs [i]] = 0;
13682 MONO_BB_FOR_EACH_INS (bb, ins) {
13683 const char *spec = INS_INFO (ins->opcode);
13684 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13685 gboolean store, no_lvreg;
13686 int sregs [MONO_MAX_SRC_REGS];
13688 if (G_UNLIKELY (cfg->verbose_level > 2))
13689 mono_print_ins (ins);
13691 if (ins->opcode == OP_NOP)
13695 * We handle LDADDR here as well, since it can only be decomposed
13696 * when variable addresses are known.
13698 if (ins->opcode == OP_LDADDR) {
13699 MonoInst *var = (MonoInst *)ins->inst_p0;
13701 if (var->opcode == OP_VTARG_ADDR) {
13702 /* Happens on SPARC/S390 where vtypes are passed by reference */
13703 MonoInst *vtaddr = var->inst_left;
13704 if (vtaddr->opcode == OP_REGVAR) {
13705 ins->opcode = OP_MOVE;
13706 ins->sreg1 = vtaddr->dreg;
13708 else if (var->inst_left->opcode == OP_REGOFFSET) {
13709 ins->opcode = OP_LOAD_MEMBASE;
13710 ins->inst_basereg = vtaddr->inst_basereg;
13711 ins->inst_offset = vtaddr->inst_offset;
13714 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13715 /* gsharedvt arg passed by ref */
13716 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13718 ins->opcode = OP_LOAD_MEMBASE;
13719 ins->inst_basereg = var->inst_basereg;
13720 ins->inst_offset = var->inst_offset;
13721 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13722 MonoInst *load, *load2, *load3;
13723 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13724 int reg1, reg2, reg3;
13725 MonoInst *info_var = cfg->gsharedvt_info_var;
13726 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13730 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13733 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13735 g_assert (info_var);
13736 g_assert (locals_var);
13738 /* Mark the instruction used to compute the locals var as used */
13739 cfg->gsharedvt_locals_var_ins = NULL;
13741 /* Load the offset */
13742 if (info_var->opcode == OP_REGOFFSET) {
13743 reg1 = alloc_ireg (cfg);
13744 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13745 } else if (info_var->opcode == OP_REGVAR) {
13747 reg1 = info_var->dreg;
13749 g_assert_not_reached ();
13751 reg2 = alloc_ireg (cfg);
13752 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13753 /* Load the locals area address */
13754 reg3 = alloc_ireg (cfg);
13755 if (locals_var->opcode == OP_REGOFFSET) {
13756 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13757 } else if (locals_var->opcode == OP_REGVAR) {
13758 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13760 g_assert_not_reached ();
13762 /* Compute the address */
13763 ins->opcode = OP_PADD;
13767 mono_bblock_insert_before_ins (bb, ins, load3);
13768 mono_bblock_insert_before_ins (bb, load3, load2);
13770 mono_bblock_insert_before_ins (bb, load2, load);
13772 g_assert (var->opcode == OP_REGOFFSET);
13774 ins->opcode = OP_ADD_IMM;
13775 ins->sreg1 = var->inst_basereg;
13776 ins->inst_imm = var->inst_offset;
13779 *need_local_opts = TRUE;
13780 spec = INS_INFO (ins->opcode);
13783 if (ins->opcode < MONO_CEE_LAST) {
13784 mono_print_ins (ins);
13785 g_assert_not_reached ();
13789 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13793 if (MONO_IS_STORE_MEMBASE (ins)) {
13794 tmp_reg = ins->dreg;
13795 ins->dreg = ins->sreg2;
13796 ins->sreg2 = tmp_reg;
13799 spec2 [MONO_INST_DEST] = ' ';
13800 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13801 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13802 spec2 [MONO_INST_SRC3] = ' ';
13804 } else if (MONO_IS_STORE_MEMINDEX (ins))
13805 g_assert_not_reached ();
13810 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13811 printf ("\t %.3s %d", spec, ins->dreg);
13812 num_sregs = mono_inst_get_src_registers (ins, sregs);
13813 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13814 printf (" %d", sregs [srcindex]);
13821 regtype = spec [MONO_INST_DEST];
13822 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13825 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13826 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13827 MonoInst *store_ins;
13829 MonoInst *def_ins = ins;
13830 int dreg = ins->dreg; /* The original vreg */
13832 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13834 if (var->opcode == OP_REGVAR) {
13835 ins->dreg = var->dreg;
13836 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13838 * Instead of emitting a load+store, use a _membase opcode.
13840 g_assert (var->opcode == OP_REGOFFSET);
13841 if (ins->opcode == OP_MOVE) {
13845 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13846 ins->inst_basereg = var->inst_basereg;
13847 ins->inst_offset = var->inst_offset;
13850 spec = INS_INFO (ins->opcode);
13854 g_assert (var->opcode == OP_REGOFFSET);
13856 prev_dreg = ins->dreg;
13858 /* Invalidate any previous lvreg for this vreg */
13859 vreg_to_lvreg [ins->dreg] = 0;
13863 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13865 store_opcode = OP_STOREI8_MEMBASE_REG;
13868 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13870 #if SIZEOF_REGISTER != 8
13871 if (regtype == 'l') {
13872 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
13873 mono_bblock_insert_after_ins (bb, ins, store_ins);
13874 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
13875 mono_bblock_insert_after_ins (bb, ins, store_ins);
13876 def_ins = store_ins;
13881 g_assert (store_opcode != OP_STOREV_MEMBASE);
13883 /* Try to fuse the store into the instruction itself */
13884 /* FIXME: Add more instructions */
13885 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13886 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13887 ins->inst_imm = ins->inst_c0;
13888 ins->inst_destbasereg = var->inst_basereg;
13889 ins->inst_offset = var->inst_offset;
13890 spec = INS_INFO (ins->opcode);
13891 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
13892 ins->opcode = store_opcode;
13893 ins->inst_destbasereg = var->inst_basereg;
13894 ins->inst_offset = var->inst_offset;
13898 tmp_reg = ins->dreg;
13899 ins->dreg = ins->sreg2;
13900 ins->sreg2 = tmp_reg;
13903 spec2 [MONO_INST_DEST] = ' ';
13904 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13905 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13906 spec2 [MONO_INST_SRC3] = ' ';
13908 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13909 // FIXME: The backends expect the base reg to be in inst_basereg
13910 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13912 ins->inst_basereg = var->inst_basereg;
13913 ins->inst_offset = var->inst_offset;
13914 spec = INS_INFO (ins->opcode);
13916 /* printf ("INS: "); mono_print_ins (ins); */
13917 /* Create a store instruction */
13918 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13920 /* Insert it after the instruction */
13921 mono_bblock_insert_after_ins (bb, ins, store_ins);
13923 def_ins = store_ins;
13926 * We can't assign ins->dreg to var->dreg here, since the
13927 * sregs could use it. So set a flag, and do it after
13930 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13931 dest_has_lvreg = TRUE;
13936 if (def_ins && !live_range_start [dreg]) {
13937 live_range_start [dreg] = def_ins;
13938 live_range_start_bb [dreg] = bb;
13941 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13944 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13945 tmp->inst_c1 = dreg;
13946 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13953 num_sregs = mono_inst_get_src_registers (ins, sregs);
13954 for (srcindex = 0; srcindex < 3; ++srcindex) {
13955 regtype = spec [MONO_INST_SRC1 + srcindex];
13956 sreg = sregs [srcindex];
13958 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13959 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13960 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13961 MonoInst *use_ins = ins;
13962 MonoInst *load_ins;
13963 guint32 load_opcode;
13965 if (var->opcode == OP_REGVAR) {
13966 sregs [srcindex] = var->dreg;
13967 //mono_inst_set_src_registers (ins, sregs);
13968 live_range_end [sreg] = use_ins;
13969 live_range_end_bb [sreg] = bb;
13971 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13974 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13975 /* var->dreg is a hreg */
13976 tmp->inst_c1 = sreg;
13977 mono_bblock_insert_after_ins (bb, ins, tmp);
13983 g_assert (var->opcode == OP_REGOFFSET);
13985 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13987 g_assert (load_opcode != OP_LOADV_MEMBASE);
13989 if (vreg_to_lvreg [sreg]) {
13990 g_assert (vreg_to_lvreg [sreg] != -1);
13992 /* The variable is already loaded to an lvreg */
13993 if (G_UNLIKELY (cfg->verbose_level > 2))
13994 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13995 sregs [srcindex] = vreg_to_lvreg [sreg];
13996 //mono_inst_set_src_registers (ins, sregs);
14000 /* Try to fuse the load into the instruction */
14001 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14002 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14003 sregs [0] = var->inst_basereg;
14004 //mono_inst_set_src_registers (ins, sregs);
14005 ins->inst_offset = var->inst_offset;
14006 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14007 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14008 sregs [1] = var->inst_basereg;
14009 //mono_inst_set_src_registers (ins, sregs);
14010 ins->inst_offset = var->inst_offset;
14012 if (MONO_IS_REAL_MOVE (ins)) {
14013 ins->opcode = OP_NOP;
14016 //printf ("%d ", srcindex); mono_print_ins (ins);
14018 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14020 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14021 if (var->dreg == prev_dreg) {
14023 * sreg refers to the value loaded by the load
14024 * emitted below, but we need to use ins->dreg
14025 * since it refers to the store emitted earlier.
14029 g_assert (sreg != -1);
14030 vreg_to_lvreg [var->dreg] = sreg;
14031 if (lvregs_len >= lvregs_size) {
14032 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14033 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14034 lvregs = new_lvregs;
14037 lvregs [lvregs_len ++] = var->dreg;
14041 sregs [srcindex] = sreg;
14042 //mono_inst_set_src_registers (ins, sregs);
14044 #if SIZEOF_REGISTER != 8
14045 if (regtype == 'l') {
14046 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14047 mono_bblock_insert_before_ins (bb, ins, load_ins);
14048 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14049 mono_bblock_insert_before_ins (bb, ins, load_ins);
14050 use_ins = load_ins;
14055 #if SIZEOF_REGISTER == 4
14056 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14058 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14059 mono_bblock_insert_before_ins (bb, ins, load_ins);
14060 use_ins = load_ins;
14064 if (var->dreg < orig_next_vreg) {
14065 live_range_end [var->dreg] = use_ins;
14066 live_range_end_bb [var->dreg] = bb;
14069 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14072 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14073 tmp->inst_c1 = var->dreg;
14074 mono_bblock_insert_after_ins (bb, ins, tmp);
14078 mono_inst_set_src_registers (ins, sregs);
14080 if (dest_has_lvreg) {
14081 g_assert (ins->dreg != -1);
14082 vreg_to_lvreg [prev_dreg] = ins->dreg;
14083 if (lvregs_len >= lvregs_size) {
14084 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14085 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14086 lvregs = new_lvregs;
14089 lvregs [lvregs_len ++] = prev_dreg;
14090 dest_has_lvreg = FALSE;
14094 tmp_reg = ins->dreg;
14095 ins->dreg = ins->sreg2;
14096 ins->sreg2 = tmp_reg;
14099 if (MONO_IS_CALL (ins)) {
14100 /* Clear vreg_to_lvreg array */
14101 for (i = 0; i < lvregs_len; i++)
14102 vreg_to_lvreg [lvregs [i]] = 0;
14104 } else if (ins->opcode == OP_NOP) {
14106 MONO_INST_NULLIFY_SREGS (ins);
14109 if (cfg->verbose_level > 2)
14110 mono_print_ins_index (1, ins);
14113 /* Extend the live range based on the liveness info */
14114 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14115 for (i = 0; i < cfg->num_varinfo; i ++) {
14116 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14118 if (vreg_is_volatile (cfg, vi->vreg))
14119 /* The liveness info is incomplete */
14122 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14123 /* Live from at least the first ins of this bb */
14124 live_range_start [vi->vreg] = bb->code;
14125 live_range_start_bb [vi->vreg] = bb;
14128 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14129 /* Live at least until the last ins of this bb */
14130 live_range_end [vi->vreg] = bb->last_ins;
14131 live_range_end_bb [vi->vreg] = bb;
14138 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14139 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14141 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14142 for (i = 0; i < cfg->num_varinfo; ++i) {
14143 int vreg = MONO_VARINFO (cfg, i)->vreg;
14146 if (live_range_start [vreg]) {
14147 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14149 ins->inst_c1 = vreg;
14150 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14152 if (live_range_end [vreg]) {
14153 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14155 ins->inst_c1 = vreg;
14156 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14157 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14159 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14164 if (cfg->gsharedvt_locals_var_ins) {
14165 /* Nullify if unused */
14166 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14167 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14170 g_free (live_range_start);
14171 g_free (live_range_end);
14172 g_free (live_range_start_bb);
14173 g_free (live_range_end_bb);
14179 * - use 'iadd' instead of 'int_add'
14180 * - handling ovf opcodes: decompose in method_to_ir.
14181 * - unify iregs/fregs
14182 * -> partly done, the missing parts are:
14183 * - a more complete unification would involve unifying the hregs as well, so
14184 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14185 * would no longer map to the machine hregs, so the code generators would need to
14186 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14187 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14188 * fp/non-fp branches speeds it up by about 15%.
14189 * - use sext/zext opcodes instead of shifts
14191 * - get rid of TEMPLOADs if possible and use vregs instead
14192 * - clean up usage of OP_P/OP_ opcodes
14193 * - cleanup usage of DUMMY_USE
14194 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14196 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14197 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14198 * - make sure handle_stack_args () is called before the branch is emitted
14199 * - when the new IR is done, get rid of all unused stuff
14200 * - COMPARE/BEQ as separate instructions or unify them ?
14201 * - keeping them separate allows specialized compare instructions like
14202 * compare_imm, compare_membase
14203 * - most back ends unify fp compare+branch, fp compare+ceq
14204 * - integrate mono_save_args into inline_method
14205 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14206 * - handle long shift opts on 32 bit platforms somehow: they require
14207 * 3 sregs (2 for arg1 and 1 for arg2)
14208 * - make byref a 'normal' type.
14209 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14210 * variable if needed.
14211 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14212 * like inline_method.
14213 * - remove inlining restrictions
14214 * - fix LNEG and enable cfold of INEG
14215 * - generalize x86 optimizations like ldelema as a peephole optimization
14216 * - add store_mem_imm for amd64
14217 * - optimize the loading of the interruption flag in the managed->native wrappers
14218 * - avoid special handling of OP_NOP in passes
14219 * - move code inserting instructions into one function/macro.
14220 * - try a coalescing phase after liveness analysis
14221 * - add float -> vreg conversion + local optimizations on !x86
14222 * - figure out how to handle decomposed branches during optimizations, ie.
14223 * compare+branch, op_jump_table+op_br etc.
14224 * - promote RuntimeXHandles to vregs
14225 * - vtype cleanups:
14226 * - add a NEW_VARLOADA_VREG macro
14227 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14228 * accessing vtype fields.
14229 * - get rid of I8CONST on 64 bit platforms
14230 * - dealing with the increase in code size due to branches created during opcode
14232 * - use extended basic blocks
14233 * - all parts of the JIT
14234 * - handle_global_vregs () && local regalloc
14235 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14236 * - sources of increase in code size:
14239 * - isinst and castclass
14240 * - lvregs not allocated to global registers even if used multiple times
14241 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14243 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14244 * - add all micro optimizations from the old JIT
14245 * - put tree optimizations into the deadce pass
14246 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14247 * specific function.
14248 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14249 * fcompare + branchCC.
14250 * - create a helper function for allocating a stack slot, taking into account
14251 * MONO_CFG_HAS_SPILLUP.
14253 * - optimize mono_regstate2_alloc_int/float.
14254 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14255 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14256 * parts of the tree could be separated by other instructions, killing the tree
14257 * arguments, or stores killing loads etc. Also, should we fold loads into other
14258 * instructions if the result of the load is used multiple times ?
14259 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14260 * - LAST MERGE: 108395.
14261 * - when returning vtypes in registers, generate IR and append it to the end of the
14262 * last bb instead of doing it in the epilog.
14263 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14271 - When to decompose opcodes:
14272 - earlier: this makes some optimizations hard to implement, since the low level IR
14273 no longer contains the necessary information. But it is easier to do.
14274 - later: harder to implement, enables more optimizations.
14275 - Branches inside bblocks:
14276 - created when decomposing complex opcodes.
14277 - branches to another bblock: harmless, but not tracked by the branch
14278 optimizations, so need to branch to a label at the start of the bblock.
14279 - branches to inside the same bblock: very problematic, trips up the local
14280 reg allocator. Can be fixed by splitting the current bblock, but that is a
14281 complex operation, since some local vregs can become global vregs etc.
14282 - Local/global vregs:
14283 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14284 local register allocator.
14285 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14286 structure, created by mono_create_var (). Assigned to hregs or the stack by
14287 the global register allocator.
14288 - When to do optimizations like alu->alu_imm:
14289 - earlier -> saves work later on since the IR will be smaller/simpler
14290 - later -> can work on more instructions
14291 - Handling of valuetypes:
14292 - When a vtype is pushed on the stack, a new temporary is created, an
14293 instruction computing its address (LDADDR) is emitted and pushed on
14294 the stack. Need to optimize cases when the vtype is used immediately as in
14295 argument passing, stloc etc.
14296 - Instead of the to_end stuff in the old JIT, simply call the function handling
14297 the values on the stack before emitting the last instruction of the bb.
14300 #endif /* !DISABLE_JIT */