3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/* Inliner tuning constants: relative cost charged per branch, and the
 * cost-adjusted IL length limit for inline candidates.
 * NOTE(review): exact units are heuristic — confirm against the INLINE_* users. */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
/* Record an inline failure (only when actually inlining a non-wrapper) and
   bail out to the exception_exit label of the enclosing function. */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
/* Bail out if an earlier helper already stored an exception on the cfg. */
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
/* Record a FieldAccessException for (method, field) and bail out. */
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
/* Abort compilation of a generic-shared method that hit an unshareable opcode. */
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
/* Like GENERIC_SHARING_FAILURE, but only when compiling in gsharedvt mode. */
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
/* Flag an out-of-memory condition on the cfg error and bail out. */
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
/* Disable AOT for this method; logs the call site when verbose. */
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
/* Generic type-load failure: optionally break into the debugger, then bail. */
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
/* Type-load failure for a specific class: stash it in cfg->exception_ptr. */
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
/* Propagate a MonoError stored on the cfg; jumps to mono_error_exit
   (distinct from exception_exit above — note the different label). */
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whether 'ins' represents a load of the 'this' argument
 * (an OP_MOVE whose source vreg is argument 0 of a method with 'hasthis'). */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
/* Opcode -> immediate-variant mappings; non-static, used by other JIT files. */
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
/* NOTE(review): the return type of this declaration is elided in this extraction. */
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 /* helper methods signatures */
/* Cached icall signatures; created once in mono_create_helper_signatures (). */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 /* type loading helpers */
/* Generated accessors that look up and cache the named corlib classes
   (the TRY_GET variant tolerates the class being absent). */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
164 * Instruction metadata
/* First expansion of mini-ops.h: per-opcode dest/src1/src2(/src3) spec chars. */
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Public wrappers around the internal vreg allocators (alloc_*reg). */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
/* Allocate a dest vreg appropriate for the given eval-stack type. */
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed pointer / plain int) of VREG. */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Return the move opcode (OP_MOVE / OP_FMOVE / OP_RMOVE / ...) used to copy
 *   a value of TYPE between vregs.
 *   NOTE(review): many case labels and return statements are elided in this
 *   extraction; the visible fall-through order may not reflect the full switch.
 */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* R4 values use OP_RMOVE only when the backend keeps them in r4 form. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Non-SIMD generic insts recurse through the container class. */
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: print MSG, the basic block's number, its in/out edges
 *   (as "BB<num>(<dfn>)"), then every instruction in the block.
 */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
355 g_print ("%s", str->str);
356 g_string_free (str, TRUE);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *   Populate the cached helper_sig_* icall signatures declared above.
 *   The string form is "<ret> <arg>..." as parsed by mono_create_icall_signature.
 */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Break into the debugger on unverified IL when the debug option is enabled. */
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
376 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException (with field/method names) on the cfg error. */
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
/* Mark the cfg as failed-to-inline; logs MSG when verbose. */
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 *   Mark the cfg as having failed generic sharing because of OPCODE;
 *   logs a diagnostic when verbose.
 *   NOTE(review): the 'file' argument is unused here (only 'line' is printed);
 *   kept for signature parity with gsharedvt_failure ().
 */
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* Stray '\' line-continuation (leftover from the old macro form) removed. */
402 if (cfg->verbose_level > 2)
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Mark the cfg as having failed gsharedvt compilation; stores a detailed
 * message (including the call site) in cfg->exception_message. */
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
417 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
/* On unverifiable IL: in gsharedvt mode, fall back to per-instantiation
   compilation instead of treating it as a hard verification error. */
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
/* Fetch (creating and registering on demand) the basic block starting at IP,
   using the cil_offset_to_bb cache; IP out of method bounds is unverifiable. */
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
   into a fresh managed-pointer vreg. */
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
453 /* Emit conversions so both operands of a binary opcode are of the same type */
/* NOTE(review): portions of this function (return type, braces, several
 * statements) are elided in this extraction. */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
/* Widen the r4 side to r8 so both operands agree. */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
/* On 64-bit, sign-extend an I4 operand when mixed with a native-int one. */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
/* Pop two values (sp[0], sp[1] — implicit locals of the IR builder), emit the
   type-specialized binary op, and push the result. */
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit the type-specialized unary op, push the result. */
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair: link the true edge to the branch
   target and the false edge to next_block (or to the block at 'ip' when
   next_block is NULL), spilling the eval stack if it is non-empty. */
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
554 * link_bblock: Links two basic blocks
556 * links two basic blocks in the control flow graph, the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * the control flow ends to after 'from'.
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
/* First pass: handler regions (filter/finally/fault/catch). Region token
 * layout: ((clause_index + 1) << 8) | region_kind | clause->flags. */
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: try regions (checked after handlers so handlers win). */
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
/* Return whether OFFSET lies inside a finally or fault handler body. */
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE whose protected range contains IP but not
 * TARGET, i.e. the handlers a branch from IP to TARGET would leave.
 * Returns a GList of MonoExceptionClause* (caller owns the list). */
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
/* Return (creating on demand) the stack-pointer variable for REGION,
 * cached in cfg->spvars. */
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for a CIL offset. */
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return (creating on demand) the exception-object variable for OFFSET,
 * cached in cfg->exvars. */
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass for a load of TYPE.
 * NOTE(review): several case labels/returns are elided in this extraction. */
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers. */
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
/* Enums are loaded as their underlying integral type. */
797 if (type->data.klass->enumtype) {
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
828 * The following tables are used to quickly validate the IL code in type_from_op ().
/* All tables below are indexed by MonoStackType:
 * INV, I4, I8, PTR, R8, MP, OBJ, VTYPE (and R4 as a trailing 9th column in
 * some rows). STACK_INV entries mark invalid operand combinations. */
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of a negation per operand stack type. */
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
/* Integer-only binops (and/or/xor...): no float or reference operands. */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero encodes which compare forms
 * are allowed for the operand pair (see uses in type_from_op). */
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
/* Shift ops: result type follows the value being shifted (row index). */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
/* Each entry is an opcode DELTA added to the generic CIL opcode, indexed by
 * the operand's stack type; 0 means no specialization for that type. */
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels, break statements and the function's braces
 * are elided in this extraction; the visible lines are kept verbatim. */
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
/* Immediate compare has no second stack operand, so src1's type is used
 * for both table indices — presumably intentional; confirm upstream. */
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare set-ops: bit 0 of the table marks valid combinations. */
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned integer to floating point. */
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; width depends on the source and target. */
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
/* Map a CEE_LDIND_* opcode to the MonoClass of the value it loads. */
1165 ldind_to_type (int op)
1168 case CEE_LDIND_I1: return mono_defaults.sbyte_class;
1169 case CEE_LDIND_U1: return mono_defaults.byte_class;
1170 case CEE_LDIND_I2: return mono_defaults.int16_class;
1171 case CEE_LDIND_U2: return mono_defaults.uint16_class;
1172 case CEE_LDIND_I4: return mono_defaults.int32_class;
1173 case CEE_LDIND_U4: return mono_defaults.uint32_class;
1174 case CEE_LDIND_I8: return mono_defaults.int64_class;
1175 case CEE_LDIND_I: return mono_defaults.int_class;
1176 case CEE_LDIND_R4: return mono_defaults.single_class;
1177 case CEE_LDIND_R8: return mono_defaults.double_class;
1178 case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1179 default: g_error ("Unknown ldind type %d", op);
/* NOTE(review): the contents of this table are elided in this extraction. */
1186 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the eval-stack values in ARGS are compatible with SIG
 * (byref-ness, reference kinds, float widths). Returns 0 on mismatch.
 * NOTE(review): several branches/returns are elided in this extraction. */
1191 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1196 switch (args->type) {
1206 for (i = 0; i < sig->param_count; ++i) {
1207 switch (args [i].type) {
1211 if (!sig->params [i]->byref)
1215 if (sig->params [i]->byref)
1217 switch (sig->params [i]->type) {
1218 case MONO_TYPE_CLASS:
1219 case MONO_TYPE_STRING:
1220 case MONO_TYPE_OBJECT:
1221 case MONO_TYPE_SZARRAY:
1222 case MONO_TYPE_ARRAY:
1229 if (sig->params [i]->byref)
1231 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1240 /*if (!param_table [args [i].type] [sig->params [i]->type])
1248 * When we need a pointer to the current domain many times in a method, we
1249 * call mono_domain_get() once and we store the result in a local variable.
1250 * This function returns the variable that represents the MonoDomain*.
1252 inline static MonoInst *
1253 mono_get_domainvar (MonoCompile *cfg)
1255 if (!cfg->domainvar)
1256 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 return cfg->domainvar;
1261 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create cfg->got_var; returns NULL when no GOT var is needed
 * (JIT compile, backend without GOT, or llvm-only mode). */
1265 mono_get_got_var (MonoCompile *cfg)
1267 if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1269 if (!cfg->got_var) {
1270 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1272 return cfg->got_var;
/* Lazily create the variable holding the runtime generic context (rgctx). */
1276 mono_create_rgctx_var (MonoCompile *cfg)
1278 if (!cfg->rgctx_var) {
1279 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1280 /* force the var to be stack allocated */
1281 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/*
 * Return the variable holding the vtable/rgctx for gshared methods.
 * The rgctx var doubles as the vtable var; only valid under generic sharing.
 */
1286 mono_get_vtable_var (MonoCompile *cfg)
1288 g_assert (cfg->gshared);
1290 mono_create_rgctx_var (cfg);
1292 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType.
 * Unhandled stack types abort via g_error.
 */
1296 type_from_stack_type (MonoInst *ins) {
1297 switch (ins->type) {
1298 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1299 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1300 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1301 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1302 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the klass' this_arg (byref) type. */
1304 return &ins->klass->this_arg;
1305 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1306 case STACK_VTYPE: return &ins->klass->byval_arg;
1308 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType to the evaluation-stack type (STACK_*) it occupies.
 * Enums are first lowered to their underlying type.
 */
1313 static G_GNUC_UNUSED int
1314 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1316 t = mono_type_get_underlying_type (t);
1328 case MONO_TYPE_FNPTR:
1330 case MONO_TYPE_CLASS:
1331 case MONO_TYPE_STRING:
1332 case MONO_TYPE_OBJECT:
1333 case MONO_TYPE_SZARRAY:
1334 case MONO_TYPE_ARRAY:
/* R4 maps to STACK_R4 or STACK_R8 depending on backend float support. */
1340 return cfg->r4_stack_type;
1343 case MONO_TYPE_VALUETYPE:
1344 case MONO_TYPE_TYPEDBYREF:
1346 case MONO_TYPE_GENERICINST:
1347 if (mono_type_generic_inst_is_valuetype (t))
1353 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CEE_LDELEM_* / CEE_STELEM_* opcode to the element class it accesses.
 */
1360 array_access_to_klass (int opcode)
1364 return mono_defaults.byte_class;
1366 return mono_defaults.uint16_class;
1369 return mono_defaults.int_class;
1372 return mono_defaults.sbyte_class;
1375 return mono_defaults.int16_class;
1378 return mono_defaults.int32_class;
1380 return mono_defaults.uint32_class;
1383 return mono_defaults.int64_class;
1386 return mono_defaults.single_class;
1389 return mono_defaults.double_class;
1390 case CEE_LDELEM_REF:
1391 case CEE_STELEM_REF:
1392 return mono_defaults.object_class;
1394 g_assert_not_reached ();
1400 * We try to share variables when possible
/*
 * Return a variable to hold the value INS for stack slot SLOT at a bblock
 * boundary, reusing a previously created one (cached in cfg->intvars) when
 * the slot and stack type match.
 */
1403 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1408 /* inlining can result in deeper stacks */
1409 if (slot >= cfg->header->max_stack)
1410 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index the cache by (stack type, slot). */
1412 pos = ins->type - 1 + slot * STACK_MAX;
1414 switch (ins->type) {
1421 if ((vnum = cfg->intvars [pos]))
1422 return cfg->varinfo [vnum];
1423 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1424 cfg->intvars [pos] = res->inst_c0;
1427 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Record the image+token pair for KEY so the AOT compiler can later look up
 * the item without needing a generic context.
 */
1433 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1436 * Don't use this if a generic_context is set, since that means AOT can't
1437 * look up the method using just the image+token.
1438 * table == 0 means this is a reference made from a wrapper.
1440 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1441 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1442 jump_info_token->image = image;
1443 jump_info_token->token = token;
1444 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1449 * This function is called to handle items that are left on the evaluation stack
1450 * at basic block boundaries. What happens is that we save the values to local variables
1451 * and we reload them later when first entering the target basic block (with the
1452 * handle_loaded_temps () function).
1453 * A single join point will use the same variables (stored in the array bb->out_stack or
1454 * bb->in_stack, if the basic block is before or after the join point).
1456 * This function needs to be called _before_ emitting the last instruction of
1457 * the bb (i.e. before emitting a branch).
1458 * If the stack merge fails at a join point, cfg->unverifiable is set.
1461 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1464 MonoBasicBlock *bb = cfg->cbb;
1465 MonoBasicBlock *outb;
1466 MonoInst *inst, **locals;
1471 if (cfg->verbose_level > 3)
1472 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1473 if (!bb->out_scount) {
1474 bb->out_scount = count;
1475 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor that already has one. */
1477 for (i = 0; i < bb->out_count; ++i) {
1478 outb = bb->out_bb [i];
1479 /* exception handlers are linked, but they should not be considered for stack args */
1480 if (outb->flags & BB_EXCEPTION_HANDLER)
1482 //printf (" %d", outb->block_num);
1483 if (outb->in_stack) {
1485 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh out_stack variables. */
1491 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1492 for (i = 0; i < count; ++i) {
1494 * try to reuse temps already allocated for this purpose, if they occupy the same
1495 * stack slot and if they are of the same type.
1496 * This won't cause conflicts since if 'local' is used to
1497 * store one of the values in the in_stack of a bblock, then
1498 * the same variable will be used for the same outgoing stack
1500 * This doesn't work when inlining methods, since the bblocks
1501 * in the inlined methods do not inherit their in_stack from
1502 * the bblock they are inlined to. See bug #58863 for an
1505 if (cfg->inlined_method)
1506 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1508 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack into every successor's in_stack. */
1513 for (i = 0; i < bb->out_count; ++i) {
1514 outb = bb->out_bb [i];
1515 /* exception handlers are linked, but they should not be considered for stack args */
1516 if (outb->flags & BB_EXCEPTION_HANDLER)
1518 if (outb->in_scount) {
1519 if (outb->in_scount != bb->out_scount) {
1520 cfg->unverifiable = TRUE;
1523 continue; /* check they are the same locals */
1525 outb->in_scount = count;
1526 outb->in_stack = bb->out_stack;
1529 locals = bb->out_stack;
/* Spill the live stack items into the shared variables and rewrite sp. */
1531 for (i = 0; i < count; ++i) {
1532 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1533 inst->cil_code = sp [i]->cil_code;
1534 sp [i] = locals [i];
1535 if (cfg->verbose_level > 3)
1536 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1540 * It is possible that the out bblocks already have in_stack assigned, and
1541 * the in_stacks differ. In this case, we will store to all the different
1548 /* Find a bblock which has a different in_stack */
1550 while (bindex < bb->out_count) {
1551 outb = bb->out_bb [bindex];
1552 /* exception handlers are linked, but they should not be considered for stack args */
1553 if (outb->flags & BB_EXCEPTION_HANDLER) {
1557 if (outb->in_stack != locals) {
1558 for (i = 0; i < count; ++i) {
1559 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1560 inst->cil_code = sp [i]->cil_code;
1561 sp [i] = locals [i];
1562 if (cfg->verbose_level > 3)
1563 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1565 locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 * Emit IR loading a runtime constant described by PATCH_TYPE/DATA: an AOT
 * constant under AOT compilation, otherwise the value is resolved eagerly
 * and emitted as a plain pointer constant.
 */
1575 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1579 if (cfg->compile_aot) {
1580 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1586 ji.type = patch_type;
1587 ji.data.target = data;
1588 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1589 mono_error_assert_ok (&error);
1591 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mono_create_fast_tls_getter:
 * Create an OP_TLS_GET instruction for KEY when a static TLS offset is
 * known and the architecture supports fast TLS; NULL otherwise (and always
 * NULL under AOT, where offsets are not known at compile time).
 */
1597 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1599 int tls_offset = mono_tls_get_tls_offset (key);
1601 if (cfg->compile_aot)
1604 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1606 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1607 ins->dreg = mono_alloc_preg (cfg);
1608 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Counterpart of mono_create_fast_tls_getter: create an OP_TLS_SET storing
 * VALUE into the TLS slot for KEY, or return NULL when fast TLS is not
 * available (or under AOT).
 */
1615 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1617 int tls_offset = mono_tls_get_tls_offset (key);
1619 if (cfg->compile_aot)
1622 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1624 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1625 ins->sreg1 = value->dreg;
1626 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR reading the TLS slot KEY.  Uses the fast inline path when
 * available, otherwise falls back to calling a TLS getter: a dedicated
 * trampoline under AOT, or a JIT icall.
 */
1634 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1636 MonoInst *fast_tls = NULL;
1638 if (!mini_get_debug_options ()->use_fallback_tls)
1639 fast_tls = mono_create_fast_tls_getter (cfg, key);
1642 MONO_ADD_INS (cfg->cbb, fast_tls);
1646 if (cfg->compile_aot) {
1649 * tls getters are critical pieces of code and we don't want to resolve them
1650 * through the standard plt/tramp mechanism since we might expose ourselves
1651 * to crashes and infinite recursions.
1653 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1654 return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1656 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1657 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR storing VALUE into the TLS slot KEY; mirrors mono_create_tls_get
 * (fast inline path, AOT trampoline, or JIT icall fallback).
 */
1662 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1664 MonoInst *fast_tls = NULL;
1666 if (!mini_get_debug_options ()->use_fallback_tls)
1667 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1670 MONO_ADD_INS (cfg->cbb, fast_tls);
1674 if (cfg->compile_aot) {
1676 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1677 return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1679 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1680 return mono_emit_jit_icall (cfg, setter, &value);
1687 * Emit IR to push the current LMF onto the LMF stack.
1690 emit_push_lmf (MonoCompile *cfg)
1693 * Emit IR to push the LMF:
1694 * lmf_addr = <lmf_addr from tls>
1695 * lmf->lmf_addr = lmf_addr
1696 * lmf->prev_lmf = *lmf_addr
1699 MonoInst *ins, *lmf_ins;
1704 int lmf_reg, prev_lmf_reg;
1706 * Store lmf_addr in a variable, so it can be allocated to a global register.
1708 if (!cfg->lmf_addr_var)
1709 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from jit_tls + offset, the other reads it from TLS directly. */
1712 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1714 int jit_tls_dreg = ins->dreg;
1716 lmf_reg = alloc_preg (cfg);
1717 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1719 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1722 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Address of this method's LMF structure (an on-stack variable). */
1724 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1725 lmf_reg = ins->dreg;
1727 prev_lmf_reg = alloc_preg (cfg);
1728 /* Save previous_lmf */
1729 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1730 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make our LMF the top of the LMF stack. */
1732 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1738 * Emit IR to pop the current LMF from the LMF stack.
1741 emit_pop_lmf (MonoCompile *cfg)
1743 int lmf_reg, lmf_addr_reg;
1749 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1750 lmf_reg = ins->dreg;
1754 * Emit IR to pop the LMF:
1755 * *(lmf->lmf_addr) = lmf->prev_lmf
1757 /* This could be called before emit_push_lmf () */
1758 if (!cfg->lmf_addr_var)
1759 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1760 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1762 prev_lmf_reg = alloc_preg (cfg);
/* Restore the saved previous_lmf as the new top of the LMF stack. */
1763 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1764 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 * Pick the call IR opcode based on the callee's return TYPE and the call
 * kind: CALLI selects the *_REG form (indirect via register), VIRT the
 * *_MEMBASE form (vtable slot), otherwise the direct form.
 */
1768 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1771 type = mini_get_underlying_type (type);
1772 switch (type->type) {
1773 case MONO_TYPE_VOID:
1774 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1781 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1785 case MONO_TYPE_FNPTR:
1786 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1787 case MONO_TYPE_CLASS:
1788 case MONO_TYPE_STRING:
1789 case MONO_TYPE_OBJECT:
1790 case MONO_TYPE_SZARRAY:
1791 case MONO_TYPE_ARRAY:
1792 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1795 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1798 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1800 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1802 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1803 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
1804 if (type->data.klass->enumtype) {
1805 type = mono_class_enum_basetype (type->data.klass);
1808 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1809 case MONO_TYPE_TYPEDBYREF:
1810 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1811 case MONO_TYPE_GENERICINST:
1812 type = &type->data.generic_class->container_class->byval_arg;
1815 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are returned by reference, treated as vtypes. */
1817 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1819 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1824 //XXX this ignores if t is byref
1825 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1828 * target_type_is_incompatible:
1829 * @cfg: MonoCompile context
1831 * Check that the item @arg on the evaluation stack can be stored
1832 * in the target type (can be a local, or field, etc).
1833 * The cfg arg can be used to check if we need verification or just
1836 * Returns: non-0 value if arg can't be stored on a target.
1839 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1841 MonoType *simple_type;
1844 if (target->byref) {
1845 /* FIXME: check that the pointed to types match */
1846 if (arg->type == STACK_MP) {
1847 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1848 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1849 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1851 /* if the target is native int& or same type */
1852 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1855 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1856 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1857 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1861 if (arg->type == STACK_PTR)
/* Non-byref: dispatch on the lowered target type vs the value's stack type. */
1866 simple_type = mini_get_underlying_type (target);
1867 switch (simple_type->type) {
1868 case MONO_TYPE_VOID:
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1880 /* STACK_MP is needed when setting pinned locals */
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1886 case MONO_TYPE_FNPTR:
1888 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1889 * in native int. (#688008).
1891 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1894 case MONO_TYPE_CLASS:
1895 case MONO_TYPE_STRING:
1896 case MONO_TYPE_OBJECT:
1897 case MONO_TYPE_SZARRAY:
1898 case MONO_TYPE_ARRAY:
1899 if (arg->type != STACK_OBJ)
1901 /* FIXME: check type compatibility */
1905 if (arg->type != STACK_I8)
1909 if (arg->type != cfg->r4_stack_type)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 MonoClass *target_class;
1933 if (arg->type != STACK_VTYPE)
1935 klass = mono_class_from_mono_type (simple_type);
1936 target_class = mono_class_from_mono_type (target);
1937 /* The second cases is needed when doing partial sharing */
1938 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1942 if (arg->type != STACK_OBJ)
1944 /* FIXME: check type compatibility */
1948 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing. */
1949 g_assert (cfg->gshared);
1950 if (mini_type_var_is_vt (simple_type)) {
1951 if (arg->type != STACK_VTYPE)
1954 if (arg->type != STACK_OBJ)
1959 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1965 * Prepare arguments for passing to a function call.
1966 * Return a non-zero value if the arguments can't be passed to the given
1968 * The type checks are not yet complete and some conversions may need
1969 * casts on 32 or 64 bit architectures.
1971 * FIXME: implement this using target_type_is_incompatible ()
1974 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1976 MonoType *simple_type;
/* 'this' must be an object, managed pointer or native pointer. */
1980 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1984 for (i = 0; i < sig->param_count; ++i) {
1985 if (sig->params [i]->byref) {
1986 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1990 simple_type = mini_get_underlying_type (sig->params [i]);
1992 switch (simple_type->type) {
1993 case MONO_TYPE_VOID:
2002 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2008 case MONO_TYPE_FNPTR:
2009 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2012 case MONO_TYPE_CLASS:
2013 case MONO_TYPE_STRING:
2014 case MONO_TYPE_OBJECT:
2015 case MONO_TYPE_SZARRAY:
2016 case MONO_TYPE_ARRAY:
2017 if (args [i]->type != STACK_OBJ)
2022 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != cfg->r4_stack_type)
2030 if (args [i]->type != STACK_R8)
2033 case MONO_TYPE_VALUETYPE:
/* Enums are re-checked against their underlying type. */
2034 if (simple_type->data.klass->enumtype) {
2035 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_TYPEDBYREF:
2042 if (args [i]->type != STACK_VTYPE)
2045 case MONO_TYPE_GENERICINST:
2046 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2049 case MONO_TYPE_MVAR:
2051 if (args [i]->type != STACK_VTYPE)
2055 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Convert a *CALL_MEMBASE (virtual, vtable-slot) opcode to the equivalent
 * direct call opcode; used when a virtual call can be statically dispatched.
 */
2063 callvirt_to_call (int opcode)
2066 case OP_CALL_MEMBASE:
2068 case OP_VOIDCALL_MEMBASE:
2070 case OP_FCALL_MEMBASE:
2072 case OP_RCALL_MEMBASE:
2074 case OP_VCALL_MEMBASE:
2076 case OP_LCALL_MEMBASE:
2079 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Convert a *CALL_MEMBASE opcode to the equivalent indirect (*CALL_REG)
 * opcode, for calls through an address held in a register.
 */
2086 callvirt_to_call_reg (int opcode)
2089 case OP_CALL_MEMBASE:
2091 case OP_VOIDCALL_MEMBASE:
2092 return OP_VOIDCALL_REG;
2093 case OP_FCALL_MEMBASE:
2094 return OP_FCALL_REG;
2095 case OP_RCALL_MEMBASE:
2096 return OP_RCALL_REG;
2097 case OP_VCALL_MEMBASE:
2098 return OP_VCALL_REG;
2099 case OP_LCALL_MEMBASE:
2100 return OP_LCALL_REG;
2102 g_assert_not_reached ();
2108 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Load the IMT argument (either the IMT_ARG register value or a METHOD
 * constant) into the architecture's IMT register for CALL.  The LLVM path
 * also records the register in call->imt_arg_reg.
 */
2110 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2114 if (COMPILE_LLVM (cfg)) {
2116 method_reg = alloc_preg (cfg);
2117 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2119 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2120 method_reg = ins->dreg;
2124 call->imt_arg_reg = method_reg;
2126 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same logic without recording imt_arg_reg. */
2131 method_reg = alloc_preg (cfg);
2132 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2134 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2135 method_reg = ins->dreg;
2138 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP describing a patch of TYPE against TARGET. */
2141 static MonoJumpInfo *
2142 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2144 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2148 ji->data.target = target;
/* cfg-aware wrapper: report how KLASS uses its generic context (see mono_class_check_context_used). */
2154 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2157 return mono_class_check_context_used (klass);
/* cfg-aware wrapper: report how METHOD uses its generic context (see mono_method_check_context_used). */
2163 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2166 return mono_method_check_context_used (method);
2172 * check_method_sharing:
2174 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2177 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2179 gboolean pass_vtable = FALSE;
2180 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2182 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2183 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2184 gboolean sharable = FALSE;
2186 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2190 * Pass vtable iff target method might
2191 * be shared, which means that sharing
2192 * is enabled for its class and its
2193 * context is sharable (and it's not a
2196 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) need an mrgctx instead of a vtable. */
2200 if (mini_method_get_context (cmethod) &&
2201 mini_method_get_context (cmethod)->method_inst) {
2202 g_assert (!pass_vtable);
2204 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2207 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2212 if (out_pass_vtable)
2213 *out_pass_vtable = pass_vtable;
2214 if (out_pass_mrgctx)
2215 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * selecting the right opcode (tail/direct/indirect/virtual), setting up the
 * return value (including vtype returns via OP_OUTARG_VTRETADDR), and
 * letting the backend (or LLVM) emit the out-argument moves.
 */
2218 inline static MonoCallInst *
2219 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2220 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline, MonoMethod *target)
2224 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2232 mini_profiler_emit_tail_call (cfg, target);
2234 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2236 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2239 call->signature = sig;
2240 call->rgctx_reg = rgctx;
2241 sig_ret = mini_get_underlying_type (sig->ret);
2243 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse the caller-provided vret_addr ... */
2246 if (mini_type_is_vtype (sig_ret)) {
2247 call->vret_var = cfg->vret_addr;
2248 //g_assert_not_reached ();
/* ... or allocate a temp and pass its address via OP_OUTARG_VTRETADDR. */
2250 } else if (mini_type_is_vtype (sig_ret)) {
2251 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2254 temp->backend.is_pinvoke = sig->pinvoke;
2257 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2258 * address of return value to increase optimization opportunities.
2259 * Before vtype decomposition, the dreg of the call ins itself represents the
2260 * fact the call modifies the return value. After decomposition, the call will
2261 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2262 * will be transformed into an LDADDR.
2264 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2265 loada->dreg = alloc_preg (cfg);
2266 loada->inst_p0 = temp;
2267 /* We reference the call too since call->dreg could change during optimization */
2268 loada->inst_p1 = call;
2269 MONO_ADD_INS (cfg->cbb, loada);
2271 call->inst.dreg = temp->dreg;
2273 call->vret_var = loada;
2274 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2275 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2277 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2278 if (COMPILE_SOFT_FLOAT (cfg)) {
2280 * If the call has a float argument, we would need to do an r8->r4 conversion using
2281 * an icall, but that cannot be done during the call sequence since it would clobber
2282 * the call registers + the stack. So we do it before emitting the call.
2284 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2286 MonoInst *in = call->args [i];
2288 if (i >= sig->hasthis)
2289 t = sig->params [i - sig->hasthis];
2291 t = &mono_defaults.int_class->byval_arg;
2292 t = mono_type_get_underlying_type (t);
2294 if (!t->byref && t->type == MONO_TYPE_R4) {
2295 MonoInst *iargs [1];
2299 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2301 /* The result will be in an int vreg */
2302 call->args [i] = conv;
2308 call->need_unbox_trampoline = unbox_trampoline;
2311 if (COMPILE_LLVM (cfg))
2312 mono_llvm_emit_call (cfg, call);
2314 mono_arch_emit_call (cfg, call);
2316 mono_arch_emit_call (cfg, call);
/* Track the largest out-argument area and flag the method as containing calls. */
2319 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2320 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument to CALL: pass RGCTX_REG in the arch rgctx register. */
2326 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2328 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2329 cfg->uses_rgctx_reg = TRUE;
2330 call->rgctx_reg = TRUE;
2332 call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 * Emit an indirect call through ADDR with signature SIG.  Optionally passes
 * an IMT argument and an rgctx argument, and (for pinvoke wrappers with
 * calling-convention checking enabled) verifies the stack pointer is
 * unchanged across the call, throwing ExecutionEngineException otherwise.
 */
2337 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2342 gboolean check_sp = FALSE;
2344 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2345 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2347 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg before the call clobbers registers. */
2352 rgctx_reg = mono_alloc_preg (cfg);
2353 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the stack pointer before the call for the callconv check. */
2357 if (!cfg->stack_inbalance_var)
2358 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2360 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2361 ins->dreg = cfg->stack_inbalance_var->dreg;
2362 MONO_ADD_INS (cfg->cbb, ins);
2365 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
2367 call->inst.sreg1 = addr->dreg;
2370 emit_imt_argument (cfg, call, NULL, imt_arg);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare the post-call SP against the saved one. */
2377 sp_reg = mono_alloc_preg (cfg);
2379 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2381 MONO_ADD_INS (cfg->cbb, ins);
2383 /* Restore the stack so we don't crash when throwing the exception */
2384 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2385 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2386 MONO_ADD_INS (cfg->cbb, ins);
2388 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2389 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2393 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2395 return (MonoInst*)call;
2399 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual, possibly tail) call to METHOD with SIG/ARGS.
 * Handles remoting wrappers, string ctors, delegate Invoke fast paths,
 * devirtualization of final/non-virtual methods, and virtual dispatch
 * through the vtable or IMT.  Returns the call instruction.
 */
2402 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2403 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2405 #ifndef DISABLE_REMOTING
2406 gboolean might_be_remote = FALSE;
2408 gboolean virtual_ = this_ins != NULL;
2409 gboolean enable_for_aot = TRUE;
2412 MonoInst *call_target = NULL;
2414 gboolean need_unbox_trampoline;
2417 sig = mono_method_signature (method);
2419 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2420 g_assert_not_reached ();
/* Copy the rgctx value into its own vreg so it survives until the call. */
2423 rgctx_reg = mono_alloc_preg (cfg);
2424 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2427 if (method->string_ctor) {
2428 /* Create the real signature */
2429 /* FIXME: Cache these */
2430 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2431 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2436 context_used = mini_method_check_context_used (cfg, method);
2438 #ifndef DISABLE_REMOTING
2439 might_be_remote = this_ins && sig->hasthis &&
2440 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2441 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2443 if (might_be_remote && context_used) {
2446 g_assert (cfg->gshared);
2448 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2450 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2454 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2455 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2457 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2459 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
2461 #ifndef DISABLE_REMOTING
2462 if (might_be_remote)
2463 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2466 call->method = method;
2467 call->inst.flags |= MONO_INST_HAS_METHOD;
2468 call->inst.inst_left = this_ins;
2469 call->tail_call = tail;
2472 int vtable_reg, slot_reg, this_reg;
2475 this_reg = this_ins->dreg;
/* Fast path for delegate Invoke: call through delegate->invoke_impl. */
2477 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2478 MonoInst *dummy_use;
2480 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2482 /* Make a call to delegate->invoke_impl */
2483 call->inst.inst_basereg = this_reg;
2484 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2485 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2487 /* We must emit a dummy use here because the delegate trampoline will
2488 replace the 'this' argument with the delegate target making this activation
2489 no longer a root for the delegate.
2490 This is an issue for delegates that target collectible code such as dynamic
2491 methods of GC'able assemblies.
2493 For a test case look into #667921.
2495 FIXME: a dummy use is not the best way to do it as the local register allocator
2496 will put it on a caller save register and spil it around the call.
2497 Ideally, we would either put it on a callee save register or only do the store part.
2499 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2501 return (MonoInst*)call;
2504 if ((!cfg->compile_aot || enable_for_aot) &&
2505 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2506 (MONO_METHOD_IS_FINAL (method) &&
2507 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2508 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2510 * the method is not virtual, we just need to ensure this is not null
2511 * and then we can call the method directly.
2513 #ifndef DISABLE_REMOTING
2514 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2516 * The check above ensures method is not gshared, this is needed since
2517 * gshared methods can't have wrappers.
2519 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2523 if (!method->string_ctor)
2524 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2526 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2527 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2529 * the method is virtual, but we can statically dispatch since either
2530 * it's class or the method itself are sealed.
2531 * But first we need to ensure it's not a null reference.
2533 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2535 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2536 } else if (call_target) {
2537 vtable_reg = alloc_preg (cfg);
2538 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2540 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2541 call->inst.sreg1 = call_target->dreg;
/* Clear only the HAS_METHOD flag: '~' (bitwise complement), not '!'
 * (logical negation), which would evaluate to 0 and wipe ALL flags. */
2542 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2544 vtable_reg = alloc_preg (cfg);
2545 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2546 if (mono_class_is_interface (method->klass)) {
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
/* IMT slots live at negative offsets below the vtable. */
2550 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2552 slot_reg = vtable_reg;
2553 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2554 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2556 g_assert (mono_method_signature (method)->generic_param_count);
2557 emit_imt_argument (cfg, call, call->method, imt_arg);
2561 call->inst.sreg1 = slot_reg;
2562 call->inst.inst_offset = offset;
2563 call->is_virtual = TRUE;
2567 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2570 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2572 return (MonoInst*)call;
2576 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2578 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2582 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2589 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
2592 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2594 return (MonoInst*)call;
2598 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2600 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2604 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2608 * mono_emit_abs_call:
2610 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2612 inline static MonoInst*
2613 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2614 MonoMethodSignature *sig, MonoInst **args)
2616 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2620 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2623 if (cfg->abs_patches == NULL)
2624 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2625 g_hash_table_insert (cfg->abs_patches, ji, ji);
2626 ins = mono_emit_native_call (cfg, ji, sig, args);
2627 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2631 static MonoMethodSignature*
2632 sig_to_rgctx_sig (MonoMethodSignature *sig)
2634 // FIXME: memory allocation
2635 MonoMethodSignature *res;
2638 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2639 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2640 res->param_count = sig->param_count + 1;
2641 for (i = 0; i < sig->param_count; ++i)
2642 res->params [i] = sig->params [i];
2643 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2647 /* Make an indirect call to FSIG passing an additional argument */
2649 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2651 MonoMethodSignature *csig;
2652 MonoInst *args_buf [16];
2654 int i, pindex, tmp_reg;
2656 /* Make a call with an rgctx/extra arg */
2657 if (fsig->param_count + 2 < 16)
2660 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2663 args [pindex ++] = orig_args [0];
2664 for (i = 0; i < fsig->param_count; ++i)
2665 args [pindex ++] = orig_args [fsig->hasthis + i];
2666 tmp_reg = alloc_preg (cfg);
2667 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2668 csig = sig_to_rgctx_sig (fsig);
2669 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2672 /* Emit an indirect call to the function descriptor ADDR */
2674 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2676 int addr_reg, arg_reg;
2677 MonoInst *call_target;
2679 g_assert (cfg->llvm_only);
2682 * addr points to a <addr, arg> pair, load both of them, and
2683 * make a call to addr, passing arg as an extra arg.
2685 addr_reg = alloc_preg (cfg);
2686 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2687 arg_reg = alloc_preg (cfg);
2688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2690 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether the JIT may call C icalls directly instead of through their
 * marshalling wrapper.
 * NOTE(review): several lines of this function are missing from this
 * extract (braces, return statements, and presumably an arch-specific
 * #ifdef around the LLVM check) — compare with upstream before editing.
 */
2694 direct_icalls_enabled (MonoCompile *cfg)
2698 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2700 if (cfg->compile_llvm && !cfg->llvm_only)
/* Wrappers are kept when sdb sequence points need stack walks, or when direct icalls are explicitly disabled */
2703 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall described by INFO at IL offset IL_OFFSET.
 * When the icall cannot raise (info->no_raise) and direct icalls are
 * enabled, the wrapper is created lazily and inlined here; otherwise an
 * indirect call through the cached wrapper is emitted.
 * NOTE(review): some lines of this body (declarations, braces, the
 * non-void inline return path) are missing from this extract.
 */
2709 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2712 * Call the jit icall without a wrapper if possible.
2713 * The wrapper is needed for the following reasons:
2714 * - to handle exceptions thrown using mono_raise_exceptions () from the
2715 * icall function. The EH code needs the lmf frame pushed by the
2716 * wrapper to be able to unwind back to managed code.
2717 * - to be able to do stack walks for asynchronously suspended
2718 * threads when debugging.
2720 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create the wrapper the first time this icall is emitted */
2724 if (!info->wrapper_method) {
2725 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2726 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish the fully constructed wrapper before other threads can observe it */
2728 mono_memory_barrier ();
2732 * Inline the wrapper method, which is basically a call to the C icall, and
2733 * an exception check.
2735 costs = inline_method (cfg, info->wrapper_method, NULL,
2736 args, NULL, il_offset, TRUE);
2737 g_assert (costs > 0);
2738 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: indirect call through the icall wrapper */
2742 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of call INS when the callee (pinvoke or LLVM
 * compiled) might return a small integer with undefined upper bits: an
 * explicit sign/zero extension matching the declared return type is
 * emitted, keeping the original stack type.
 * NOTE(review): the widen_op declaration, break statements and the final
 * return are missing from this extract.
 */
2747 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2749 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2750 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2754 * Native code might return non register sized integers
2755 * without initializing the upper bits.
2757 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2758 case OP_LOADI1_MEMBASE:
2759 widen_op = OP_ICONV_TO_I1;
2761 case OP_LOADU1_MEMBASE:
2762 widen_op = OP_ICONV_TO_U1;
2764 case OP_LOADI2_MEMBASE:
2765 widen_op = OP_ICONV_TO_I2;
2767 case OP_LOADU2_MEMBASE:
2768 widen_op = OP_ICONV_TO_U2;
/* Emit the widening conversion and preserve the original stack type */
2774 if (widen_op != -1) {
2775 int dreg = alloc_preg (cfg);
2778 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2779 widen->type = ins->type;
2790 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2792 MonoInst *args [16];
2794 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2795 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2797 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2801 mini_get_memcpy_method (void)
2803 static MonoMethod *memcpy_method = NULL;
2804 if (!memcpy_method) {
2805 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2807 g_error ("Old corlib found. Install a new one");
2809 return memcpy_method;
/*
 * mini_emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE through PTR. Three
 * strategies, in order of preference: a dedicated backend opcode
 * (OP_CARD_TABLE_WBARRIER), inline card-table marking, or a call to the
 * GC's managed write barrier method.
 * NOTE(review): declarations, braces and early returns are missing from
 * this extract.
 */
2813 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2815 int card_table_shift_bits;
2816 gpointer card_table_mask;
2818 MonoInst *dummy_use;
2819 int nursery_shift_bits;
2820 size_t nursery_size;
/* Nothing to do when the compilation does not require write barriers */
2822 if (!cfg->gen_write_barriers)
2825 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2827 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2829 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Preferred path: single backend opcode, JIT-only (not AOT, not LLVM) */
2831 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2834 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2835 wbarrier->sreg1 = ptr->dreg;
2836 wbarrier->sreg2 = value->dreg;
2837 MONO_ADD_INS (cfg->cbb, wbarrier);
2838 } else if (card_table) {
2839 int offset_reg = alloc_preg (cfg);
2844 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2845 * collector case, so, for the serial collector, it might slightly slow down nursery
2846 * collections. We also expect that the host system and the target system have the same card
2847 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked */
2850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2851 if (card_table_mask)
2852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2854 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2855 * IMM's larger than 32bits.
2857 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2858 card_reg = ins->dreg;
/* Mark the card: *(card_table + index) = 1 */
2860 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2861 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier */
2863 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2864 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator */
2867 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2871 mini_get_memset_method (void)
2873 static MonoMethod *memset_method = NULL;
2874 if (!memset_method) {
2875 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2877 g_error ("Old corlib found. Install a new one");
2879 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the valuetype pointed to by DEST. For
 * gsharedvt classes the size/bzero helpers come from the runtime info;
 * otherwise small types are zeroed inline and large ones via the corlib
 * memset helper.
 * NOTE(review): declarations, braces and returns are missing from this
 * extract.
 */
2883 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2885 MonoInst *iargs [3];
2888 MonoMethod *memset_method;
2889 MonoInst *size_ins = NULL;
2890 MonoInst *bzero_ins = NULL;
2891 static MonoMethod *bzero_method;
2893 /* FIXME: Optimize this for the case when dest is an LDADDR */
2894 mono_class_init (klass);
/* gsharedvt: size and bzero routine are only known at runtime */
2895 if (mini_is_gsharedvt_klass (klass)) {
2896 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2897 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2899 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
2900 g_assert (bzero_method);
2902 iargs [1] = size_ins;
2903 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
2907 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
2909 n = mono_class_value_size (klass, &align);
/* Small objects are cheaper to clear inline than through a call */
2911 if (n <= sizeof (gpointer) * 8) {
2912 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2915 memset_method = mini_get_memset_method ();
2917 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2918 EMIT_NEW_ICONST (cfg, iargs [2], n);
2919 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to return either the this pointer for instance method,
 * or the mrgctx for static methods.
 * NOTE(review): braces, several declarations and the return statements
 * are missing from this extract.
 */
2930 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2932 MonoInst *this_ins = NULL;
2934 g_assert (cfg->gshared);
/* Reference-type instance methods not using method context: 'this' carries the context */
2936 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2937 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2938 !method->klass->valuetype)
2939 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-level generic context: load the mrgctx from its variable */
2941 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2942 MonoInst *mrgctx_loc, *mrgctx_var;
2944 g_assert (!this_ins);
2945 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2947 mrgctx_loc = mono_get_vtable_var (cfg);
2948 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2951 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
2952 MonoInst *mrgctx_loc, *mrgctx_var;
2954 /* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
2955 mrgctx_loc = mono_get_vtable_var (cfg);
2956 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2958 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static or valuetype methods: context comes from the vtable variable */
2961 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2962 MonoInst *vtable_loc, *vtable_var;
2964 g_assert (!this_ins);
2966 vtable_loc = mono_get_vtable_var (cfg);
2967 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx: load the vtable out of it */
2969 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2970 MonoInst *mrgctx_var = vtable_var;
2973 vtable_reg = alloc_preg (cfg);
2974 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2975 vtable_var->type = STACK_PTR;
/* Otherwise: load the vtable from 'this' */
2983 vtable_reg = alloc_preg (cfg);
2984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2989 static MonoJumpInfoRgctxEntry *
2990 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2992 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2993 res->method = method;
2994 res->in_mrgctx = in_mrgctx;
2995 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2996 res->data->type = patch_type;
2997 res->data->data.target = patch_data;
2998 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit IR to fetch the value of rgctx entry ENTRY from RGCTX without a
 * trampoline: walk the rgctx array chain inline and fall back to the
 * mono_fill_{method,class}_rgctx icalls when a link or the slot is null.
 * NOTE(review): braces, #if/#else structure, args[0] setup and the final
 * return are missing from this extract.
 */
3003 static inline MonoInst*
3004 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3006 MonoInst *args [16];
3009 // FIXME: No fastpath since the slot is not a compile time constant
3011 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3012 if (entry->in_mrgctx)
3013 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3015 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3019 * FIXME: This can be called during decompose, which is a problem since it creates
3021 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3023 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3025 MonoBasicBlock *is_null_bb, *end_bb;
3026 MonoInst *res, *ins, *call;
3029 slot = mini_get_rgctx_entry_slot (entry);
3031 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3032 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots are offset past the fixed-size mrgctx header */
3034 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find which array in the chain holds INDEX, adjusting it per level */
3035 for (depth = 0; ; ++depth) {
3036 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3038 if (index < size - 1)
3043 NEW_BBLOCK (cfg, end_bb);
3044 NEW_BBLOCK (cfg, is_null_bb);
3047 rgctx_reg = rgctx->dreg;
3049 rgctx_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3052 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3053 NEW_BBLOCK (cfg, is_null_bb);
3055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of the rgctx array chain */
3059 for (i = 0; i < depth; ++i) {
3060 int array_reg = alloc_preg (cfg);
3062 /* load ptr to next array */
3063 if (mrgctx && i == 0)
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3067 rgctx_reg = array_reg;
3068 /* is the ptr null? */
3069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3070 /* if yes, jump to actual trampoline */
3071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fetch slot: slot 0 of each array is the link to the next one */
3075 val_reg = alloc_preg (cfg);
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3077 /* is the slot null? */
3078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3079 /* if yes, jump to actual trampoline */
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path: the slot already holds the value */
3083 res_reg = alloc_preg (cfg);
3084 MONO_INST_NEW (cfg, ins, OP_MOVE);
3085 ins->dreg = res_reg;
3086 ins->sreg1 = val_reg;
3087 MONO_ADD_INS (cfg->cbb, ins);
3089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: call the rgctx-filling icall */
3092 MONO_START_BB (cfg, is_null_bb);
3094 EMIT_NEW_ICONST (cfg, args [1], index);
3096 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3098 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3099 MONO_INST_NEW (cfg, ins, OP_MOVE);
3100 ins->dreg = res_reg;
3101 ins->sreg1 = call->dreg;
3102 MONO_ADD_INS (cfg->cbb, ins);
3103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3105 MONO_START_BB (cfg, end_bb);
3114 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3117 static inline MonoInst*
3118 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3121 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3123 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3127 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3128 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3130 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3131 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3133 return emit_rgctx_fetch (cfg, rgctx, entry);
3137 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3138 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3140 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3141 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3143 return emit_rgctx_fetch (cfg, rgctx, entry);
3147 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3148 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3150 MonoJumpInfoGSharedVtCall *call_info;
3151 MonoJumpInfoRgctxEntry *entry;
3154 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3155 call_info->sig = sig;
3156 call_info->method = cmethod;
3158 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3159 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3161 return emit_rgctx_fetch (cfg, rgctx, entry);
3165 * emit_get_rgctx_virt_method:
3167 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3170 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3171 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3173 MonoJumpInfoVirtMethod *info;
3174 MonoJumpInfoRgctxEntry *entry;
3177 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3178 info->klass = klass;
3179 info->method = virt_method;
3181 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3182 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3184 return emit_rgctx_fetch (cfg, rgctx, entry);
3188 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3189 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3191 MonoJumpInfoRgctxEntry *entry;
3194 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3195 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3197 return emit_rgctx_fetch (cfg, rgctx, entry);
3201 * emit_get_rgctx_method:
3203 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3204 * normal constants, else emit a load from the rgctx.
3207 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3208 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3210 if (!context_used) {
3213 switch (rgctx_type) {
3214 case MONO_RGCTX_INFO_METHOD:
3215 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3217 case MONO_RGCTX_INFO_METHOD_RGCTX:
3218 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3221 g_assert_not_reached ();
3224 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3225 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3227 return emit_rgctx_fetch (cfg, rgctx, entry);
3232 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3233 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3235 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3236 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3238 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (DATA, RGCTX_TYPE) in the per-method
 * gsharedvt info template table, appending a new entry (and growing the
 * table geometrically) when none matches.
 * NOTE(review): some declarations, braces and the return are missing
 * from this extract.
 */
3242 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3244 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3245 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing entry when possible (LOCAL_OFFSET entries are never shared) */
3250 for (i = 0; i < info->num_entries; ++i) {
3251 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3253 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table: double its capacity, starting from 16 */
3257 if (info->num_entries == info->count_entries) {
3258 MonoRuntimeGenericContextInfoTemplate *new_entries;
3259 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3261 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3263 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3264 info->entries = new_entries;
3265 info->count_entries = new_count_entries;
/* Append the new entry at the end */
3268 idx = info->num_entries;
3269 template_ = &info->entries [idx];
3270 template_->info_type = rgctx_type;
3271 template_->data = data;
3273 info->num_entries ++;
3279 * emit_get_gsharedvt_info:
3281 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3284 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3289 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3290 /* Load info->entries [idx] */
3291 dreg = alloc_preg (cfg);
3292 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3298 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3300 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR which runs the static constructor / initialization of KLASS
 * if it has not run yet: either a dedicated backend opcode or an inline
 * "initialized?" check followed by the mono_generic_class_init icall.
 * On return the caller must check @klass for load errors.
 * NOTE(review): braces, declarations and #if structure are missing from
 * this extract.
 */
3307 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3309 MonoInst *vtable_arg;
3312 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable is fetched from the rgctx */
3315 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3316 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared code: the vtable is a compile time constant */
3318 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3322 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3325 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3329 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3330 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3332 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3333 ins->sreg1 = vtable_arg->dreg;
3334 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline check of vtable->initialized, call icall when unset */
3337 MonoBasicBlock *inited_bb;
3338 MonoInst *args [16];
3340 inited_reg = alloc_ireg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3344 NEW_BBLOCK (cfg, inited_bb);
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3349 args [0] = vtable_arg;
3350 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3352 MONO_START_BB (cfg, inited_bb);
3357 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3361 if (cfg->gen_seq_points && cfg->method == method) {
3362 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3364 ins->flags |= MONO_INST_NONEMPTY_STACK;
3365 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class
 * (from OBJ_REG's vtable) and the target KLASS into TLS, so a failing
 * cast can report both sides. Skips the recording for null objects.
 * NOTE(review): braces, some declarations and the null_check handling
 * are missing from this extract.
 */
3370 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3372 if (mini_get_debug_options ()->better_cast_details) {
3373 int vtable_reg = alloc_preg (cfg);
3374 int klass_reg = alloc_preg (cfg);
3375 MonoBasicBlock *is_null_bb = NULL;
3377 int to_klass_reg, context_used;
/* Skip recording for null objects */
3380 NEW_BBLOCK (cfg, is_null_bb);
3382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3386 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): stray trailing "." after the newline in this message — consider fixing upstream */
3388 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record the object's dynamic class as the cast source */
3392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class: from the rgctx in shared code, else as a constant */
3397 context_used = mini_class_check_context_used (cfg, klass);
3399 MonoInst *class_ins;
3401 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3402 to_klass_reg = class_ins->dreg;
3404 to_klass_reg = alloc_preg (cfg);
3405 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3410 MONO_START_BB (cfg, is_null_bb);
3415 mini_reset_cast_details (MonoCompile *cfg)
3417 /* Reset the variables holding the cast details */
3418 if (mini_get_debug_options ()->better_cast_details) {
3419 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3420 /* It is enough to reset the from field */
3421 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which checks that OBJ's runtime type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks). The comparison strategy depends on shared/AOT mode.
 * On return the caller must check @array_class for load errors
 * NOTE(review): braces, declarations and an early return on vtable
 * failure are missing from this extract.
 */
3429 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3431 int vtable_reg = alloc_preg (cfg)
3434 context_used = mini_class_check_context_used (cfg, array_class);
3436 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also performs the null check on OBJ */
3438 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared (OPT_SHARED) code: compare MonoClass pointers via a runtime constant */
3440 if (cfg->opt & MONO_OPT_SHARED) {
3441 int class_reg = alloc_preg (cfg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3445 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
/* Gshared code: fetch the expected vtable from the rgctx */
3447 } else if (context_used) {
3448 MonoInst *vtable_ins;
3450 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3451 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* AOT: vtable constant loaded into a register; JIT: immediate compare */
3453 if (cfg->compile_aot) {
3457 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3459 vt_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3461 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3464 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3470 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3472 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 *   Calls the klass's Nullable Unbox method, either indirectly (shared /
 * llvm-only code) or directly with an optional vtable rgctx argument.
 * NOTE(review): braces and the if/else structure are missing from this
 * extract.
 */
3480 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3482 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3485 MonoInst *rgctx, *addr;
3487 /* FIXME: What if the class is shared? We might not
3488 have to get the address of the method from the
/* Shared code: get the method address from the rgctx and call indirectly */
3490 addr = emit_get_rgctx_method (cfg, context_used, method,
3491 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3492 if (cfg->llvm_only) {
3493 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3494 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3496 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3498 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, possibly passing the vtable as rgctx arg */
3501 gboolean pass_vtable, pass_mrgctx;
3502 MonoInst *rgctx_arg = NULL;
3504 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3505 g_assert (!pass_mrgctx);
3508 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3511 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3514 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to the
 * valuetype KLASS: type-check the boxed object (rank must be 0 and its
 * element class must match), then compute the address of the payload
 * right after the MonoObject header.
 * NOTE(review): declarations, braces, the else branch structure and the
 * final return are missing from this extract.
 */
3519 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3523 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3524 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3525 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3526 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3528 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3529 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3532 /* FIXME: generics */
3533 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a valuetype */
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: the expected element class comes from the rgctx */
3543 MonoInst *element_class;
3545 /* This assertion is from the unboxcast insn */
3546 g_assert (klass->rank == 0);
3548 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3549 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: compare against the compile time element class */
3554 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3555 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3556 mini_reset_cast_details (cfg);
/* Result: pointer to the payload just past the object header */
3559 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3560 MONO_ADD_INS (cfg->cbb, add);
3561 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is only known at runtime (gsharedvt):
 * branch on the runtime box type (vtype / reference / nullable) and
 * produce an address from which a KLASS-typed value can be loaded.
 * NOTE(review): declarations, braces, castclass setup lines and the
 * final return are missing from this extract.
 */
3568 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3570 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3571 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3575 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime cast check before unboxing */
3581 args [1] = klass_inst;
3584 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
/* Three-way dispatch on the runtime box type of KLASS */
3586 NEW_BBLOCK (cfg, is_ref_bb);
3587 NEW_BBLOCK (cfg, is_nullable_bb);
3588 NEW_BBLOCK (cfg, end_bb);
3589 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3596 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3597 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the payload lives just past the object header */
3601 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3602 MONO_ADD_INS (cfg->cbb, addr);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: spill the reference to a temporary and use its address */
3607 MONO_START_BB (cfg, is_ref_bb);
3609 /* Save the ref to a temporary */
3610 dreg = alloc_ireg (cfg);
3611 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3612 addr->dreg = addr_reg;
3613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call the runtime-provided Nullable unbox routine */
3617 MONO_START_BB (cfg, is_nullable_bb);
3620 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3621 MonoInst *unbox_call;
3622 MonoMethodSignature *unbox_sig;
/* Synthesize a (KLASS)(object) signature for the unbox call */
3624 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3625 unbox_sig->ret = &klass->byval_arg;
3626 unbox_sig->param_count = 1;
3627 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3630 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3632 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3634 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3635 addr->dreg = addr_reg;
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3641 MONO_START_BB (cfg, end_bb);
/* Load the value through the computed address */
3644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS, choosing between the managed
 * (GC fast-path) allocator and several jit icall fallbacks depending on
 * sharing mode, AOT and gsharedvt constraints.
 * NOTE(review): some lines are elided from this excerpt; the visible code
 * shows a context_used (shared) path first, then the non-shared path.
 */
3650 * Returns NULL and set the cfg exception on error.
3653 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3655 MonoInst *iargs [2];
3660 MonoRgctxInfoType rgctx_info;
3661 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-determined instance size */
3662 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3664 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED passes a klass + domain; otherwise a vtable is enough */
3666 if (cfg->opt & MONO_OPT_SHARED)
3667 rgctx_info = MONO_RGCTX_INFO_KLASS;
3669 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3670 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3672 if (cfg->opt & MONO_OPT_SHARED) {
3673 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3675 alloc_ftn = ves_icall_object_new;
3678 alloc_ftn = ves_icall_object_new_specific;
/* Managed allocator fast path, only usable outside MONO_OPT_SHARED */
3681 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3682 if (known_instance_size) {
3683 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header */
3684 if (size < sizeof (MonoObject))
3685 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3687 EMIT_NEW_ICONST (cfg, iargs [1], size);
3689 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3692 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-context_used path below (elided condition) */
3695 if (cfg->opt & MONO_OPT_SHARED) {
3696 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3697 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3699 alloc_ftn = ves_icall_object_new;
3700 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3701 /* This happens often in argument checking code, eg. throw new FooException... */
3702 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3703 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3704 alloc_ftn = mono_helper_newobj_mscorlib;
3706 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3707 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg (elided check) */
3710 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3711 cfg->exception_ptr = klass;
3715 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3717 if (managed_alloc) {
3718 int size = mono_class_instance_size (klass);
3719 if (size < sizeof (MonoObject))
3720 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3722 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3723 EMIT_NEW_ICONST (cfg, iargs [1], size);
3724 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3726 alloc_ftn = ves_icall_object_new_specific;
3727 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3730 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR to box VAL (a value of type KLASS) into a heap object.
 * Special cases: Nullable<T> (boxes via Nullable<T>.Box ()), and gsharedvt
 * classes (runtime three-way dispatch on CLASS_BOX_TYPE like the unbox path).
 * NOTE(review): some lines are elided from this excerpt; comments cover only
 * the visible statements.
 */
3734 * Returns NULL and set the cfg exception on error.
3737 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3739 MonoInst *alloc, *ins;
3741 if (mono_class_is_nullable (klass)) {
3742 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
/* Shared-code Nullable boxing: fetch Box()'s code address from the rgctx */
3745 if (cfg->llvm_only && cfg->gsharedvt) {
3746 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3747 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3748 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3750 /* FIXME: What if the class is shared? We might not
3751 have to get the method address from the RGCTX. */
3752 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3753 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3754 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3756 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-context_used Nullable boxing: direct call, possibly passing a vtable */
3759 gboolean pass_vtable, pass_mrgctx;
3760 MonoInst *rgctx_arg = NULL;
3762 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3763 g_assert (!pass_mrgctx);
3766 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3769 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3772 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: box type only known at runtime — branch on CLASS_BOX_TYPE */
3776 if (mini_is_gsharedvt_klass (klass)) {
3777 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3778 MonoInst *res, *is_ref, *src_var, *addr;
3781 dreg = alloc_ireg (cfg);
3783 NEW_BBLOCK (cfg, is_ref_bb);
3784 NEW_BBLOCK (cfg, is_nullable_bb);
3785 NEW_BBLOCK (cfg, end_bb);
3786 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3791 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy the value past the object header */
3794 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3797 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3798 ins->opcode = OP_STOREV_MEMBASE;
3800 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3801 res->type = STACK_OBJ;
3803 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3806 MONO_START_BB (cfg, is_ref_bb);
3808 /* val is a vtype, so has to load the value manually */
3809 src_var = get_vreg_to_inst (cfg, val->dreg);
3811 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3812 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3817 MONO_START_BB (cfg, is_nullable_bb);
3820 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3821 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3823 MonoMethodSignature *box_sig;
3826 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3827 * construct that method at JIT time, so have to do things by hand.
3829 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3830 box_sig->ret = &mono_defaults.object_class->byval_arg;
3831 box_sig->param_count = 1;
3832 box_sig->params [0] = &klass->byval_arg;
/* llvm-only and normal JIT use different indirect-call helpers (elided branch) */
3835 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
3837 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3838 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3839 res->type = STACK_OBJ;
3843 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3845 MONO_START_BB (cfg, end_bb);
/* Plain (non-nullable, non-gsharedvt) boxing: allocate, then copy the vtype */
3849 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3853 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls may be called directly */
3858 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be invoked directly (no wrapper).
 * Only icalls that never raise managed exceptions qualify; a small whitelist
 * of corlib classes (plus Math) is used for now.
 */
3861 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
3863 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3864 if (!direct_icalls_enabled (cfg))
3868 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
3869 * Whitelist a few icalls for now.
3871 if (!direct_icall_type_hash) {
/* Built once; the barrier publishes the fully-populated table before the
 * global pointer becomes visible to other threads. */
3872 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
3874 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
3875 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
3876 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
3877 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
3878 mono_memory_barrier ();
3879 direct_icall_type_hash = h;
3882 if (cmethod->klass == mono_defaults.math_class)
3884 /* No locking needed */
3885 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether calls to CMETHOD need the caller to be discoverable via a
 * stack walk. The visible check covers System.Type::GetType; the rest of the
 * body is elided from this excerpt.
 */
3891 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3893 if (cmethod->klass == mono_defaults.systemtype_class) {
3894 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion of Enum.HasFlag (): load the enum value, AND it with
 * the flag, and compare the result against the flag ((this & flag) == flag).
 * Chooses 32-bit or 64-bit opcodes based on the enum's underlying type.
 */
3900 static G_GNUC_UNUSED MonoInst*
3901 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
3903 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
3904 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* The switch (elided cases) decides whether the underlying type is 32-bit */
3907 switch (enum_type->type) {
3910 #if SIZEOF_REGISTER == 8
3922 MonoInst *load, *and_, *cmp, *ceq;
3923 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3924 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3925 int dest_reg = alloc_ireg (cfg);
3927 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3928 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3929 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3930 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3932 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends that need it (elided condition) */
3935 load = mono_decompose_opcode (cfg, load);
3936 and_ = mono_decompose_opcode (cfg, and_);
3937 cmp = mono_decompose_opcode (cfg, cmp);
3938 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR equivalent to mono_delegate_ctor (): allocate the delegate,
 * fill in target/method/method_code fields, and set the invoke trampoline.
 * VIRTUAL_ selects the virtual-delegate path; llvm-only builds call runtime
 * init helpers instead of using trampolines.
 * NOTE(review): some lines are elided from this excerpt; comments cover only
 * the visible statements.
 */
3946 * Returns NULL and set the cfg exception on error.
3948 static G_GNUC_UNUSED MonoInst*
3949 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
3953 gpointer trampoline;
3954 MonoInst *obj, *method_ins, *tramp_ins;
3958 if (virtual_ && !cfg->llvm_only) {
3959 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out if no virtual invoke impl exists for this signature (elided) */
3962 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
3966 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
3970 /* Inline the contents of mono_delegate_ctor */
3972 /* Set target field */
3973 /* Optimize away setting of NULL target */
3974 if (!MONO_INS_IS_PCONST_NULL (target)) {
3975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3976 if (cfg->gen_write_barriers) {
3977 dreg = alloc_preg (cfg);
3978 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3979 mini_emit_write_barrier (cfg, ptr, target);
3983 /* Set method field */
3984 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3988 * To avoid looking up the compiled code belonging to the target method
3989 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3990 * store it, and we fill it after the method has been compiled.
3992 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3993 MonoInst *code_slot_ins;
3996 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared case: allocate/reuse the per-domain code slot under the domain lock */
3998 domain = mono_domain_get ();
3999 mono_domain_lock (domain);
4000 if (!domain_jit_info (domain)->method_code_hash)
4001 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4002 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4004 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4005 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4007 mono_domain_unlock (domain);
4009 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4011 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize the delegate via runtime icalls instead of trampolines */
4014 if (cfg->llvm_only) {
4015 MonoInst *args [16];
4020 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4021 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4024 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4030 if (cfg->compile_aot) {
4031 MonoDelegateClassMethodPair *del_tramp;
/* AOT: emit a patchable constant describing the (klass, method, virtual) triple */
4033 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4034 del_tramp->klass = klass;
4035 del_tramp->method = context_used ? NULL : method;
4036 del_tramp->is_virtual = virtual_;
4037 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4040 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4042 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4043 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4046 /* Set invoke_impl field */
4048 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path: copy invoke_impl and method_ptr from the tramp info */
4050 dreg = alloc_preg (cfg);
4051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4052 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4054 dreg = alloc_preg (cfg);
4055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4056 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4059 dreg = alloc_preg (cfg);
4060 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4061 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4063 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall wrapper for a RANK-dimensional
 * newarr/newobj; uses a vararg calling convention, which also disables LLVM
 * compilation for the method.
 */
4069 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4071 MonoJitICallInfo *info;
4073 /* Need to register the icall so it gets an icall wrapper */
4074 info = mono_get_array_new_va_icall (rank);
4076 cfg->flags |= MONO_CFG_HAS_VARARGS;
4078 /* mono_array_new_va () needs a vararg calling convention */
4079 cfg->exception_message = g_strdup ("array-new");
4080 cfg->disable_llvm = TRUE;
4082 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4083 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4087 * handle_constrained_gsharedvt_call:
4089 * Handle constrained calls where the receiver is a gsharedvt type.
4090 * Return the instruction representing the call. Set the cfg exception on failure.
4093 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4094 gboolean *ref_emit_widen)
4096 MonoInst *ins = NULL;
4097 gboolean emit_widen = *ref_emit_widen;
4100 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4101 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4102 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only simple shapes are supported: Object/interface target, simple return
 * types, and at most one (possibly gsharedvt/byref) real argument. */
4104 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4105 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4106 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4107 MonoInst *args [16];
4110 * This case handles calls to
4111 * - object:ToString()/Equals()/GetHashCode(),
4112 * - System.IComparable<T>:CompareTo()
4113 * - System.IEquatable<T>:Equals ()
4114 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args: [1] = target method (const or from rgctx), [2] = constrained class */
4118 if (mono_method_check_context_used (cmethod))
4119 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4121 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4122 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4124 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4125 if (fsig->hasthis && fsig->param_count) {
4126 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4127 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4128 ins->dreg = alloc_preg (cfg);
4129 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4130 MONO_ADD_INS (cfg->cbb, ins);
4133 if (mini_is_gsharedvt_type (fsig->params [0])) {
4134 int addr_reg, deref_arg_reg;
/* args [3] tells the icall whether it must dereference the packed argument */
4136 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4137 deref_arg_reg = alloc_preg (cfg);
4138 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4139 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4141 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4142 addr_reg = ins->dreg;
4143 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4145 EMIT_NEW_ICONST (cfg, args [3], 0);
4146 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4149 EMIT_NEW_ICONST (cfg, args [3], 0);
4150 EMIT_NEW_ICONST (cfg, args [4], 0);
4152 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/unwrap it to the expected type */
4155 if (mini_is_gsharedvt_type (fsig->ret)) {
4156 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4157 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4161 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4162 MONO_ADD_INS (cfg->cbb, add);
4164 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4165 MONO_ADD_INS (cfg->cbb, ins);
4166 /* ins represents the call result */
/* Unsupported shape: fall back to the generic gsharedvt failure path */
4169 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4172 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block, and add a dummy use in the exit block so liveness
 * analysis keeps the variable alive for the whole method. No-op if there is
 * no got_var or it was already allocated.
 */
4181 mono_emit_load_got_addr (MonoCompile *cfg)
4183 MonoInst *getaddr, *dummy_use;
4185 if (!cfg->got_var || cfg->got_var_allocated)
4188 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4189 getaddr->cil_code = cfg->header->code;
4190 getaddr->dreg = cfg->got_var->dreg;
4192 /* Add it to the start of the first bblock */
4193 if (cfg->bb_entry->code) {
4194 getaddr->next = cfg->bb_entry->code;
4195 cfg->bb_entry->code = getaddr;
4198 MONO_ADD_INS (cfg->bb_entry, getaddr);
4200 cfg->got_var_allocated = TRUE;
4203 * Add a dummy use to keep the got_var alive, since real uses might
4204 * only be generated by the back ends.
4205 * Add it to end_bblock, so the variable's lifetime covers the whole
4207 * It would be better to make the usage of the got var explicit in all
4208 * cases when the backend needs it (i.e. calls, throw etc.), so this
4209 * wouldn't be needed.
4211 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4212 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL size for inlining candidates; initialized once from MONO_INLINELIMIT
 * or INLINE_LENGTH_LIMIT. */
4215 static int inline_limit;
4216 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Return whether METHOD may be inlined into the method being compiled.
 * Rejects methods that are too large, marked noinline/synchronized,
 * marshal-by-ref, profiled, or whose class needs a cctor that cannot be run
 * (or proven run) at compile time.
 * NOTE(review): some lines are elided from this excerpt; comments cover only
 * the visible statements.
 */
4219 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4221 MonoMethodHeaderSummary header;
4223 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4224 MonoMethodSignature *sig = mono_method_signature (method);
4228 if (cfg->disable_inline)
/* Cap recursion depth of nested inlining */
4233 if (cfg->inline_depth > 10)
4236 if (!mono_method_get_header_summary (method, &header))
4239 /*runtime, icall and pinvoke are checked by summary call*/
4240 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4241 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4242 (mono_class_is_marshalbyref (method->klass)) ||
4246 /* also consider num_locals? */
4247 /* Do the size check early to avoid creating vtables */
4248 if (!inline_limit_inited) {
4250 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4251 inline_limit = atoi (inlinelimit);
4252 g_free (inlinelimit);
4254 inline_limit = INLINE_LENGTH_LIMIT;
4255 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit */
4257 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4261 * if we can initialize the class of the method right away, we do,
4262 * otherwise we don't allow inlining if the class needs initialization,
4263 * since it would mean inserting a call to mono_runtime_class_init()
4264 * inside the inlined code
4266 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4269 if (!(cfg->opt & MONO_OPT_SHARED)) {
4270 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4271 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4272 if (method->klass->has_cctor) {
4273 vtable = mono_class_vtable (cfg->domain, method->klass);
4276 if (!cfg->compile_aot) {
4278 if (!mono_runtime_class_init_full (vtable, &error)) {
4279 mono_error_cleanup (&error);
4284 } else if (mono_class_is_before_field_init (method->klass)) {
4285 if (cfg->run_cctors && method->klass->has_cctor) {
4286 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4287 if (!method->klass->runtime_info)
4288 /* No vtable created yet */
4290 vtable = mono_class_vtable (cfg->domain, method->klass);
4293 /* This makes so that inline cannot trigger */
4294 /* .cctors: too many apps depend on them */
4295 /* running with a specific order... */
4296 if (! vtable->initialized)
4299 if (!mono_runtime_class_init_full (vtable, &error)) {
4300 mono_error_cleanup (&error);
4304 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4305 if (!method->klass->runtime_info)
4306 /* No vtable created yet */
4308 vtable = mono_class_vtable (cfg->domain, method->klass);
4311 if (!vtable->initialized)
4316 * If we're compiling for shared code
4317 * the cctor will need to be run at aot method load time, for example,
4318 * or at the end of the compilation of the inlining method.
4320 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4324 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods taking/returning R4 */
4325 if (mono_arch_is_soft_float ()) {
4327 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4329 for (i = 0; i < sig->param_count; ++i)
4330 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4335 if (g_list_find (cfg->dont_inline, method))
/* Instrumented (profiled) methods must stay visible as real frames */
4338 if (mono_profiler_get_call_instrumentation_flags (method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires KLASS's cctor to
 * be triggered at runtime (i.e. it has not provably run already).
 * NOTE(review): some lines are elided from this excerpt.
 */
4345 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4347 if (!cfg->compile_aot) {
4349 if (vtable->initialized)
4353 if (mono_class_is_before_field_init (klass)) {
4354 if (cfg->method == method)
4358 if (!mono_class_needs_cctor_run (klass, method))
4361 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4362 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional array
 * of KLASS elements, with an optional bounds check (BCHECK). Uses an x86/amd64
 * LEA fast path for power-of-two element sizes, and reads the element size
 * from the rgctx for gsharedvt variable-size classes.
 */
4369 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4373 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4376 if (mini_is_gsharedvt_variable_klass (klass)) {
4379 mono_class_init (klass);
4380 size = mono_class_array_element_size (klass);
4383 mult_reg = alloc_preg (cfg);
4384 array_reg = arr->dreg;
4385 index_reg = index->dreg;
4387 #if SIZEOF_REGISTER == 8
4388 /* The array reg is 64 bits but the index reg is only 32 */
4389 if (COMPILE_LLVM (cfg)) {
4391 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4392 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4393 * of OP_X86_LEA for llvm.
4395 index2_reg = index_reg;
4397 index2_reg = alloc_preg (cfg);
4398 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index to I4 */
4401 if (index->type == STACK_I8) {
4402 index2_reg = alloc_preg (cfg);
4403 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4405 index2_reg = index_reg;
4410 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4412 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: single LEA when element size is a power of two <= 8 */
4413 if (size == 1 || size == 2 || size == 4 || size == 8) {
4414 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4416 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4417 ins->klass = mono_class_get_element_class (klass);
4418 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (vector) */
4424 add_reg = alloc_ireg_mp (cfg);
4427 MonoInst *rgctx_ins;
/* Variable-size (gsharedvt) element: fetch the size from the rgctx */
4430 g_assert (cfg->gshared);
4431 context_used = mini_class_check_context_used (cfg, klass);
4432 g_assert (context_used);
4433 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4434 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4438 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4439 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4440 ins->klass = mono_class_get_element_class (klass);
4441 ins->type = STACK_MP;
4442 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of a two-dimensional array element
 * ARR [i, j]: subtract each dimension's lower bound, range-check both
 * adjusted indexes against the per-dimension lengths, then compute
 * ((i' * len2) + j') * size + offsetof (vector).
 */
4448 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4450 int bounds_reg = alloc_preg (cfg);
4451 int add_reg = alloc_ireg_mp (cfg);
4452 int mult_reg = alloc_preg (cfg);
4453 int mult2_reg = alloc_preg (cfg);
4454 int low1_reg = alloc_preg (cfg);
4455 int low2_reg = alloc_preg (cfg);
4456 int high1_reg = alloc_preg (cfg);
4457 int high2_reg = alloc_preg (cfg);
4458 int realidx1_reg = alloc_preg (cfg);
4459 int realidx2_reg = alloc_preg (cfg);
4460 int sum_reg = alloc_preg (cfg);
4461 int index1, index2, tmpreg;
4465 mono_class_init (klass);
4466 size = mono_class_array_element_size (klass);
4468 index1 = index_ins1->dreg;
4469 index2 = index_ins2->dreg;
4471 #if SIZEOF_REGISTER == 8
4472 /* The array reg is 64 bits but the index reg is only 32 */
4473 if (COMPILE_LLVM (cfg)) {
4476 tmpreg = alloc_preg (cfg);
4477 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4479 tmpreg = alloc_preg (cfg);
4480 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4484 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4488 /* range checking */
4489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4490 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: idx' = idx - lower_bound; throw unless idx' < length */
4492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4493 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4494 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4496 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4497 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4498 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
4500 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4501 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4502 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4503 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4504 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4506 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((idx1' * len2) + idx2') * size + offsetof (vector) */
4508 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4509 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4511 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4512 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4514 ins->type = STACK_MP;
4516 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for a (multi-dimensional) array accessor
 * method CMETHOD. Rank 1 and (intrinsics-enabled) rank 2 are expanded inline;
 * higher ranks go through a marshal-generated Address () helper call.
 */
4522 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4526 MonoMethod *addr_method;
4528 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the trailing value argument is not an index */
4530 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4533 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4535 /* emit_ldelema_2 depends on OP_LMUL */
4536 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4537 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt variable-size elements are not supported here (elided handling) */
4540 if (mini_is_gsharedvt_variable_klass (eclass))
4543 element_size = mono_class_array_element_size (eclass);
4544 addr_method = mono_marshal_get_array_address (rank, element_size);
4545 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4550 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address (no
 * bounds check — callers already did it) and copy the value in (IS_SET) or
 * out, emitting a write barrier when storing reference-typed elements.
 */
4552 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4554 MonoInst *addr, *store, *load;
4555 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4557 /* the bounds check is already done by the callers */
4558 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: copy *args [2] into the array slot (elided branch) */
4560 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4561 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4562 if (mini_type_is_reference (&eklass->byval_arg))
4563 mini_emit_write_barrier (cfg, addr, load);
/* get: copy the array slot into *args [2] */
4565 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4566 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS instantiates to a reference type (thin wrapper over
 * mini_type_is_reference on its byval type). */
4573 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4575 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 * Reference-typed stores with SAFETY_CHECKS go through the virtual stelemref
 * helper (which performs the array covariance check); otherwise the element
 * address is computed inline, with a constant-index fast path.
 */
4579 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a (non-null) reference needs the covariance-checking helper */
4581 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4582 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4583 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4584 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4585 MonoInst *iargs [3];
4588 mono_class_setup_vtable (obj_array);
4589 g_assert (helper->slot);
4591 if (sp [0]->type != STACK_OBJ)
4593 if (sp [2]->type != STACK_OBJ)
4600 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4604 if (mini_is_gsharedvt_variable_klass (klass)) {
4607 // FIXME-VT: OP_ICONST optimization
4608 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4609 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4610 ins->opcode = OP_STOREV_MEMBASE;
4611 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the offset and skip the address computation */
4612 int array_reg = sp [0]->dreg;
4613 int index_reg = sp [1]->dreg;
4614 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4616 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4617 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4620 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4621 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4623 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4624 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4625 if (generic_class_is_reference_type (cfg, klass))
4626 mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Emit IR for the Array.UnsafeStore/UnsafeLoad intrinsics: element
 * access with bounds/covariance checks disabled (FALSE passed below).
 * IS_SET selects store vs. load; the element class is taken from the
 * signature (params [2] for the store, the return type for the load).
 * NOTE(review): gapped excerpt — the is_set branch keywords, braces and
 * return statements are not visible here.
 */
4633 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4638 eklass = mono_class_from_mono_type (fsig->params [2]);
4640 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path reuses emit_array_store with safety_checks disabled. */
4643 return emit_array_store (cfg, eklass, args, FALSE);
/* Load path: unchecked address computation + typed load. */
4645 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4646 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic can be compiled as
 * a plain register move from PARAM_KLASS to RETURN_KLASS. Allowed when
 * both are GC-reference-free valuetypes of equal size, or both are
 * non-struct scalars that fit in 32 bits; rejected for ref/valuetype
 * mixes, types with managed references, struct/scalar mixes, floats,
 * and size-mismatched structs.
 * NOTE(review): several lines below contain the mojibake "¶m_klass" —
 * this is a mis-encoding of "&param_klass" ("&para" swallowed as an
 * HTML entity). The code bytes are preserved here untouched; fix the
 * encoding in the canonical source.
 */
4652 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4655 int param_size, return_size;
/* Strip enum/underlying-type wrappers so comparisons see the real types. */
4657 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
4658 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4660 if (cfg->verbose_level > 3)
4661 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4663 //Don't allow mixing reference types with value types
4664 if (param_klass->valuetype != return_klass->valuetype) {
4665 if (cfg->verbose_level > 3)
4666 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
/* Both reference types: rejected (the visible check refuses !valuetype). */
4670 if (!param_klass->valuetype) {
4671 if (cfg->verbose_level > 3)
4672 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types holding GC references cannot be blindly bit-copied. */
4677 if (param_klass->has_references || return_klass->has_references)
4680 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
4681 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4682 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4683 if (cfg->verbose_level > 3)
4684 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in a different register class; a plain move won't do. */
4688 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4689 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4690 if (cfg->verbose_level > 3)
4691 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4695 param_size = mono_class_value_size (param_klass, &align);
4696 return_size = mono_class_value_size (return_klass, &align);
4698 //We can do it if sizes match
4699 if (param_size == return_size) {
4700 if (cfg->verbose_level > 3)
4701 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4705 //No simple way to handle struct if sizes don't match
4706 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
4707 if (cfg->verbose_level > 3)
4708 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4713 * Same reg size category.
4714 * A quick note on why we don't require widening here.
4715 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4717 * Since the source value comes from a function argument, the JIT will already have
4718 * the value in a VREG and performed any widening needed before (say, when loading from a field).
/* Different sizes but both <= 32-bit: same integer register class, OK. */
4720 if (param_size <= 4 && return_size <= 4) {
4721 if (cfg->verbose_level > 3)
4722 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Emit IR for Array.UnsafeMov<S,R>: reinterpret the single argument as
 * the return type when is_unsafe_mov_compatible approves, either for the
 * types themselves or for rank-1 arrays of compatible element types.
 * Bails out for gsharedvt variable return types.
 * NOTE(review): gapped excerpt — the actual emitted instruction(s) and
 * return statements between these checks are not visible here.
 */
4730 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4732 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4733 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* Variable-size generic-shared return types can't be moved as a fixed-size value. */
4735 if (mini_is_gsharedvt_variable_type (fsig->ret))
4738 //Valuetypes that are semantically equivalent or numbers than can be widened to
4739 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
4742 //Arrays of valuetypes that are semantically equivalent
4743 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR. SIMD ctor
 * intrinsics are attempted first (when MONO_OPT_SIMD is enabled and the
 * arch supports them); otherwise falls through to the native-types
 * intrinsics. Returns the emitted instruction, presumably NULL when no
 * intrinsic applies — gapped excerpt, the early-return for a non-NULL
 * SIMD result is not visible here.
 */
4750 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4752 #ifdef MONO_ARCH_SIMD_INTRINSICS
4753 MonoInst *ins = NULL;
4755 if (cfg->opt & MONO_OPT_SIMD) {
4756 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4762 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* value, stored in backend.memory_barrier_kind)
 * to the current basic block.
 */
4766 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4768 MonoInst *ins = NULL;
4769 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4770 MONO_ADD_INS (cfg->cbb, ins);
4771 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsic IR for methods the LLVM backend can lower directly:
 * System.Math Sin/Cos/Sqrt/Abs(double) as unary float ops, and
 * Min/Max over I4/U4/I8/U8 as cmov-style binary ops (only when
 * MONO_OPT_CMOV is on). Returns the emitted instruction; the failure
 * return path is not visible in this gapped excerpt. The opcode values
 * assigned on the elided lines (e.g. for Sin/Cos/Sqrt/Min/Max) are also
 * not visible here.
 */
4777 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4779 MonoInst *ins = NULL;
4782 /* The LLVM backend supports these intrinsics */
4783 if (cmethod->klass == mono_defaults.math_class) {
4784 if (strcmp (cmethod->name, "Sin") == 0) {
4786 } else if (strcmp (cmethod->name, "Cos") == 0) {
4788 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Only the double overload of Abs is intrinsified. */
4790 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary math intrinsic: one R8 argument, R8 result. */
4794 if (opcode && fsig->param_count == 1) {
4795 MONO_INST_NEW (cfg, ins, opcode);
4796 ins->type = STACK_R8;
4797 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4798 ins->sreg1 = args [0]->dreg;
4799 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map onto conditional-move opcodes, so require MONO_OPT_CMOV. */
4803 if (cfg->opt & MONO_OPT_CMOV) {
4804 if (strcmp (cmethod->name, "Min") == 0) {
4805 if (fsig->params [0]->type == MONO_TYPE_I4)
4807 if (fsig->params [0]->type == MONO_TYPE_U4)
4808 opcode = OP_IMIN_UN;
4809 else if (fsig->params [0]->type == MONO_TYPE_I8)
4811 else if (fsig->params [0]->type == MONO_TYPE_U8)
4812 opcode = OP_LMIN_UN;
4813 } else if (strcmp (cmethod->name, "Max") == 0) {
4814 if (fsig->params [0]->type == MONO_TYPE_I4)
4816 if (fsig->params [0]->type == MONO_TYPE_U4)
4817 opcode = OP_IMAX_UN;
4818 else if (fsig->params [0]->type == MONO_TYPE_I8)
4820 else if (fsig->params [0]->type == MONO_TYPE_U8)
4821 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: stack type follows the first parameter's width. */
4825 if (opcode && fsig->param_count == 2) {
4826 MONO_INST_NEW (cfg, ins, opcode);
4827 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4828 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4829 ins->sreg1 = args [0]->dreg;
4830 ins->sreg2 = args [1]->dreg;
4831 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in shared (generic-sharing) code:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov internal methods. Dispatch
 * is by method name on mono_defaults.array_class; anything else
 * presumably falls through to a NULL return not visible in this excerpt.
 */
4839 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4841 if (cmethod->klass == mono_defaults.array_class) {
4842 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4843 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4844 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4845 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4846 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
4847 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mono_type_is_native_blittable:
 *
 *   Return whether T has the same layout in managed and native form, so
 * marshalling can be a plain memory copy/type load. References and
 * primitive scalars short-circuit; other types must be marked blittable
 * (after field setup) AND have equal native and managed sizes.
 * NOTE(review): gapped excerpt — the TRUE/FALSE return lines between the
 * visible checks are missing here.
 */
4855 mono_type_is_native_blittable (MonoType *t)
4857 if (MONO_TYPE_IS_REFERENCE (t))
4860 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
4863 MonoClass *klass = mono_class_from_mono_type (t);
4865 //MonoClass::blitable depends on mono_class_setup_fields being done.
4866 mono_class_setup_fields (klass);
4867 if (!klass->blittable)
4870 // If the native marshal size is different we can't convert PtrToStructure to a type load
4871 if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
4879 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4881 MonoInst *ins = NULL;
4882 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
4884 if (cmethod->klass == mono_defaults.string_class) {
4885 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
4886 int dreg = alloc_ireg (cfg);
4887 int index_reg = alloc_preg (cfg);
4888 int add_reg = alloc_preg (cfg);
4890 #if SIZEOF_REGISTER == 8
4891 if (COMPILE_LLVM (cfg)) {
4892 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
4894 /* The array reg is 64 bits but the index reg is only 32 */
4895 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4898 index_reg = args [1]->dreg;
4900 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4902 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4903 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
4904 add_reg = ins->dreg;
4905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4908 int mult_reg = alloc_preg (cfg);
4909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4910 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4911 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4912 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
4914 type_from_op (cfg, ins, NULL, NULL);
4916 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
4917 int dreg = alloc_ireg (cfg);
4918 /* Decompose later to allow more optimizations */
4919 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4920 ins->type = STACK_I4;
4921 ins->flags |= MONO_INST_FAULT;
4922 cfg->cbb->has_array_access = TRUE;
4923 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4928 } else if (cmethod->klass == mono_defaults.object_class) {
4929 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
4930 int dreg = alloc_ireg_ref (cfg);
4931 int vt_reg = alloc_preg (cfg);
4932 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4933 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
4934 type_from_op (cfg, ins, NULL, NULL);
4937 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
4938 int dreg = alloc_ireg (cfg);
4939 int t1 = alloc_ireg (cfg);
4941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4942 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4943 ins->type = STACK_I4;
4946 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
4947 MONO_INST_NEW (cfg, ins, OP_NOP);
4948 MONO_ADD_INS (cfg->cbb, ins);
4952 } else if (cmethod->klass == mono_defaults.array_class) {
4953 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4954 return emit_array_generic_access (cfg, fsig, args, FALSE);
4955 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4956 return emit_array_generic_access (cfg, fsig, args, TRUE);
4958 #ifndef MONO_BIG_ARRAYS
4960 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4963 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
4964 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
4965 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4966 int dreg = alloc_ireg (cfg);
4967 int bounds_reg = alloc_ireg_mp (cfg);
4968 MonoBasicBlock *end_bb, *szarray_bb;
4969 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4971 NEW_BBLOCK (cfg, end_bb);
4972 NEW_BBLOCK (cfg, szarray_bb);
4974 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4975 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4978 /* Non-szarray case */
4980 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4981 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4983 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4984 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4986 MONO_START_BB (cfg, szarray_bb);
4989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4990 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
4992 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4993 MONO_START_BB (cfg, end_bb);
4995 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4996 ins->type = STACK_I4;
5002 if (cmethod->name [0] != 'g')
5005 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5006 int dreg = alloc_ireg (cfg);
5007 int vtable_reg = alloc_preg (cfg);
5008 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5009 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5010 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5011 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5012 type_from_op (cfg, ins, NULL, NULL);
5015 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5016 int dreg = alloc_ireg (cfg);
5018 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5019 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5020 type_from_op (cfg, ins, NULL, NULL);
5025 } else if (cmethod->klass == runtime_helpers_class) {
5026 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5027 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5029 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5030 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5032 g_assert (ctx->method_inst);
5033 g_assert (ctx->method_inst->type_argc == 1);
5034 MonoType *arg_type = ctx->method_inst->type_argv [0];
5040 /* Resolve the argument class as possible so we can handle common cases fast */
5041 t = mini_get_underlying_type (arg_type);
5042 klass = mono_class_from_mono_type (t);
5043 mono_class_init (klass);
5044 if (MONO_TYPE_IS_REFERENCE (t))
5045 EMIT_NEW_ICONST (cfg, ins, 1);
5046 else if (MONO_TYPE_IS_PRIMITIVE (t))
5047 EMIT_NEW_ICONST (cfg, ins, 0);
5048 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5049 EMIT_NEW_ICONST (cfg, ins, 1);
5050 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5051 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5053 g_assert (cfg->gshared);
5055 /* Have to use the original argument class here */
5056 MonoClass *arg_class = mono_class_from_mono_type (arg_type);
5057 int context_used = mini_class_check_context_used (cfg, arg_class);
5059 /* This returns 1 or 2 */
5060 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5061 int dreg = alloc_ireg (cfg);
5062 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5068 } else if (cmethod->klass == mono_defaults.monitor_class) {
5069 gboolean is_enter = FALSE;
5070 gboolean is_v4 = FALSE;
5072 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5076 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5081 * To make async stack traces work, icalls which can block should have a wrapper.
5082 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5084 MonoBasicBlock *end_bb;
5086 NEW_BBLOCK (cfg, end_bb);
5088 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5090 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5091 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5092 MONO_START_BB (cfg, end_bb);
5095 } else if (cmethod->klass == mono_defaults.thread_class) {
5096 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5097 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5098 MONO_ADD_INS (cfg->cbb, ins);
5100 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5101 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5102 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5104 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5106 if (fsig->params [0]->type == MONO_TYPE_I1)
5107 opcode = OP_LOADI1_MEMBASE;
5108 else if (fsig->params [0]->type == MONO_TYPE_U1)
5109 opcode = OP_LOADU1_MEMBASE;
5110 else if (fsig->params [0]->type == MONO_TYPE_I2)
5111 opcode = OP_LOADI2_MEMBASE;
5112 else if (fsig->params [0]->type == MONO_TYPE_U2)
5113 opcode = OP_LOADU2_MEMBASE;
5114 else if (fsig->params [0]->type == MONO_TYPE_I4)
5115 opcode = OP_LOADI4_MEMBASE;
5116 else if (fsig->params [0]->type == MONO_TYPE_U4)
5117 opcode = OP_LOADU4_MEMBASE;
5118 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5119 opcode = OP_LOADI8_MEMBASE;
5120 else if (fsig->params [0]->type == MONO_TYPE_R4)
5121 opcode = OP_LOADR4_MEMBASE;
5122 else if (fsig->params [0]->type == MONO_TYPE_R8)
5123 opcode = OP_LOADR8_MEMBASE;
5124 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5125 opcode = OP_LOAD_MEMBASE;
5128 MONO_INST_NEW (cfg, ins, opcode);
5129 ins->inst_basereg = args [0]->dreg;
5130 ins->inst_offset = 0;
5131 MONO_ADD_INS (cfg->cbb, ins);
5133 switch (fsig->params [0]->type) {
5140 ins->dreg = mono_alloc_ireg (cfg);
5141 ins->type = STACK_I4;
5145 ins->dreg = mono_alloc_lreg (cfg);
5146 ins->type = STACK_I8;
5150 ins->dreg = mono_alloc_ireg (cfg);
5151 #if SIZEOF_REGISTER == 8
5152 ins->type = STACK_I8;
5154 ins->type = STACK_I4;
5159 ins->dreg = mono_alloc_freg (cfg);
5160 ins->type = STACK_R8;
5163 g_assert (mini_type_is_reference (fsig->params [0]));
5164 ins->dreg = mono_alloc_ireg_ref (cfg);
5165 ins->type = STACK_OBJ;
5169 if (opcode == OP_LOADI8_MEMBASE)
5170 ins = mono_decompose_opcode (cfg, ins);
5172 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5176 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5178 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5180 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5181 opcode = OP_STOREI1_MEMBASE_REG;
5182 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5183 opcode = OP_STOREI2_MEMBASE_REG;
5184 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5185 opcode = OP_STOREI4_MEMBASE_REG;
5186 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5187 opcode = OP_STOREI8_MEMBASE_REG;
5188 else if (fsig->params [0]->type == MONO_TYPE_R4)
5189 opcode = OP_STORER4_MEMBASE_REG;
5190 else if (fsig->params [0]->type == MONO_TYPE_R8)
5191 opcode = OP_STORER8_MEMBASE_REG;
5192 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5193 opcode = OP_STORE_MEMBASE_REG;
5196 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5198 MONO_INST_NEW (cfg, ins, opcode);
5199 ins->sreg1 = args [1]->dreg;
5200 ins->inst_destbasereg = args [0]->dreg;
5201 ins->inst_offset = 0;
5202 MONO_ADD_INS (cfg->cbb, ins);
5204 if (opcode == OP_STOREI8_MEMBASE_REG)
5205 ins = mono_decompose_opcode (cfg, ins);
5210 } else if (cmethod->klass->image == mono_defaults.corlib &&
5211 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5212 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5215 #if SIZEOF_REGISTER == 8
5216 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5217 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5218 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5219 ins->dreg = mono_alloc_preg (cfg);
5220 ins->sreg1 = args [0]->dreg;
5221 ins->type = STACK_I8;
5222 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5223 MONO_ADD_INS (cfg->cbb, ins);
5227 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5229 /* 64 bit reads are already atomic */
5230 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5231 load_ins->dreg = mono_alloc_preg (cfg);
5232 load_ins->inst_basereg = args [0]->dreg;
5233 load_ins->inst_offset = 0;
5234 load_ins->type = STACK_I8;
5235 MONO_ADD_INS (cfg->cbb, load_ins);
5237 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5244 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5245 MonoInst *ins_iconst;
5248 if (fsig->params [0]->type == MONO_TYPE_I4) {
5249 opcode = OP_ATOMIC_ADD_I4;
5250 cfg->has_atomic_add_i4 = TRUE;
5252 #if SIZEOF_REGISTER == 8
5253 else if (fsig->params [0]->type == MONO_TYPE_I8)
5254 opcode = OP_ATOMIC_ADD_I8;
5257 if (!mono_arch_opcode_supported (opcode))
5259 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5260 ins_iconst->inst_c0 = 1;
5261 ins_iconst->dreg = mono_alloc_ireg (cfg);
5262 MONO_ADD_INS (cfg->cbb, ins_iconst);
5264 MONO_INST_NEW (cfg, ins, opcode);
5265 ins->dreg = mono_alloc_ireg (cfg);
5266 ins->inst_basereg = args [0]->dreg;
5267 ins->inst_offset = 0;
5268 ins->sreg2 = ins_iconst->dreg;
5269 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5270 MONO_ADD_INS (cfg->cbb, ins);
5272 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5273 MonoInst *ins_iconst;
5276 if (fsig->params [0]->type == MONO_TYPE_I4) {
5277 opcode = OP_ATOMIC_ADD_I4;
5278 cfg->has_atomic_add_i4 = TRUE;
5280 #if SIZEOF_REGISTER == 8
5281 else if (fsig->params [0]->type == MONO_TYPE_I8)
5282 opcode = OP_ATOMIC_ADD_I8;
5285 if (!mono_arch_opcode_supported (opcode))
5287 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5288 ins_iconst->inst_c0 = -1;
5289 ins_iconst->dreg = mono_alloc_ireg (cfg);
5290 MONO_ADD_INS (cfg->cbb, ins_iconst);
5292 MONO_INST_NEW (cfg, ins, opcode);
5293 ins->dreg = mono_alloc_ireg (cfg);
5294 ins->inst_basereg = args [0]->dreg;
5295 ins->inst_offset = 0;
5296 ins->sreg2 = ins_iconst->dreg;
5297 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5298 MONO_ADD_INS (cfg->cbb, ins);
5300 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5303 if (fsig->params [0]->type == MONO_TYPE_I4) {
5304 opcode = OP_ATOMIC_ADD_I4;
5305 cfg->has_atomic_add_i4 = TRUE;
5307 #if SIZEOF_REGISTER == 8
5308 else if (fsig->params [0]->type == MONO_TYPE_I8)
5309 opcode = OP_ATOMIC_ADD_I8;
5312 if (!mono_arch_opcode_supported (opcode))
5314 MONO_INST_NEW (cfg, ins, opcode);
5315 ins->dreg = mono_alloc_ireg (cfg);
5316 ins->inst_basereg = args [0]->dreg;
5317 ins->inst_offset = 0;
5318 ins->sreg2 = args [1]->dreg;
5319 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5320 MONO_ADD_INS (cfg->cbb, ins);
5323 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5324 MonoInst *f2i = NULL, *i2f;
5325 guint32 opcode, f2i_opcode, i2f_opcode;
5326 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5327 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5329 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5330 fsig->params [0]->type == MONO_TYPE_R4) {
5331 opcode = OP_ATOMIC_EXCHANGE_I4;
5332 f2i_opcode = OP_MOVE_F_TO_I4;
5333 i2f_opcode = OP_MOVE_I4_TO_F;
5334 cfg->has_atomic_exchange_i4 = TRUE;
5336 #if SIZEOF_REGISTER == 8
5338 fsig->params [0]->type == MONO_TYPE_I8 ||
5339 fsig->params [0]->type == MONO_TYPE_R8 ||
5340 fsig->params [0]->type == MONO_TYPE_I) {
5341 opcode = OP_ATOMIC_EXCHANGE_I8;
5342 f2i_opcode = OP_MOVE_F_TO_I8;
5343 i2f_opcode = OP_MOVE_I8_TO_F;
5346 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5347 opcode = OP_ATOMIC_EXCHANGE_I4;
5348 cfg->has_atomic_exchange_i4 = TRUE;
5354 if (!mono_arch_opcode_supported (opcode))
5358 /* TODO: Decompose these opcodes instead of bailing here. */
5359 if (COMPILE_SOFT_FLOAT (cfg))
5362 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5363 f2i->dreg = mono_alloc_ireg (cfg);
5364 f2i->sreg1 = args [1]->dreg;
5365 if (f2i_opcode == OP_MOVE_F_TO_I4)
5366 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5367 MONO_ADD_INS (cfg->cbb, f2i);
5370 MONO_INST_NEW (cfg, ins, opcode);
5371 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5372 ins->inst_basereg = args [0]->dreg;
5373 ins->inst_offset = 0;
5374 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5375 MONO_ADD_INS (cfg->cbb, ins);
5377 switch (fsig->params [0]->type) {
5379 ins->type = STACK_I4;
5382 ins->type = STACK_I8;
5385 #if SIZEOF_REGISTER == 8
5386 ins->type = STACK_I8;
5388 ins->type = STACK_I4;
5393 ins->type = STACK_R8;
5396 g_assert (mini_type_is_reference (fsig->params [0]));
5397 ins->type = STACK_OBJ;
5402 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5403 i2f->dreg = mono_alloc_freg (cfg);
5404 i2f->sreg1 = ins->dreg;
5405 i2f->type = STACK_R8;
5406 if (i2f_opcode == OP_MOVE_I4_TO_F)
5407 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5408 MONO_ADD_INS (cfg->cbb, i2f);
5413 if (cfg->gen_write_barriers && is_ref)
5414 mini_emit_write_barrier (cfg, args [0], args [1]);
5416 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5417 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5418 guint32 opcode, f2i_opcode, i2f_opcode;
5419 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5420 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5422 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5423 fsig->params [1]->type == MONO_TYPE_R4) {
5424 opcode = OP_ATOMIC_CAS_I4;
5425 f2i_opcode = OP_MOVE_F_TO_I4;
5426 i2f_opcode = OP_MOVE_I4_TO_F;
5427 cfg->has_atomic_cas_i4 = TRUE;
5429 #if SIZEOF_REGISTER == 8
5431 fsig->params [1]->type == MONO_TYPE_I8 ||
5432 fsig->params [1]->type == MONO_TYPE_R8 ||
5433 fsig->params [1]->type == MONO_TYPE_I) {
5434 opcode = OP_ATOMIC_CAS_I8;
5435 f2i_opcode = OP_MOVE_F_TO_I8;
5436 i2f_opcode = OP_MOVE_I8_TO_F;
5439 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5440 opcode = OP_ATOMIC_CAS_I4;
5441 cfg->has_atomic_cas_i4 = TRUE;
5447 if (!mono_arch_opcode_supported (opcode))
5451 /* TODO: Decompose these opcodes instead of bailing here. */
5452 if (COMPILE_SOFT_FLOAT (cfg))
5455 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5456 f2i_new->dreg = mono_alloc_ireg (cfg);
5457 f2i_new->sreg1 = args [1]->dreg;
5458 if (f2i_opcode == OP_MOVE_F_TO_I4)
5459 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5460 MONO_ADD_INS (cfg->cbb, f2i_new);
5462 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5463 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5464 f2i_cmp->sreg1 = args [2]->dreg;
5465 if (f2i_opcode == OP_MOVE_F_TO_I4)
5466 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5467 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5470 MONO_INST_NEW (cfg, ins, opcode);
5471 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5472 ins->sreg1 = args [0]->dreg;
5473 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5474 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5475 MONO_ADD_INS (cfg->cbb, ins);
5477 switch (fsig->params [1]->type) {
5479 ins->type = STACK_I4;
5482 ins->type = STACK_I8;
5485 #if SIZEOF_REGISTER == 8
5486 ins->type = STACK_I8;
5488 ins->type = STACK_I4;
5492 ins->type = cfg->r4_stack_type;
5495 ins->type = STACK_R8;
5498 g_assert (mini_type_is_reference (fsig->params [1]));
5499 ins->type = STACK_OBJ;
5504 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5505 i2f->dreg = mono_alloc_freg (cfg);
5506 i2f->sreg1 = ins->dreg;
5507 i2f->type = STACK_R8;
5508 if (i2f_opcode == OP_MOVE_I4_TO_F)
5509 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5510 MONO_ADD_INS (cfg->cbb, i2f);
5515 if (cfg->gen_write_barriers && is_ref)
5516 mini_emit_write_barrier (cfg, args [0], args [1]);
5518 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5519 fsig->params [1]->type == MONO_TYPE_I4) {
5520 MonoInst *cmp, *ceq;
5522 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5525 /* int32 r = CAS (location, value, comparand); */
5526 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5527 ins->dreg = alloc_ireg (cfg);
5528 ins->sreg1 = args [0]->dreg;
5529 ins->sreg2 = args [1]->dreg;
5530 ins->sreg3 = args [2]->dreg;
5531 ins->type = STACK_I4;
5532 MONO_ADD_INS (cfg->cbb, ins);
5534 /* bool result = r == comparand; */
5535 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5536 cmp->sreg1 = ins->dreg;
5537 cmp->sreg2 = args [2]->dreg;
5538 cmp->type = STACK_I4;
5539 MONO_ADD_INS (cfg->cbb, cmp);
5541 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5542 ceq->dreg = alloc_ireg (cfg);
5543 ceq->type = STACK_I4;
5544 MONO_ADD_INS (cfg->cbb, ceq);
5546 /* *success = result; */
5547 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5549 cfg->has_atomic_cas_i4 = TRUE;
5551 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5552 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5556 } else if (cmethod->klass->image == mono_defaults.corlib &&
5557 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5558 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5561 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5563 MonoType *t = fsig->params [0];
5565 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5567 g_assert (t->byref);
5568 /* t is a byref type, so the reference check is more complicated */
5569 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5570 if (t->type == MONO_TYPE_I1)
5571 opcode = OP_ATOMIC_LOAD_I1;
5572 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5573 opcode = OP_ATOMIC_LOAD_U1;
5574 else if (t->type == MONO_TYPE_I2)
5575 opcode = OP_ATOMIC_LOAD_I2;
5576 else if (t->type == MONO_TYPE_U2)
5577 opcode = OP_ATOMIC_LOAD_U2;
5578 else if (t->type == MONO_TYPE_I4)
5579 opcode = OP_ATOMIC_LOAD_I4;
5580 else if (t->type == MONO_TYPE_U4)
5581 opcode = OP_ATOMIC_LOAD_U4;
5582 else if (t->type == MONO_TYPE_R4)
5583 opcode = OP_ATOMIC_LOAD_R4;
5584 else if (t->type == MONO_TYPE_R8)
5585 opcode = OP_ATOMIC_LOAD_R8;
5586 #if SIZEOF_REGISTER == 8
5587 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5588 opcode = OP_ATOMIC_LOAD_I8;
5589 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5590 opcode = OP_ATOMIC_LOAD_U8;
5592 else if (t->type == MONO_TYPE_I)
5593 opcode = OP_ATOMIC_LOAD_I4;
5594 else if (is_ref || t->type == MONO_TYPE_U)
5595 opcode = OP_ATOMIC_LOAD_U4;
5599 if (!mono_arch_opcode_supported (opcode))
5602 MONO_INST_NEW (cfg, ins, opcode);
5603 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5604 ins->sreg1 = args [0]->dreg;
5605 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5606 MONO_ADD_INS (cfg->cbb, ins);
5609 case MONO_TYPE_BOOLEAN:
5616 ins->type = STACK_I4;
5620 ins->type = STACK_I8;
5624 #if SIZEOF_REGISTER == 8
5625 ins->type = STACK_I8;
5627 ins->type = STACK_I4;
5631 ins->type = cfg->r4_stack_type;
5634 ins->type = STACK_R8;
5638 ins->type = STACK_OBJ;
5644 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5646 MonoType *t = fsig->params [0];
5649 g_assert (t->byref);
5650 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5651 if (t->type == MONO_TYPE_I1)
5652 opcode = OP_ATOMIC_STORE_I1;
5653 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5654 opcode = OP_ATOMIC_STORE_U1;
5655 else if (t->type == MONO_TYPE_I2)
5656 opcode = OP_ATOMIC_STORE_I2;
5657 else if (t->type == MONO_TYPE_U2)
5658 opcode = OP_ATOMIC_STORE_U2;
5659 else if (t->type == MONO_TYPE_I4)
5660 opcode = OP_ATOMIC_STORE_I4;
5661 else if (t->type == MONO_TYPE_U4)
5662 opcode = OP_ATOMIC_STORE_U4;
5663 else if (t->type == MONO_TYPE_R4)
5664 opcode = OP_ATOMIC_STORE_R4;
5665 else if (t->type == MONO_TYPE_R8)
5666 opcode = OP_ATOMIC_STORE_R8;
5667 #if SIZEOF_REGISTER == 8
5668 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5669 opcode = OP_ATOMIC_STORE_I8;
5670 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5671 opcode = OP_ATOMIC_STORE_U8;
5673 else if (t->type == MONO_TYPE_I)
5674 opcode = OP_ATOMIC_STORE_I4;
5675 else if (is_ref || t->type == MONO_TYPE_U)
5676 opcode = OP_ATOMIC_STORE_U4;
5680 if (!mono_arch_opcode_supported (opcode))
5683 MONO_INST_NEW (cfg, ins, opcode);
5684 ins->dreg = args [0]->dreg;
5685 ins->sreg1 = args [1]->dreg;
5686 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5687 MONO_ADD_INS (cfg->cbb, ins);
5689 if (cfg->gen_write_barriers && is_ref)
5690 mini_emit_write_barrier (cfg, args [0], args [1]);
5696 } else if (cmethod->klass->image == mono_defaults.corlib &&
5697 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5698 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5699 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5700 if (mini_should_insert_breakpoint (cfg->method)) {
5701 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5703 MONO_INST_NEW (cfg, ins, OP_NOP);
5704 MONO_ADD_INS (cfg->cbb, ins);
5708 } else if (cmethod->klass->image == mono_defaults.corlib &&
5709 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5710 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5711 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5713 EMIT_NEW_ICONST (cfg, ins, 1);
5715 EMIT_NEW_ICONST (cfg, ins, 0);
5718 } else if (cmethod->klass->image == mono_defaults.corlib &&
5719 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5720 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5721 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5722 /* No stack walks are currently available, so implement this as an intrinsic */
5723 MonoInst *assembly_ins;
5725 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5726 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5729 } else if (cmethod->klass->image == mono_defaults.corlib &&
5730 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5731 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5732 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5733 /* No stack walks are currently available, so implement this as an intrinsic */
5734 MonoInst *method_ins;
5735 MonoMethod *declaring = cfg->method;
5737 /* This returns the declaring generic method */
5738 if (declaring->is_inflated)
5739 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5740 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5741 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5742 cfg->no_inline = TRUE;
5743 if (cfg->method != cfg->current_method)
5744 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5747 } else if (cmethod->klass == mono_defaults.math_class) {
5749 * There is general branchless code for Min/Max, but it does not work for
5751 * http://everything2.com/?node_id=1051618
5753 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5754 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5755 MONO_INST_NEW (cfg, ins, OP_PCEQ);
5756 ins->dreg = alloc_preg (cfg);
5757 ins->type = STACK_I4;
5758 MONO_ADD_INS (cfg->cbb, ins);
5760 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5761 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5762 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5763 !strcmp (cmethod->klass->name, "Selector")) ||
5764 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5765 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5766 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
5767 !strcmp (cmethod->klass->name, "Selector"))
5769 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
5770 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
5771 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
5774 MonoJumpInfoToken *ji;
5777 if (args [0]->opcode == OP_GOT_ENTRY) {
5778 pi = (MonoInst *)args [0]->inst_p1;
5779 g_assert (pi->opcode == OP_PATCH_INFO);
5780 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5781 ji = (MonoJumpInfoToken *)pi->inst_p0;
5783 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5784 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
5787 NULLIFY_INS (args [0]);
5789 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
5790 return_val_if_nok (&cfg->error, NULL);
5792 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5793 ins->dreg = mono_alloc_ireg (cfg);
5796 MONO_ADD_INS (cfg->cbb, ins);
5799 } else if (cmethod->klass->image == mono_defaults.corlib &&
5800 (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
5801 (strcmp (cmethod->klass->name, "Marshal") == 0)) {
5802 //Convert Marshal.PtrToStructure<T> of blittable T to direct loads
5803 if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
5804 cmethod->is_inflated &&
5805 fsig->param_count == 1 &&
5806 !mini_method_check_context_used (cfg, cmethod)) {
5808 MonoGenericContext *method_context = mono_method_get_context (cmethod);
5809 MonoType *arg0 = method_context->method_inst->type_argv [0];
5810 if (mono_type_is_native_blittable (arg0))
5811 return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
5815 #ifdef MONO_ARCH_SIMD_INTRINSICS
5816 if (cfg->opt & MONO_OPT_SIMD) {
5817 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5823 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5827 if (COMPILE_LLVM (cfg)) {
5828 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5833 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5837 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-internal replacement where one exists.
 * The only visible case here redirects String.InternalAllocateStr to the
 * GC's managed allocator (skipped under MONO_OPT_SHARED).
 * NOTE(review): this view elides some lines (e.g. the #endif and the paths
 * returning NULL); only the visible logic is documented.
 */
5840 inline static MonoInst*
5841 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5842 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
5844 if (method->klass == mono_defaults.string_class) {
5845 /* managed string allocation support */
5846 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
5847 MonoInst *iargs [2];
5848 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5849 MonoMethod *managed_alloc = NULL;
5851 g_assert (vtable); /* Should not fail since it is System.String */
5852 #ifndef MONO_CROSS_COMPILE
5853 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5857 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5858 iargs [1] = args [0];
5859 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the inline call's stack arguments SP into freshly created local
 * vars which become the callee's cfg->args [] during inlining.  For an
 * instance call, arg 0's type is taken from the stack entry itself.
 */
5866 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5868 MonoInst *store, *temp;
5871 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5872 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5875 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5876 * would be different than the MonoInst's used to represent arguments, and
5877 * the ldelema implementation can't deal with that.
5878 * Solution: When ldelema is used on an inline argument, create a var for
5879 * it, emit ldelema on that var, and emit the saving code below in
5880 * inline_method () if needed.
5882 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5883 cfg->args [i] = temp;
5884 /* This uses cfg->args [i] which is set by the preceding line */
5885 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5886 store->cil_code = sp [0]->cil_code;
5891 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5892 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5894 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of callees whose full name starts
 * with the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 * The limit is cached in a function-local static on first use.
 */
5896 check_inline_called_method_name_limit (MonoMethod *called_method)
5899 static const char *limit = NULL;
5901 if (limit == NULL) {
5902 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5904 if (limit_string != NULL)
5905 limit = limit_string;
/* Empty limit means "no restriction" (handled on an elided path). */
5910 if (limit [0] != '\0') {
5911 char *called_method_name = mono_method_full_name (called_method, TRUE);
5913 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5914 g_free (called_method_name);
5916 //return (strncmp_result <= 0);
5917 return (strncmp_result == 0);
5924 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: only allow inlining into callers whose full name starts
 * with the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 * Mirrors check_inline_called_method_name_limit () for the caller side.
 */
5926 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5929 static const char *limit = NULL;
5931 if (limit == NULL) {
5932 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5933 if (limit_string != NULL) {
5934 limit = limit_string;
/* Empty limit means "no restriction" (handled on an elided path). */
5940 if (limit [0] != '\0') {
5941 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5943 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5944 g_free (caller_method_name);
5946 //return (strncmp_result <= 0);
5947 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing vreg DREG to the zero/default value of RTYPE:
 * NULL for pointers/references, 0 for integers, 0.0 for R4/R8 (loaded from
 * static constants), and VZERO for valuetypes/typedbyref/vt type-vars.
 * R4 gets a true OP_R4CONST only when the backend keeps floats in r4 regs
 * (cfg->r4fp); otherwise it is widened to an R8 constant.
 */
5955 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5957 static double r8_0 = 0.0;
5958 static float r4_0 = 0.0;
5962 rtype = mini_get_underlying_type (rtype);
5966 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5967 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5968 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5969 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5970 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5971 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
5972 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5973 ins->type = STACK_R4;
5974 ins->inst_p0 = (void*)&r4_0;
5976 MONO_ADD_INS (cfg->cbb, ins);
5977 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5978 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5979 ins->type = STACK_R8;
5980 ins->inst_p0 = (void*)&r8_0;
5982 MONO_ADD_INS (cfg->cbb, ins);
5983 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5984 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5985 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5986 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
5987 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: everything else is reference-like, init to NULL. */
5989 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder inits, which
 * keep the IR/SSA form valid without generating real initialization code.
 * Falls back to a real init for types with no dummy opcode.
 */
5994 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5998 rtype = mini_get_underlying_type (rtype);
6002 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6003 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6004 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6005 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6006 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6007 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6008 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6009 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6010 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6011 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6012 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6013 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6014 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization. */
6017 emit_init_rvar (cfg, dreg, rtype);
6021 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE.  Under soft-float the value is
 * materialized in a fresh vreg and stored with a LOCSTORE; otherwise the
 * local's dreg is initialized directly (real or dummy init per INIT).
 */
6023 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6025 MonoInst *var = cfg->locals [local];
6026 if (COMPILE_SOFT_FLOAT (cfg)) {
6028 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6029 emit_init_rvar (cfg, reg, type);
6030 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6033 emit_init_rvar (cfg, var->dreg, type);
/* init == FALSE: dummy init keeps the IR valid without emitting code. */
6035 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below. */
6040 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6042 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6048 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point of compilation.  Saves the
 * caller's per-method compile state (locals, args, cbb maps, contexts),
 * compiles the callee's IL via mono_method_to_ir () between fresh start/end
 * bblocks, then restores the state.  On success the new bblocks are linked
 * into the caller's CFG (merging begin/end bblocks where possible) and the
 * callee's return value ends up in a temp var (RVAR).  On failure the new
 * bblocks are discarded and 0 is returned.
 */
6051 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6052 guchar *ip, guint real_offset, gboolean inline_always)
6055 MonoInst *ins, *rvar = NULL;
6056 MonoMethodHeader *cheader;
6057 MonoBasicBlock *ebblock, *sbblock;
6059 MonoMethod *prev_inlined_method;
6060 MonoInst **prev_locals, **prev_args;
6061 MonoType **prev_arg_types;
6062 guint prev_real_offset;
6063 GHashTable *prev_cbb_hash;
6064 MonoBasicBlock **prev_cil_offset_to_bb;
6065 MonoBasicBlock *prev_cbb;
6066 const unsigned char *prev_ip;
6067 unsigned char *prev_cil_start;
6068 guint32 prev_cil_offset_to_bb_len;
6069 MonoMethod *prev_current_method;
6070 MonoGenericContext *prev_generic_context;
6071 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6073 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names, see the helpers above. */
6075 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6076 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6079 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6080 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6085 fsig = mono_method_signature (cmethod);
6087 if (cfg->verbose_level > 2)
6088 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6090 if (!cmethod->inline_info) {
6091 cfg->stat_inlineable_methods++;
6092 cmethod->inline_info = 1;
6095 /* allocate local variables */
6096 cheader = mono_method_get_header_checked (cmethod, &error);
6098 if (inline_always) {
6099 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6100 mono_error_move (&cfg->error, &error);
6102 mono_error_cleanup (&error);
6107 /*Must verify before creating locals as it can cause the JIT to assert.*/
6108 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6109 mono_metadata_free_mh (cheader);
6113 /* allocate space to store the return value */
6114 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6115 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6118 prev_locals = cfg->locals;
6119 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6120 for (i = 0; i < cheader->num_locals; ++i)
6121 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6123 /* allocate start and end blocks */
6124 /* This is needed so if the inline is aborted, we can clean up */
6125 NEW_BBLOCK (cfg, sbblock);
6126 sbblock->real_offset = real_offset;
6128 NEW_BBLOCK (cfg, ebblock);
6129 ebblock->block_num = cfg->num_bblocks++;
6130 ebblock->real_offset = real_offset;
/* Save the caller's compile state before recursing into the callee. */
6132 prev_args = cfg->args;
6133 prev_arg_types = cfg->arg_types;
6134 prev_inlined_method = cfg->inlined_method;
6135 cfg->inlined_method = cmethod;
6136 cfg->ret_var_set = FALSE;
6137 cfg->inline_depth ++;
6138 prev_real_offset = cfg->real_offset;
6139 prev_cbb_hash = cfg->cbb_hash;
6140 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6141 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6142 prev_cil_start = cfg->cil_start;
6144 prev_cbb = cfg->cbb;
6145 prev_current_method = cfg->current_method;
6146 prev_generic_context = cfg->generic_context;
6147 prev_ret_var_set = cfg->ret_var_set;
6148 prev_disable_inline = cfg->disable_inline;
6150 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* virtual_ = TRUE on an elided line; tells the callee IR builder
 * the call site was a callvirt on an instance method. */
6153 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6155 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state. */
6157 cfg->inlined_method = prev_inlined_method;
6158 cfg->real_offset = prev_real_offset;
6159 cfg->cbb_hash = prev_cbb_hash;
6160 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6161 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6162 cfg->cil_start = prev_cil_start;
6164 cfg->locals = prev_locals;
6165 cfg->args = prev_args;
6166 cfg->arg_types = prev_arg_types;
6167 cfg->current_method = prev_current_method;
6168 cfg->generic_context = prev_generic_context;
6169 cfg->ret_var_set = prev_ret_var_set;
6170 cfg->disable_inline = prev_disable_inline;
6171 cfg->inline_depth --;
/* Accept the inline if it was cheap enough, forced, or marked
 * AggressiveInlining; a negative cost means the callee aborted. */
6173 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6174 if (cfg->verbose_level > 2)
6175 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6177 cfg->stat_inlined_methods++;
6179 /* always add some code to avoid block split failures */
6180 MONO_INST_NEW (cfg, ins, OP_NOP);
6181 MONO_ADD_INS (prev_cbb, ins);
6183 prev_cbb->next_bb = sbblock;
6184 link_bblock (cfg, prev_cbb, sbblock);
6187 * Get rid of the begin and end bblocks if possible to aid local
6190 if (prev_cbb->out_count == 1)
6191 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6193 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6194 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6196 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6197 MonoBasicBlock *prev = ebblock->in_bb [0];
6199 if (prev->next_bb == ebblock) {
6200 mono_merge_basic_blocks (cfg, prev, ebblock);
6202 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6203 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6204 cfg->cbb = prev_cbb;
6207 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6212 * It's possible that the rvar is set in some prev bblock, but not in others.
6218 for (i = 0; i < ebblock->in_count; ++i) {
6219 bb = ebblock->in_bb [i];
6221 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6224 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6234 * If the inlined method contains only a throw, then the ret var is not
6235 * set, so set it to a dummy value.
6238 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6240 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6243 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: undo and fall back to a normal call. */
6246 if (cfg->verbose_level > 2)
6247 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6248 cfg->exception_type = MONO_EXCEPTION_NONE;
6250 /* This gets rid of the newly added bblocks */
6251 cfg->cbb = prev_cbb;
6253 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6258 * Some of these comments may well be out-of-date.
6259 * Design decisions: we do a single pass over the IL code (and we do bblock
6260 * splitting/merging in the few cases when it's required: a back jump to an IL
6261 * address that was not already seen as bblock starting point).
6262 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6263 * Complex operations are decomposed in simpler ones right away. We need to let the
6264 * arch-specific code peek and poke inside this process somehow (except when the
6265 * optimizations can take advantage of the full semantic info of coarse opcodes).
6266 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6267 * MonoInst->opcode initially is the IL opcode or some simplification of that
6268 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6269 * opcode with value bigger than OP_LAST.
6270 * At this point the IR can be handed over to an interpreter, a dumb code generator
6271 * or to the optimizing code generator that will translate it to SSA form.
6273 * Profiling directed optimizations.
6274 * We may compile by default with few or no optimizations and instrument the code
6275 * or the user may indicate what methods to optimize the most either in a config file
6276 * or through repeated runs where the compiler applies offline the optimizations to
6277 * each method and then decides if it was worth it.
6280 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6281 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6282 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6283 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6284 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6285 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6286 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6287 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6289 /* offset from br.s -> br like opcodes */
6290 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if IL address IP belongs to bblock BB, i.e. no *other*
 * bblock starts at IP (NULL entry means no bblock boundary there).
 */
6293 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6295 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6297 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode and create
 * basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch.  Also marks the bblock
 * containing a CEE_THROW as out-of-line so block layout can move it.
 */
6301 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6303 unsigned char *ip = start;
6304 unsigned char *target;
6307 MonoBasicBlock *bblock;
6308 const MonoOpcode *opcode;
6311 cli_addr = ip - start;
6312 i = mono_opcode_value ((const guint8 **)&ip, end);
6315 opcode = &mono_opcodes [i];
/* Advance IP by the operand size; only branch-like operands create bblocks. */
6316 switch (opcode->argument) {
6317 case MonoInlineNone:
6320 case MonoInlineString:
6321 case MonoInlineType:
6322 case MonoInlineField:
6323 case MonoInlineMethod:
6326 case MonoShortInlineR:
6333 case MonoShortInlineVar:
6334 case MonoShortInlineI:
6337 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement relative to next instruction. */
6338 target = start + cli_addr + 2 + (signed char)ip [1];
6339 GET_BBLOCK (cfg, bblock, target);
6342 GET_BBLOCK (cfg, bblock, ip);
6344 case MonoInlineBrTarget:
/* Long branch: 4-byte signed displacement relative to next instruction. */
6345 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6346 GET_BBLOCK (cfg, bblock, target);
6349 GET_BBLOCK (cfg, bblock, ip);
6351 case MonoInlineSwitch: {
6352 guint32 n = read32 (ip + 1);
6355 cli_addr += 5 + 4 * n;
6356 target = start + cli_addr;
6357 GET_BBLOCK (cfg, bblock, target);
/* One bblock per switch case target, relative to the end of the switch. */
6359 for (j = 0; j < n; ++j) {
6360 target = start + cli_addr + (gint32)read32 (ip);
6361 GET_BBLOCK (cfg, bblock, target);
6371 g_assert_not_reached ();
6374 if (i == CEE_THROW) {
6375 unsigned char *bb_start = ip - 1;
6377 /* Find the start of the bblock containing the throw */
6379 while ((bb_start >= start) && !bblock) {
6380 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6384 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types in the result.  For wrapper methods the token indexes
 * the wrapper data (inflating with CONTEXT if given); otherwise a normal
 * metadata lookup is performed.  Errors are reported via ERROR.
 */
6394 static inline MonoMethod *
6395 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6401 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6402 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6404 method = mono_class_inflate_generic_method_checked (method, context, error);
6407 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open () but rejects methods on open
 * constructed types when not compiling gshared code; in that case the
 * error is recorded in cfg->error as a bad-image error.  CFG may be NULL,
 * in which case a local MonoError is used and cleaned up.
 */
6413 static inline MonoMethod *
6414 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6417 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6419 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6420 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6424 if (!method && !cfg)
6425 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: from wrapper data for wrapper
 * methods, otherwise parsed from metadata; the parsed signature is then
 * inflated with CONTEXT.  Returns NULL on error (reported via ERROR).
 */
6430 static inline MonoMethodSignature*
6431 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6433 MonoMethodSignature *fsig;
6436 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6437 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6439 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6440 return_val_if_nok (error, NULL);
6443 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the
 * SecurityManager.ThrowException(Exception) method used by the CoreCLR
 * security checks below to raise security exceptions from JITted code.
 */
6449 throw_exception (void)
6451 static MonoMethod *method = NULL;
6454 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6455 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * point of compilation, so the exception object EX is thrown at runtime.
 */
6462 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6464 MonoMethod *thrower = throw_exception ();
6467 EMIT_NEW_PCONST (cfg, args [0], ex);
6468 mono_emit_method_call (cfg, thrower, args, NULL);
6472 * Return the original method if a wrapper is specified. We can only access
6473 * the custom attributes from the original method.
6476 get_original_method (MonoMethod *method)
6478 if (method->wrapper_type == MONO_WRAPPER_NONE)
/* Non-wrapper: it already is the original method (elided return). */
6481 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6482 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6485 /* in other cases we need to find the original method */
6486 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not access FIELD, emit code throwing the security exception.
 */
6490 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6492 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6493 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6495 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not call CALLEE, emit code throwing the security exception.
 */
6499 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6501 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6502 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6504 emit_throw_exception (cfg, ex);
6508 * Check that the IL instructions at ip are the array initialization
6509 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray pattern
 * emitted after newarr and, if it matches and the element type is safe to
 * copy byte-wise (elided switch — little-endian only for multi-byte ints),
 * return a pointer to the field's RVA data.  Also returns the data size in
 * *OUT_SIZE and the field token in *OUT_FIELD_TOKEN.  Under AOT the RVA
 * itself is returned and resolved at load time.
 */
6512 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6515 * newarr[System.Int32]
6517 * ldtoken field valuetype ...
6518 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6520 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6522 guint32 token = read32 (ip + 7);
6523 guint32 field_token = read32 (ip + 2);
6524 guint32 field_index = field_token & 0xffffff;
6526 const char *data_ptr;
6528 MonoMethod *cmethod;
6529 MonoClass *dummy_class;
6530 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6534 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6538 *out_field_token = field_token;
6540 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the call really is RuntimeHelpers.InitializeArray. */
6543 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6545 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6549 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6550 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Elided: remaining cases compute 'size' per element type. */
6567 if (size > mono_type_size (field->type, &dummy_align))
/* Declared data smaller than the array: cannot optimize (elided return). */
6570 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6571 if (!image_is_dynamic (method->klass->image)) {
6572 field_index = read32 (ip + 2) & 0xffffff;
6573 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6574 data_ptr = mono_image_rva_map (method->klass->image, rva);
6575 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6576 /* for aot code we do the lookup on load */
6577 if (aot && data_ptr)
6578 return (const char *)GUINT_TO_POINTER (rva);
6580 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6582 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP in METHOD: disassembles the offending instruction (or explains why
 * the method body could not be parsed / is empty) into the message.
 */
6590 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6593 char *method_fname = mono_method_full_name (method, TRUE);
6595 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6598 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6599 mono_error_cleanup (&error);
6600 } else if (header->code_size == 0)
6601 method_code = g_strdup ("method body is empty.");
6603 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6604 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6605 g_free (method_fname);
6606 g_free (method_code);
6607 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of stack value *SP into IL local N.  When the store would
 * be a plain reg-reg move of a constant that is still the last emitted
 * instruction, retarget that instruction's dreg instead of emitting a move.
 */
6611 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6614 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6615 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6616 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6617 /* Optimize reg-reg moves away */
6619 * Can't optimize other opcodes, since sp[0] might point to
6620 * the last ins of a decomposed opcode.
6622 sp [0]->dreg = (cfg)->locals [n]->dreg;
6624 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6629 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>": instead of taking the
 * local's address, directly emit the local's initialization, avoiding the
 * address-taken flag on the local.  Returns the new IP past the consumed
 * instructions, or NULL (on an elided path) when the pattern doesn't match.
 */
6632 static inline unsigned char *
6633 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6643 local = read16 (ip + 2);
6647 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6648 /* From the INITOBJ case */
6649 token = read32 (ip + 2);
6650 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6651 CHECK_TYPELOAD (klass);
6652 type = mini_get_underlying_type (&klass->byval_arg);
6653 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode, where
 * vtable and IMT slots hold function descriptors (address + argument pairs)
 * rather than raw code addresses or trampolines. SP [0] is the receiver.
 * Returns the call instruction.
 */
6661 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6663 MonoInst *icall_args [16];
6664 MonoInst *call_target, *ins, *vtable_ins;
6665 int arg_reg, this_reg, vtable_reg;
6666 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6667 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6668 gboolean variant_iface = FALSE;
6671 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
6674 * In llvm-only mode, vtables contain function descriptors instead of
6675 * method addresses/trampolines.
/* The receiver must be non-null before we dereference its vtable. */
6677 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods use an IMT slot; class methods use a vtable index. */
6680 slot = mono_method_get_imt_slot (cmethod);
6682 slot = mono_method_get_vtable_index (cmethod);
6684 this_reg = sp [0]->dreg;
6686 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6687 variant_iface = TRUE;
6689 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6691 * The simplest case, a normal virtual call.
6693 int slot_reg = alloc_preg (cfg);
6694 int addr_reg = alloc_preg (cfg);
6695 int arg_reg = alloc_preg (cfg);
6696 MonoBasicBlock *non_null_bb;
6698 vtable_reg = alloc_preg (cfg);
6699 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6700 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6702 /* Load the vtable slot, which contains a function descriptor. */
6703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6705 NEW_BBLOCK (cfg, non_null_bb);
/* Slots are filled lazily: a zero slot means not initialized yet. */
6707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6708 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
6709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: ask the runtime to initialize the slot. */
6712 // FIXME: Make the wrapper use the preserveall cconv
6713 // FIXME: Use one icall per slot for small slot numbers ?
6714 icall_args [0] = vtable_ins;
6715 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6716 /* Make the icall return the vtable slot value to save some code space */
6717 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
6718 ins->dreg = slot_reg;
6719 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
6722 MONO_START_BB (cfg, non_null_bb);
6723 /* Load the address + arg from the vtable slot */
6724 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
6727 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
6730 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
6732 * A simple interface call
6734 * We make a call through an imt slot to obtain the function descriptor we need to call.
6735 * The imt slot contains a function descriptor for a runtime function + arg.
6737 int slot_reg = alloc_preg (cfg);
6738 int addr_reg = alloc_preg (cfg);
6739 int arg_reg = alloc_preg (cfg);
6740 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6742 vtable_reg = alloc_preg (cfg);
6743 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
6744 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6747 * The slot is already initialized when the vtable is created so there is no need
6751 /* Load the imt slot, which contains a function descriptor. */
6752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6754 /* Load the address + arg of the imt thunk from the imt slot */
6755 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6756 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6758 * IMT thunks in llvm-only mode are C functions which take an info argument
6759 * plus the imt method and return the ftndesc to call.
6761 icall_args [0] = thunk_arg_ins;
6762 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6763 cmethod, MONO_RGCTX_INFO_METHOD);
6764 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6766 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
6769 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
6771 * This is similar to the interface case, the vtable slot points to an imt thunk which is
6772 * dynamically extended as more instantiations are discovered.
6773 * This handles generic virtual methods both on classes and interfaces.
6775 int slot_reg = alloc_preg (cfg);
6776 int addr_reg = alloc_preg (cfg);
6777 int arg_reg = alloc_preg (cfg);
6778 int ftndesc_reg = alloc_preg (cfg);
6779 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6780 MonoBasicBlock *slowpath_bb, *end_bb;
6782 NEW_BBLOCK (cfg, slowpath_bb);
6783 NEW_BBLOCK (cfg, end_bb);
6785 vtable_reg = alloc_preg (cfg);
6786 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface slot (negative IMT offset) vs. class vtable slot. */
6788 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6790 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6792 /* Load the slot, which contains a function descriptor. */
6793 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6795 /* These slots are not initialized, so fall back to the slow path until they are initialized */
6796 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
6797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6801 /* Same as with iface calls */
6802 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6803 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6804 icall_args [0] = thunk_arg_ins;
6805 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6806 cmethod, MONO_RGCTX_INFO_METHOD);
6807 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6808 ftndesc_ins->dreg = ftndesc_reg;
6810 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
6811 * they don't know about yet. Fall back to the slowpath in that case.
6813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
6814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6816 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
6819 MONO_START_BB (cfg, slowpath_bb);
6820 icall_args [0] = vtable_ins;
6821 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6822 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6823 cmethod, MONO_RGCTX_INFO_METHOD);
6825 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
6827 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
6828 ftndesc_ins->dreg = ftndesc_reg;
6829 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Both paths deliver the ftndesc to call in ftndesc_reg. */
6832 MONO_START_BB (cfg, end_bb);
6833 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
6837 * Non-optimized cases
/* Fallback: resolve the call entirely through a runtime icall. */
6839 icall_args [0] = sp [0];
6840 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6842 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6843 cmethod, MONO_RGCTX_INFO_METHOD);
/* The icall returns the extra argument through this out-variable. */
6845 arg_reg = alloc_preg (cfg);
6846 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
6847 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
/* Only the gsharedvt cases should reach this point. */
6849 g_assert (is_gsharedvt);
6851 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
6853 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
6856 * Pass the extra argument even if the callee doesn't receive it, most
6857 * calling conventions allow this.
6859 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain and comparing against mono_defaults.exception_class.
 */
6863 is_exception_class (MonoClass *klass)
6866 if (klass == mono_defaults.exception_class)
6868 klass = klass->parent;
6874 * is_jit_optimizer_disabled:
6876 * Determine whether M's assembly has a DebuggableAttribute with the
6877 * IsJITOptimizerDisabled flag set.
6880 is_jit_optimizer_disabled (MonoMethod *m)
6883 MonoAssembly *ass = m->klass->image->assembly;
6884 MonoCustomAttrInfo* attrs;
6887 gboolean val = FALSE;
/* Fast path: the answer is cached per-assembly after the first lookup. */
6890 if (ass->jit_optimizer_disabled_inited)
6891 return ass->jit_optimizer_disabled;
6893 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class not available: cache a negative result. */
6897 ass->jit_optimizer_disabled = FALSE;
/* Barrier so the cached value is visible before the 'inited' flag. */
6898 mono_memory_barrier ();
6899 ass->jit_optimizer_disabled_inited = TRUE;
6903 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
6904 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan the assembly's custom attributes for DebuggableAttribute. */
6906 for (i = 0; i < attrs->num_attrs; ++i) {
6907 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6909 MonoMethodSignature *sig;
6911 if (!attr->ctor || attr->ctor->klass != klass)
6913 /* Decode the attribute. See reflection.c */
6914 p = (const char*)attr->data;
/* Custom attribute blobs begin with the 0x0001 prolog (ECMA-335 II.23.3). */
6915 g_assert (read16 (p) == 0x0001);
6918 // FIXME: Support named parameters
6919 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded. */
6920 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6922 /* Two boolean arguments */
6926 mono_custom_attrs_free (attrs);
/* Publish the computed result into the per-assembly cache. */
6929 ass->jit_optimizer_disabled = val;
6930 mono_memory_barrier ();
6931 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a call from METHOD to CMETHOD made by CALL_OPCODE can be
 * compiled as a tail call. Starts from the architecture's verdict and then
 * rules out cases that could leave dangling references to the caller's frame.
 */
6937 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6939 gboolean supported_tail_call;
/* The backend decides whether the two signatures are tail-call compatible. */
6942 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6944 for (i = 0; i < fsig->param_count; ++i) {
6945 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6946 /* These can point to the current method's stack */
6947 supported_tail_call = FALSE;
6949 if (fsig->hasthis && cmethod->klass->valuetype)
6950 /* this might point to the current method's stack */
6951 supported_tail_call = FALSE;
/* P/Invoke targets cannot be tail-called. */
6952 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6953 supported_tail_call = FALSE;
/* The caller's LMF frame must stay live for its whole duration. */
6954 if (cfg->method->save_lmf)
6955 supported_tail_call = FALSE;
6956 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6957 supported_tail_call = FALSE;
/* Only plain CEE_CALL call sites are handled. */
6958 if (call_opcode != CEE_CALL)
6959 supported_tail_call = FALSE;
6961 /* Debugging support */
6963 if (supported_tail_call) {
6964 if (!mono_debug_count ())
6965 supported_tail_call = FALSE;
6969 return supported_tail_call;
6975 * Handle calls made to ctors from NEWOBJ opcodes.
6978 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6979 MonoInst **sp, guint8 *ip, int *inline_costs)
6981 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/*
 * For shared generic valuetype ctors the callee needs its rgctx/vtable
 * passed explicitly, since a valuetype 'this' carries no vtable pointer.
 */
6983 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6984 mono_method_is_generic_sharable (cmethod, TRUE)) {
6985 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
6986 mono_class_vtable (cfg->domain, cmethod->klass);
6987 CHECK_TYPELOAD (cmethod->klass);
/* Method-instantiated ctor: pass the method rgctx. */
6989 vtable_arg = emit_get_rgctx_method (cfg, context_used,
6990 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Otherwise the class vtable is looked up through the rgctx ... */
6993 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
6994 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* ... or embedded directly when no context is used. */
6996 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6998 CHECK_TYPELOAD (cmethod->klass);
6999 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7004 /* Avoid virtual calls to ctors if possible */
7005 if (mono_class_is_marshalbyref (cmethod->klass))
7006 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first. */
7008 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7009 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7010 CHECK_CFG_EXCEPTION;
7011 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7012 mono_method_check_inlining (cfg, cmethod) &&
7013 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
/* Try to inline the ctor body; 5 is the size of the call opcode. */
7016 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7017 cfg->real_offset += 5;
7019 *inline_costs += costs - 5;
7021 INLINE_FAILURE ("inline failure");
7022 // FIXME-VT: Clean this up
7023 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7024 GSHAREDVT_FAILURE(*ip);
7025 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7027 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/* Gsharedvt signature: go through the gsharedvt out trampoline. */
7030 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7032 if (cfg->llvm_only) {
7033 // FIXME: Avoid initializing vtable_arg
7034 emit_llvmonly_calli (cfg, fsig, sp, addr);
7036 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7038 } else if (context_used &&
7039 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7040 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7041 MonoInst *cmethod_addr;
7043 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7045 if (cfg->llvm_only) {
7046 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7047 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7048 emit_llvmonly_calli (cfg, fsig, sp, addr);
7050 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7051 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7053 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a direct (non-inlined) call to the ctor. */
7056 INLINE_FAILURE ("ctor call");
7057 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7058 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR storing VAL as the return value of the current method, handling
 * valuetype returns (via cfg->ret or the hidden vret address argument) and
 * the soft-float R4 fallback.
 */
7065 emit_setret (MonoCompile *cfg, MonoInst *val)
7067 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: must be stored through memory, not a register. */
7070 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7073 if (!cfg->vret_addr) {
7074 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden address to receive the valuetype. */
7076 EMIT_NEW_RETLOADA (cfg, ret_addr);
7078 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7079 ins->klass = mono_class_from_mono_type (ret_type);
7082 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value through an icall before returning. */
7083 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7084 MonoInst *iargs [1];
7088 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7089 mono_arch_emit_setret (cfg, cfg->method, conv);
7091 mono_arch_emit_setret (cfg, cfg->method, val);
7094 mono_arch_emit_setret (cfg, cfg->method, val);
7100 * mono_method_to_ir:
7102 * Translate the .net IL into linear IR.
7104 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7105 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7106 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7107 * @inline_args: if not NULL, contains the arguments to the inline call
7108 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7109 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7111 * This method is used to turn ECMA IL into Mono's internal Linear IR
7112 * representation. It is used both for entire methods, as well as
7113 * inlining existing methods. In the former case, the @start_bblock,
7114 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7115 * inline_offset is set to zero.
7117 * Returns: the inline cost, or -1 if there was an error processing this method.
7120 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7121 MonoInst *return_var, MonoInst **inline_args,
7122 guint inline_offset, gboolean is_virtual_call)
7125 MonoInst *ins, **sp, **stack_start;
7126 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7127 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7128 MonoMethod *cmethod, *method_definition;
7129 MonoInst **arg_array;
7130 MonoMethodHeader *header;
7132 guint32 token, ins_flag;
7134 MonoClass *constrained_class = NULL;
7135 unsigned char *ip, *end, *target, *err_pos;
7136 MonoMethodSignature *sig;
7137 MonoGenericContext *generic_context = NULL;
7138 MonoGenericContainer *generic_container = NULL;
7139 MonoType **param_types;
7140 int i, n, start_new_bblock, dreg;
7141 int num_calls = 0, inline_costs = 0;
7142 int breakpoint_id = 0;
7144 GSList *class_inits = NULL;
7145 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7147 gboolean init_locals, seq_points, skip_dead_blocks;
7148 gboolean sym_seq_points = FALSE;
7149 MonoDebugMethodInfo *minfo;
7150 MonoBitSet *seq_point_locs = NULL;
7151 MonoBitSet *seq_point_set_locs = NULL;
7153 cfg->disable_inline = is_jit_optimizer_disabled (method);
7155 /* serialization and xdomain stuff may need access to private fields and methods */
7156 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7157 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7158 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7159 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7160 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7161 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7163 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7164 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7165 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7166 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7167 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7169 image = method->klass->image;
7170 header = mono_method_get_header_checked (method, &cfg->error);
7172 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7173 goto exception_exit;
7175 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7178 generic_container = mono_method_get_generic_container (method);
7179 sig = mono_method_signature (method);
7180 num_args = sig->hasthis + sig->param_count;
7181 ip = (unsigned char*)header->code;
7182 cfg->cil_start = ip;
7183 end = ip + header->code_size;
7184 cfg->stat_cil_code_size += header->code_size;
7186 seq_points = cfg->gen_seq_points && cfg->method == method;
7188 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7189 /* We could hit a seq point before attaching to the JIT (#8338) */
7193 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7194 minfo = mono_debug_lookup_method (method);
7196 MonoSymSeqPoint *sps;
7197 int i, n_il_offsets;
7199 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7200 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7201 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7202 sym_seq_points = TRUE;
7203 for (i = 0; i < n_il_offsets; ++i) {
7204 if (sps [i].il_offset < header->code_size)
7205 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7209 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7211 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7213 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7214 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7216 mono_debug_free_method_async_debug_info (asyncMethod);
7218 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7219 /* Methods without line number info like auto-generated property accessors */
7220 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7221 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7222 sym_seq_points = TRUE;
7227 * Methods without init_locals set could cause asserts in various passes
7228 * (#497220). To work around this, we emit dummy initialization opcodes
7229 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7230 * on some platforms.
7232 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7233 init_locals = header->init_locals;
7237 method_definition = method;
7238 while (method_definition->is_inflated) {
7239 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7240 method_definition = imethod->declaring;
7243 /* SkipVerification is not allowed if core-clr is enabled */
7244 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7246 dont_verify_stloc = TRUE;
7249 if (sig->is_inflated)
7250 generic_context = mono_method_get_context (method);
7251 else if (generic_container)
7252 generic_context = &generic_container->context;
7253 cfg->generic_context = generic_context;
7256 g_assert (!sig->has_type_parameters);
7258 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7259 g_assert (method->is_inflated);
7260 g_assert (mono_method_get_context (method)->method_inst);
7262 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7263 g_assert (sig->generic_param_count);
7265 if (cfg->method == method) {
7266 cfg->real_offset = 0;
7268 cfg->real_offset = inline_offset;
7271 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7272 cfg->cil_offset_to_bb_len = header->code_size;
7274 cfg->current_method = method;
7276 if (cfg->verbose_level > 2)
7277 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7279 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7281 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7282 for (n = 0; n < sig->param_count; ++n)
7283 param_types [n + sig->hasthis] = sig->params [n];
7284 cfg->arg_types = param_types;
7286 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7287 if (cfg->method == method) {
7289 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7292 NEW_BBLOCK (cfg, start_bblock);
7293 cfg->bb_entry = start_bblock;
7294 start_bblock->cil_code = NULL;
7295 start_bblock->cil_length = 0;
7298 NEW_BBLOCK (cfg, end_bblock);
7299 cfg->bb_exit = end_bblock;
7300 end_bblock->cil_code = NULL;
7301 end_bblock->cil_length = 0;
7302 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7303 g_assert (cfg->num_bblocks == 2);
7305 arg_array = cfg->args;
7307 if (header->num_clauses) {
7308 cfg->spvars = g_hash_table_new (NULL, NULL);
7309 cfg->exvars = g_hash_table_new (NULL, NULL);
7311 /* handle exception clauses */
7312 for (i = 0; i < header->num_clauses; ++i) {
7313 MonoBasicBlock *try_bb;
7314 MonoExceptionClause *clause = &header->clauses [i];
7315 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7317 try_bb->real_offset = clause->try_offset;
7318 try_bb->try_start = TRUE;
7319 try_bb->region = ((i + 1) << 8) | clause->flags;
7320 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7321 tblock->real_offset = clause->handler_offset;
7322 tblock->flags |= BB_EXCEPTION_HANDLER;
7324 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
7325 mono_create_exvar_for_offset (cfg, clause->handler_offset);
7327 * Linking the try block with the EH block hinders inlining as we won't be able to
7328 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7330 if (COMPILE_LLVM (cfg))
7331 link_bblock (cfg, try_bb, tblock);
7333 if (*(ip + clause->handler_offset) == CEE_POP)
7334 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7336 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7337 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7338 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7339 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7340 MONO_ADD_INS (tblock, ins);
7342 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7343 /* finally clauses already have a seq point */
7344 /* seq points for filter clauses are emitted below */
7345 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7346 MONO_ADD_INS (tblock, ins);
7349 /* todo: is a fault block unsafe to optimize? */
7350 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7351 tblock->flags |= BB_EXCEPTION_UNSAFE;
7354 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7356 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7358 /* catch and filter blocks get the exception object on the stack */
7359 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7360 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7362 /* mostly like handle_stack_args (), but just sets the input args */
7363 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7364 tblock->in_scount = 1;
7365 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7366 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7370 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7371 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7372 if (!cfg->compile_llvm) {
7373 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7374 ins->dreg = tblock->in_stack [0]->dreg;
7375 MONO_ADD_INS (tblock, ins);
7378 MonoInst *dummy_use;
7381 * Add a dummy use for the exvar so its liveness info will be
7384 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7387 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7388 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7389 MONO_ADD_INS (tblock, ins);
7392 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7393 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7394 tblock->flags |= BB_EXCEPTION_HANDLER;
7395 tblock->real_offset = clause->data.filter_offset;
7396 tblock->in_scount = 1;
7397 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7398 /* The filter block shares the exvar with the handler block */
7399 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7400 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7401 MONO_ADD_INS (tblock, ins);
7405 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7406 clause->data.catch_class &&
7408 mono_class_check_context_used (clause->data.catch_class)) {
7410 * In shared generic code with catch
7411 * clauses containing type variables
7412 * the exception handling code has to
7413 * be able to get to the rgctx.
7414 * Therefore we have to make sure that
7415 * the vtable/mrgctx argument (for
7416 * static or generic methods) or the
7417 * "this" argument (for non-static
7418 * methods) are live.
7420 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7421 mini_method_get_context (method)->method_inst ||
7422 method->klass->valuetype) {
7423 mono_get_vtable_var (cfg);
7425 MonoInst *dummy_use;
7427 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7432 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7433 cfg->cbb = start_bblock;
7434 cfg->args = arg_array;
7435 mono_save_args (cfg, sig, inline_args);
7438 /* FIRST CODE BLOCK */
7439 NEW_BBLOCK (cfg, tblock);
7440 tblock->cil_code = ip;
7444 ADD_BBLOCK (cfg, tblock);
7446 if (cfg->method == method) {
7447 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7448 if (breakpoint_id) {
7449 MONO_INST_NEW (cfg, ins, OP_BREAK);
7450 MONO_ADD_INS (cfg->cbb, ins);
7454 /* we use a separate basic block for the initialization code */
7455 NEW_BBLOCK (cfg, init_localsbb);
7456 if (cfg->method == method)
7457 cfg->bb_init = init_localsbb;
7458 init_localsbb->real_offset = cfg->real_offset;
7459 start_bblock->next_bb = init_localsbb;
7460 init_localsbb->next_bb = cfg->cbb;
7461 link_bblock (cfg, start_bblock, init_localsbb);
7462 link_bblock (cfg, init_localsbb, cfg->cbb);
7464 cfg->cbb = init_localsbb;
7466 if (cfg->gsharedvt && cfg->method == method) {
7467 MonoGSharedVtMethodInfo *info;
7468 MonoInst *var, *locals_var;
7471 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7472 info->method = cfg->method;
7473 info->count_entries = 16;
7474 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7475 cfg->gsharedvt_info = info;
7477 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7478 /* prevent it from being register allocated */
7479 //var->flags |= MONO_INST_VOLATILE;
7480 cfg->gsharedvt_info_var = var;
7482 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7483 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7485 /* Allocate locals */
7486 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7487 /* prevent it from being register allocated */
7488 //locals_var->flags |= MONO_INST_VOLATILE;
7489 cfg->gsharedvt_locals_var = locals_var;
7491 dreg = alloc_ireg (cfg);
7492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7494 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7495 ins->dreg = locals_var->dreg;
7497 MONO_ADD_INS (cfg->cbb, ins);
7498 cfg->gsharedvt_locals_var_ins = ins;
7500 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7503 ins->flags |= MONO_INST_INIT;
7507 if (mono_security_core_clr_enabled ()) {
7508 /* check if this is native code, e.g. an icall or a p/invoke */
7509 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7510 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7512 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7513 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7515 /* if this ia a native call then it can only be JITted from platform code */
7516 if ((icall || pinvk) && method->klass && method->klass->image) {
7517 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7518 MonoException *ex = icall ? mono_get_exception_security () :
7519 mono_get_exception_method_access ();
7520 emit_throw_exception (cfg, ex);
7527 CHECK_CFG_EXCEPTION;
7529 if (header->code_size == 0)
7532 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7537 if (cfg->method == method)
7538 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7540 for (n = 0; n < header->num_locals; ++n) {
7541 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7546 /* We force the vtable variable here for all shared methods
7547 for the possibility that they might show up in a stack
7548 trace where their exact instantiation is needed. */
7549 if (cfg->gshared && method == cfg->method) {
7550 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7551 mini_method_get_context (method)->method_inst ||
7552 method->klass->valuetype) {
7553 mono_get_vtable_var (cfg);
7555 /* FIXME: Is there a better way to do this?
7556 We need the variable live for the duration
7557 of the whole method. */
7558 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7562 /* add a check for this != NULL to inlined methods */
7563 if (is_virtual_call) {
7566 NEW_ARGLOAD (cfg, arg_ins, 0);
7567 MONO_ADD_INS (cfg->cbb, arg_ins);
7568 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7571 skip_dead_blocks = !dont_verify;
7572 if (skip_dead_blocks) {
7573 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7578 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7579 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7582 start_new_bblock = 0;
7584 if (cfg->method == method)
7585 cfg->real_offset = ip - header->code;
7587 cfg->real_offset = inline_offset;
7592 if (start_new_bblock) {
7593 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7594 if (start_new_bblock == 2) {
7595 g_assert (ip == tblock->cil_code);
7597 GET_BBLOCK (cfg, tblock, ip);
7599 cfg->cbb->next_bb = tblock;
7601 start_new_bblock = 0;
7602 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7603 if (cfg->verbose_level > 3)
7604 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7605 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7609 g_slist_free (class_inits);
7612 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7613 link_bblock (cfg, cfg->cbb, tblock);
7614 if (sp != stack_start) {
7615 handle_stack_args (cfg, stack_start, sp - stack_start);
7617 CHECK_UNVERIFIABLE (cfg);
7619 cfg->cbb->next_bb = tblock;
7621 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7622 if (cfg->verbose_level > 3)
7623 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7624 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7627 g_slist_free (class_inits);
7632 if (skip_dead_blocks) {
7633 int ip_offset = ip - header->code;
7635 if (ip_offset == bb->end)
7639 int op_size = mono_opcode_size (ip, end);
7640 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7642 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7644 if (ip_offset + op_size == bb->end) {
7645 MONO_INST_NEW (cfg, ins, OP_NOP);
7646 MONO_ADD_INS (cfg->cbb, ins);
7647 start_new_bblock = 1;
7655 * Sequence points are points where the debugger can place a breakpoint.
7656 * Currently, we generate these automatically at points where the IL
7659 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7661 * Make methods interruptible at the beginning, and at the targets of
7662 * backward branches.
7663 * Also, do this at the start of every bblock in methods with clauses too,
7664 * to be able to handle instructions with imprecise control flow like
7666 * Backward branches are handled at the end of method-to-ir ().
7668 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7669 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7671 /* Avoid sequence points on empty IL like .volatile */
7672 // FIXME: Enable this
7673 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7674 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7675 if ((sp != stack_start) && !sym_seq_point)
7676 ins->flags |= MONO_INST_NONEMPTY_STACK;
7677 MONO_ADD_INS (cfg->cbb, ins);
7680 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7683 cfg->cbb->real_offset = cfg->real_offset;
7685 if ((cfg->method == method) && cfg->coverage_info) {
7686 guint32 cil_offset = ip - header->code;
7687 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
7688 cfg->coverage_info->data [cil_offset].cil_code = ip;
7690 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
7691 MonoInst *one_ins, *load_ins;
7693 EMIT_NEW_PCONST (cfg, load_ins, counter);
7694 EMIT_NEW_ICONST (cfg, one_ins, 1);
7695 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
7696 ins->dreg = mono_alloc_ireg (cfg);
7697 ins->inst_basereg = load_ins->dreg;
7698 ins->inst_offset = 0;
7699 ins->sreg2 = one_ins->dreg;
7700 ins->type = STACK_I4;
7701 MONO_ADD_INS (cfg->cbb, ins);
7703 EMIT_NEW_PCONST (cfg, ins, counter);
7704 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7708 if (cfg->verbose_level > 3)
7709 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7713 if (seq_points && !sym_seq_points && sp != stack_start) {
7715 * The C# compiler uses these nops to notify the JIT that it should
7716 * insert seq points.
7718 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7719 MONO_ADD_INS (cfg->cbb, ins);
7721 if (cfg->keep_cil_nops)
7722 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7724 MONO_INST_NEW (cfg, ins, OP_NOP);
7726 MONO_ADD_INS (cfg->cbb, ins);
7729 if (mini_should_insert_breakpoint (cfg->method)) {
7730 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7732 MONO_INST_NEW (cfg, ins, OP_NOP);
7735 MONO_ADD_INS (cfg->cbb, ins);
7741 CHECK_STACK_OVF (1);
7742 n = (*ip)-CEE_LDARG_0;
7744 EMIT_NEW_ARGLOAD (cfg, ins, n);
7752 CHECK_STACK_OVF (1);
7753 n = (*ip)-CEE_LDLOC_0;
7755 EMIT_NEW_LOCLOAD (cfg, ins, n);
7764 n = (*ip)-CEE_STLOC_0;
7767 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7769 emit_stloc_ir (cfg, sp, header, n);
7776 CHECK_STACK_OVF (1);
7779 EMIT_NEW_ARGLOAD (cfg, ins, n);
7785 CHECK_STACK_OVF (1);
7788 NEW_ARGLOADA (cfg, ins, n);
7789 MONO_ADD_INS (cfg->cbb, ins);
7799 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7801 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7806 CHECK_STACK_OVF (1);
7809 if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
7810 /* Avoid loading a struct just to load one of its fields */
7811 EMIT_NEW_LOCLOADA (cfg, ins, n);
7813 EMIT_NEW_LOCLOAD (cfg, ins, n);
7818 case CEE_LDLOCA_S: {
7819 unsigned char *tmp_ip;
7821 CHECK_STACK_OVF (1);
7822 CHECK_LOCAL (ip [1]);
7824 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7830 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7839 CHECK_LOCAL (ip [1]);
7840 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7842 emit_stloc_ir (cfg, sp, header, ip [1]);
7847 CHECK_STACK_OVF (1);
7848 EMIT_NEW_PCONST (cfg, ins, NULL);
7849 ins->type = STACK_OBJ;
7854 CHECK_STACK_OVF (1);
7855 EMIT_NEW_ICONST (cfg, ins, -1);
7868 CHECK_STACK_OVF (1);
7869 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7875 CHECK_STACK_OVF (1);
7877 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7883 CHECK_STACK_OVF (1);
7884 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7890 CHECK_STACK_OVF (1);
7891 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7892 ins->type = STACK_I8;
7893 ins->dreg = alloc_dreg (cfg, STACK_I8);
7895 ins->inst_l = (gint64)read64 (ip);
7896 MONO_ADD_INS (cfg->cbb, ins);
7902 gboolean use_aotconst = FALSE;
7904 #ifdef TARGET_POWERPC
7905 /* FIXME: Clean this up */
7906 if (cfg->compile_aot)
7907 use_aotconst = TRUE;
7910 /* FIXME: we should really allocate this only late in the compilation process */
7911 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
7913 CHECK_STACK_OVF (1);
7919 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7921 dreg = alloc_freg (cfg);
7922 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7923 ins->type = cfg->r4_stack_type;
7925 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7926 ins->type = cfg->r4_stack_type;
7927 ins->dreg = alloc_dreg (cfg, STACK_R8);
7929 MONO_ADD_INS (cfg->cbb, ins);
7939 gboolean use_aotconst = FALSE;
7941 #ifdef TARGET_POWERPC
7942 /* FIXME: Clean this up */
7943 if (cfg->compile_aot)
7944 use_aotconst = TRUE;
7947 /* FIXME: we should really allocate this only late in the compilation process */
7948 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
7950 CHECK_STACK_OVF (1);
7956 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7958 dreg = alloc_freg (cfg);
7959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7960 ins->type = STACK_R8;
7962 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7963 ins->type = STACK_R8;
7964 ins->dreg = alloc_dreg (cfg, STACK_R8);
7966 MONO_ADD_INS (cfg->cbb, ins);
7975 MonoInst *temp, *store;
7977 CHECK_STACK_OVF (1);
7981 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7982 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7984 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7987 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8000 if (sp [0]->type == STACK_R8)
8001 /* we need to pop the value from the x86 FP stack */
8002 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8007 MonoMethodSignature *fsig;
8010 INLINE_FAILURE ("jmp");
8011 GSHAREDVT_FAILURE (*ip);
8014 if (stack_start != sp)
8016 token = read32 (ip + 1);
8017 /* FIXME: check the signature matches */
8018 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8021 if (cfg->gshared && mono_method_check_context_used (cmethod))
8022 GENERIC_SHARING_FAILURE (CEE_JMP);
8024 mini_profiler_emit_tail_call (cfg, cmethod);
8026 fsig = mono_method_signature (cmethod);
8027 n = fsig->param_count + fsig->hasthis;
8028 if (cfg->llvm_only) {
8031 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8032 for (i = 0; i < n; ++i)
8033 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8034 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8036 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8037 * have to emit a normal return since llvm expects it.
8040 emit_setret (cfg, ins);
8041 MONO_INST_NEW (cfg, ins, OP_BR);
8042 ins->inst_target_bb = end_bblock;
8043 MONO_ADD_INS (cfg->cbb, ins);
8044 link_bblock (cfg, cfg->cbb, end_bblock);
8047 } else if (cfg->backend->have_op_tail_call) {
8048 /* Handle tail calls similarly to calls */
8051 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8052 call->method = cmethod;
8053 call->tail_call = TRUE;
8054 call->signature = mono_method_signature (cmethod);
8055 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8056 call->inst.inst_p0 = cmethod;
8057 for (i = 0; i < n; ++i)
8058 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8060 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8061 call->vret_var = cfg->vret_addr;
8063 mono_arch_emit_call (cfg, call);
8064 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8065 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8067 for (i = 0; i < num_args; ++i)
8068 /* Prevent arguments from being optimized away */
8069 arg_array [i]->flags |= MONO_INST_VOLATILE;
8071 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8072 ins = (MonoInst*)call;
8073 ins->inst_p0 = cmethod;
8074 MONO_ADD_INS (cfg->cbb, ins);
8078 start_new_bblock = 1;
8083 MonoMethodSignature *fsig;
8086 token = read32 (ip + 1);
8090 //GSHAREDVT_FAILURE (*ip);
8095 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8098 if (method->dynamic && fsig->pinvoke) {
8102 * This is a call through a function pointer using a pinvoke
8103 * signature. Have to create a wrapper and call that instead.
8104 * FIXME: This is very slow, need to create a wrapper at JIT time
8105 * instead based on the signature.
8107 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8108 EMIT_NEW_PCONST (cfg, args [1], fsig);
8110 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8113 n = fsig->param_count + fsig->hasthis;
8117 //g_assert (!virtual_ || fsig->hasthis);
8121 inline_costs += 10 * num_calls++;
8124 * Making generic calls out of gsharedvt methods.
8125 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8126 * patching gshared method addresses into a gsharedvt method.
8128 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8130 * We pass the address to the gsharedvt trampoline in the rgctx reg
8132 MonoInst *callee = addr;
8134 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8136 GSHAREDVT_FAILURE (*ip);
8140 GSHAREDVT_FAILURE (*ip);
8142 addr = emit_get_rgctx_sig (cfg, context_used,
8143 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8144 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8148 /* Prevent inlining of methods with indirect calls */
8149 INLINE_FAILURE ("indirect call");
8151 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8152 MonoJumpInfoType info_type;
8156 * Instead of emitting an indirect call, emit a direct call
8157 * with the contents of the aotconst as the patch info.
8159 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8160 info_type = (MonoJumpInfoType)addr->inst_c1;
8161 info_data = addr->inst_p0;
8163 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8164 info_data = addr->inst_right->inst_left;
8167 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8168 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8171 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8172 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8177 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8181 /* End of call, INS should contain the result of the call, if any */
8183 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8185 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8188 CHECK_CFG_EXCEPTION;
8192 constrained_class = NULL;
8196 case CEE_CALLVIRT: {
8197 MonoInst *addr = NULL;
8198 MonoMethodSignature *fsig = NULL;
8200 int virtual_ = *ip == CEE_CALLVIRT;
8201 gboolean pass_imt_from_rgctx = FALSE;
8202 MonoInst *imt_arg = NULL;
8203 MonoInst *keep_this_alive = NULL;
8204 gboolean pass_vtable = FALSE;
8205 gboolean pass_mrgctx = FALSE;
8206 MonoInst *vtable_arg = NULL;
8207 gboolean check_this = FALSE;
8208 gboolean supported_tail_call = FALSE;
8209 gboolean tail_call = FALSE;
8210 gboolean need_seq_point = FALSE;
8211 guint32 call_opcode = *ip;
8212 gboolean emit_widen = TRUE;
8213 gboolean push_res = TRUE;
8214 gboolean skip_ret = FALSE;
8215 gboolean delegate_invoke = FALSE;
8216 gboolean direct_icall = FALSE;
8217 gboolean constrained_partial_call = FALSE;
8218 MonoMethod *cil_method;
8221 token = read32 (ip + 1);
8225 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8228 cil_method = cmethod;
8230 if (constrained_class) {
8231 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8232 if (!mini_is_gsharedvt_klass (constrained_class)) {
8233 g_assert (!cmethod->klass->valuetype);
8234 if (!mini_type_is_reference (&constrained_class->byval_arg))
8235 constrained_partial_call = TRUE;
8239 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8240 if (cfg->verbose_level > 2)
8241 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8242 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8243 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8245 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8249 if (cfg->verbose_level > 2)
8250 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8252 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8254 * This is needed since get_method_constrained can't find
8255 * the method in klass representing a type var.
8256 * The type var is guaranteed to be a reference type in this
8259 if (!mini_is_gsharedvt_klass (constrained_class))
8260 g_assert (!cmethod->klass->valuetype);
8262 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8267 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8268 /* Use the corresponding method from the base type to avoid boxing */
8269 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8270 g_assert (base_type);
8271 constrained_class = mono_class_from_mono_type (base_type);
8272 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8277 if (!dont_verify && !cfg->skip_visibility) {
8278 MonoMethod *target_method = cil_method;
8279 if (method->is_inflated) {
8280 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8283 if (!mono_method_can_access_method (method_definition, target_method) &&
8284 !mono_method_can_access_method (method, cil_method))
8285 emit_method_access_failure (cfg, method, cil_method);
8288 if (mono_security_core_clr_enabled ())
8289 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8291 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8292 /* MS.NET seems to silently convert this to a callvirt */
8297 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8298 * converts to a callvirt.
8300 * tests/bug-515884.il is an example of this behavior
8302 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8303 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8304 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8308 if (!cmethod->klass->inited)
8309 if (!mono_class_init (cmethod->klass))
8310 TYPE_LOAD_ERROR (cmethod->klass);
8312 fsig = mono_method_signature (cmethod);
8315 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8316 mini_class_is_system_array (cmethod->klass)) {
8317 array_rank = cmethod->klass->rank;
8318 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8319 direct_icall = TRUE;
8320 } else if (fsig->pinvoke) {
8321 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8322 fsig = mono_method_signature (wrapper);
8323 } else if (constrained_class) {
8325 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8329 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8330 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8332 /* See code below */
8333 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8334 MonoBasicBlock *tbb;
8336 GET_BBLOCK (cfg, tbb, ip + 5);
8337 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8339 * We want to extend the try block to cover the call, but we can't do it if the
8340 * call is made directly since it's followed by an exception check.
8342 direct_icall = FALSE;
8346 mono_save_token_info (cfg, image, token, cil_method);
8348 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8349 need_seq_point = TRUE;
8351 /* Don't support calls made using type arguments for now */
8353 if (cfg->gsharedvt) {
8354 if (mini_is_gsharedvt_signature (fsig))
8355 GSHAREDVT_FAILURE (*ip);
8359 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8360 g_assert_not_reached ();
8362 n = fsig->param_count + fsig->hasthis;
8364 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8368 g_assert (!mono_method_check_context_used (cmethod));
8372 //g_assert (!virtual_ || fsig->hasthis);
8376 if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
8377 cfg->cbb->out_of_line = TRUE;
8380 * We have the `constrained.' prefix opcode.
8382 if (constrained_class) {
8383 if (mini_is_gsharedvt_klass (constrained_class)) {
8384 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8385 /* The 'Own method' case below */
8386 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8387 /* 'The type parameter is instantiated as a reference type' case below. */
8389 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8390 CHECK_CFG_EXCEPTION;
8396 if (constrained_partial_call) {
8397 gboolean need_box = TRUE;
8400 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8401 * called method is not known at compile time either. The called method could end up being
8402 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8403 * to box the receiver.
8404 * A simple solution would be to box always and make a normal virtual call, but that would
8405 * be bad performance wise.
8407 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8409 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8414 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8415 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8416 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8417 ins->klass = constrained_class;
8418 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8419 CHECK_CFG_EXCEPTION;
8420 } else if (need_box) {
8422 MonoBasicBlock *is_ref_bb, *end_bb;
8423 MonoInst *nonbox_call;
8426 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8428 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8429 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8431 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8433 NEW_BBLOCK (cfg, is_ref_bb);
8434 NEW_BBLOCK (cfg, end_bb);
8436 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8438 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8441 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8446 MONO_START_BB (cfg, is_ref_bb);
8447 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8448 ins->klass = constrained_class;
8449 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8450 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8454 MONO_START_BB (cfg, end_bb);
8457 nonbox_call->dreg = ins->dreg;
8460 g_assert (mono_class_is_interface (cmethod->klass));
8461 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8462 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8465 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8467 * The type parameter is instantiated as a valuetype,
8468 * but that type doesn't override the method we're
8469 * calling, so we need to box `this'.
8471 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8472 ins->klass = constrained_class;
8473 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8474 CHECK_CFG_EXCEPTION;
8475 } else if (!constrained_class->valuetype) {
8476 int dreg = alloc_ireg_ref (cfg);
8479 * The type parameter is instantiated as a reference
8480 * type. We have a managed pointer on the stack, so
8481 * we need to dereference it here.
8483 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8484 ins->type = STACK_OBJ;
8487 if (cmethod->klass->valuetype) {
8490 /* Interface method */
8493 mono_class_setup_vtable (constrained_class);
8494 CHECK_TYPELOAD (constrained_class);
8495 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8497 TYPE_LOAD_ERROR (constrained_class);
8498 slot = mono_method_get_vtable_slot (cmethod);
8500 TYPE_LOAD_ERROR (cmethod->klass);
8501 cmethod = constrained_class->vtable [ioffset + slot];
8503 if (cmethod->klass == mono_defaults.enum_class) {
8504 /* Enum implements some interfaces, so treat this as the first case */
8505 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8506 ins->klass = constrained_class;
8507 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8508 CHECK_CFG_EXCEPTION;
8513 constrained_class = NULL;
8516 if (check_call_signature (cfg, fsig, sp))
8519 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8520 delegate_invoke = TRUE;
8522 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8523 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8524 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8532 * If the callee is a shared method, then its static cctor
8533 * might not get called after the call was patched.
8535 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8536 emit_class_init (cfg, cmethod->klass);
8537 CHECK_TYPELOAD (cmethod->klass);
8540 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8543 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8545 context_used = mini_method_check_context_used (cfg, cmethod);
8547 if (context_used && mono_class_is_interface (cmethod->klass)) {
8548 /* Generic method interface
8549 calls are resolved via a
8550 helper function and don't
8552 if (!cmethod_context || !cmethod_context->method_inst)
8553 pass_imt_from_rgctx = TRUE;
8557 * If a shared method calls another
8558 * shared method then the caller must
8559 * have a generic sharing context
8560 * because the magic trampoline
8561 * requires it. FIXME: We shouldn't
8562 * have to force the vtable/mrgctx
8563 * variable here. Instead there
8564 * should be a flag in the cfg to
8565 * request a generic sharing context.
8568 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8569 mono_get_vtable_var (cfg);
8574 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8576 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8578 CHECK_TYPELOAD (cmethod->klass);
8579 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8584 g_assert (!vtable_arg);
8586 if (!cfg->compile_aot) {
8588 * emit_get_rgctx_method () calls mono_class_vtable () so check
8589 * for type load errors before.
8591 mono_class_setup_vtable (cmethod->klass);
8592 CHECK_TYPELOAD (cmethod->klass);
8595 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8597 /* !marshalbyref is needed to properly handle generic methods + remoting */
8598 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8599 MONO_METHOD_IS_FINAL (cmethod)) &&
8600 !mono_class_is_marshalbyref (cmethod->klass)) {
8607 if (pass_imt_from_rgctx) {
8608 g_assert (!pass_vtable);
8610 imt_arg = emit_get_rgctx_method (cfg, context_used,
8611 cmethod, MONO_RGCTX_INFO_METHOD);
8615 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8617 /* Calling virtual generic methods */
8618 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8619 !(MONO_METHOD_IS_FINAL (cmethod) &&
8620 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8621 fsig->generic_param_count &&
8622 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8624 MonoInst *this_temp, *this_arg_temp, *store;
8625 MonoInst *iargs [4];
8627 g_assert (fsig->is_inflated);
8629 /* Prevent inlining of methods that contain indirect calls */
8630 INLINE_FAILURE ("virtual generic call");
8632 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8633 GSHAREDVT_FAILURE (*ip);
8635 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8636 g_assert (!imt_arg);
8638 g_assert (cmethod->is_inflated);
8639 imt_arg = emit_get_rgctx_method (cfg, context_used,
8640 cmethod, MONO_RGCTX_INFO_METHOD);
8641 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8643 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8644 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8645 MONO_ADD_INS (cfg->cbb, store);
8647 /* FIXME: This should be a managed pointer */
8648 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8650 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8651 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8652 cmethod, MONO_RGCTX_INFO_METHOD);
8653 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8654 addr = mono_emit_jit_icall (cfg,
8655 mono_helper_compile_generic_method, iargs);
8657 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8659 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8666 * Implement a workaround for the inherent races involved in locking:
8672 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8673 * try block, the Exit () won't be executed, see:
8674 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8675 * To work around this, we extend such try blocks to include the last x bytes
8676 * of the Monitor.Enter () call.
8678 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8679 MonoBasicBlock *tbb;
8681 GET_BBLOCK (cfg, tbb, ip + 5);
8683 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8684 * from Monitor.Enter like ArgumentNullException.
8686 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8687 /* Mark this bblock as needing to be extended */
8688 tbb->extend_try_block = TRUE;
8692 /* Conversion to a JIT intrinsic */
8693 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8694 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8695 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8703 if ((cfg->opt & MONO_OPT_INLINE) &&
8704 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8705 mono_method_check_inlining (cfg, cmethod)) {
8707 gboolean always = FALSE;
8709 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8710 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8711 /* Prevent inlining of methods that call wrappers */
8712 INLINE_FAILURE ("wrapper call");
8713 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8717 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8719 cfg->real_offset += 5;
8721 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8722 /* *sp is already set by inline_method */
8727 inline_costs += costs;
8733 /* Tail recursion elimination */
8734 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8735 gboolean has_vtargs = FALSE;
8738 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8739 INLINE_FAILURE ("tail call");
8741 /* keep it simple */
8742 for (i = fsig->param_count - 1; i >= 0; i--) {
8743 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8748 if (need_seq_point) {
8749 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8750 need_seq_point = FALSE;
8752 for (i = 0; i < n; ++i)
8753 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8754 MONO_INST_NEW (cfg, ins, OP_BR);
8755 MONO_ADD_INS (cfg->cbb, ins);
8756 tblock = start_bblock->out_bb [0];
8757 link_bblock (cfg, cfg->cbb, tblock);
8758 ins->inst_target_bb = tblock;
8759 start_new_bblock = 1;
8761 /* skip the CEE_RET, too */
8762 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
8769 inline_costs += 10 * num_calls++;
8772 * Synchronized wrappers.
8773 * Its hard to determine where to replace a method with its synchronized
8774 * wrapper without causing an infinite recursion. The current solution is
8775 * to add the synchronized wrapper in the trampolines, and to
8776 * change the called method to a dummy wrapper, and resolve that wrapper
8777 * to the real method in mono_jit_compile_method ().
8779 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8780 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8781 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8782 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8786 * Making generic calls out of gsharedvt methods.
8787 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8788 * patching gshared method addresses into a gsharedvt method.
8790 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
8791 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
8792 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
8793 MonoRgctxInfoType info_type;
8796 //if (mono_class_is_interface (cmethod->klass))
8797 //GSHAREDVT_FAILURE (*ip);
8798 // disable for possible remoting calls
8799 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8800 GSHAREDVT_FAILURE (*ip);
8801 if (fsig->generic_param_count) {
8802 /* virtual generic call */
8803 g_assert (!imt_arg);
8804 /* Same as the virtual generic case above */
8805 imt_arg = emit_get_rgctx_method (cfg, context_used,
8806 cmethod, MONO_RGCTX_INFO_METHOD);
8807 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8809 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
8810 /* This can happen when we call a fully instantiated iface method */
8811 imt_arg = emit_get_rgctx_method (cfg, context_used,
8812 cmethod, MONO_RGCTX_INFO_METHOD);
8817 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8818 keep_this_alive = sp [0];
8820 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8821 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8823 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8824 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8826 if (cfg->llvm_only) {
8827 // FIXME: Avoid initializing vtable_arg
8828 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8830 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8835 /* Generic sharing */
8838 * Use this if the callee is gsharedvt sharable too, since
8839 * at runtime we might find an instantiation so the call cannot
8840 * be patched (the 'no_patch' code path in mini-trampolines.c).
8842 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8843 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8844 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8845 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
8846 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8847 INLINE_FAILURE ("gshared");
8849 g_assert (cfg->gshared && cmethod);
8853 * We are compiling a call to a
8854 * generic method from shared code,
8855 * which means that we have to look up
8856 * the method in the rgctx and do an
8860 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8862 if (cfg->llvm_only) {
8863 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
8864 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
8866 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8867 // FIXME: Avoid initializing imt_arg/vtable_arg
8868 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8870 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8871 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8876 /* Direct calls to icalls */
8878 MonoMethod *wrapper;
8881 /* Inline the wrapper */
8882 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8884 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
8885 g_assert (costs > 0);
8886 cfg->real_offset += 5;
8888 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8889 /* *sp is already set by inline_method */
8894 inline_costs += costs;
8903 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8904 MonoInst *val = sp [fsig->param_count];
8906 if (val->type == STACK_OBJ) {
8907 MonoInst *iargs [2];
8912 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8915 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8917 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
8918 mini_emit_write_barrier (cfg, addr, val);
8919 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
8920 GSHAREDVT_FAILURE (*ip);
8921 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8922 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8924 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8925 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8926 if (!cmethod->klass->element_class->valuetype && !readonly)
8927 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8928 CHECK_TYPELOAD (cmethod->klass);
8931 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8934 g_assert_not_reached ();
8941 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
8945 /* Tail prefix / tail call optimization */
8947 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8948 /* FIXME: runtime generic context pointer for jumps? */
8949 /* FIXME: handle this for generic sharing eventually */
8950 if ((ins_flag & MONO_INST_TAILCALL) &&
8951 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8952 supported_tail_call = TRUE;
8954 if (supported_tail_call) {
8957 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8958 INLINE_FAILURE ("tail call");
8960 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8962 if (cfg->backend->have_op_tail_call) {
8963 /* Handle tail calls similarly to normal calls */
8966 mini_profiler_emit_tail_call (cfg, cmethod);
8968 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8969 call->tail_call = TRUE;
8970 call->method = cmethod;
8971 call->signature = mono_method_signature (cmethod);
8974 * We implement tail calls by storing the actual arguments into the
8975 * argument variables, then emitting a CEE_JMP.
8977 for (i = 0; i < n; ++i) {
8978 /* Prevent argument from being register allocated */
8979 arg_array [i]->flags |= MONO_INST_VOLATILE;
8980 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8982 ins = (MonoInst*)call;
8983 ins->inst_p0 = cmethod;
8984 ins->inst_p1 = arg_array [0];
8985 MONO_ADD_INS (cfg->cbb, ins);
8986 link_bblock (cfg, cfg->cbb, end_bblock);
8987 start_new_bblock = 1;
8989 // FIXME: Eliminate unreachable epilogs
8992 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8993 * only reachable from this call.
8995 GET_BBLOCK (cfg, tblock, ip + 5);
8996 if (tblock == cfg->cbb || tblock->in_count == 0)
9005 * Virtual calls in llvm-only mode.
9007 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9008 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9013 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9014 INLINE_FAILURE ("call");
9015 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9016 imt_arg, vtable_arg);
9018 if (tail_call && !cfg->llvm_only) {
9019 link_bblock (cfg, cfg->cbb, end_bblock);
9020 start_new_bblock = 1;
9022 // FIXME: Eliminate unreachable epilogs
9025 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9026 * only reachable from this call.
9028 GET_BBLOCK (cfg, tblock, ip + 5);
9029 if (tblock == cfg->cbb || tblock->in_count == 0)
9036 /* End of call, INS should contain the result of the call, if any */
9038 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9041 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9046 if (keep_this_alive) {
9047 MonoInst *dummy_use;
9049 /* See mono_emit_method_call_full () */
9050 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9053 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9055 * Clang can convert these calls to tail calls which screw up the stack
9056 * walk. This happens even when the -fno-optimize-sibling-calls
9057 * option is passed to clang.
9058 * Work around this by emitting a dummy call.
9060 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9063 CHECK_CFG_EXCEPTION;
9067 g_assert (*ip == CEE_RET);
9071 constrained_class = NULL;
9073 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9077 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
9079 if (cfg->method != method) {
9080 /* return from inlined method */
9082 * If in_count == 0, that means the ret is unreachable due to
9083 * being preceded by a throw. In that case, inline_method () will
9084 * handle setting the return value
9085 * (test case: test_0_inline_throw ()).
9087 if (return_var && cfg->cbb->in_count) {
9088 MonoType *ret_type = mono_method_signature (method)->ret;
9094 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9097 //g_assert (returnvar != -1);
9098 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9099 cfg->ret_var_set = TRUE;
9102 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9106 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9108 if (seq_points && !sym_seq_points) {
9110 * Place a seq point here too even though the IL stack is not
9111 * empty, so a step over on
9114 * will work correctly.
9116 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9117 MONO_ADD_INS (cfg->cbb, ins);
9120 g_assert (!return_var);
9124 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9127 emit_setret (cfg, *sp);
9130 if (sp != stack_start)
9132 MONO_INST_NEW (cfg, ins, OP_BR);
9134 ins->inst_target_bb = end_bblock;
9135 MONO_ADD_INS (cfg->cbb, ins);
9136 link_bblock (cfg, cfg->cbb, end_bblock);
9137 start_new_bblock = 1;
9141 MONO_INST_NEW (cfg, ins, OP_BR);
9143 target = ip + 1 + (signed char)(*ip);
9145 GET_BBLOCK (cfg, tblock, target);
9146 link_bblock (cfg, cfg->cbb, tblock);
9147 ins->inst_target_bb = tblock;
9148 if (sp != stack_start) {
9149 handle_stack_args (cfg, stack_start, sp - stack_start);
9151 CHECK_UNVERIFIABLE (cfg);
9153 MONO_ADD_INS (cfg->cbb, ins);
9154 start_new_bblock = 1;
9155 inline_costs += BRANCH_COST;
9169 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9171 target = ip + 1 + *(signed char*)ip;
9177 inline_costs += BRANCH_COST;
9181 MONO_INST_NEW (cfg, ins, OP_BR);
9184 target = ip + 4 + (gint32)read32(ip);
9186 GET_BBLOCK (cfg, tblock, target);
9187 link_bblock (cfg, cfg->cbb, tblock);
9188 ins->inst_target_bb = tblock;
9189 if (sp != stack_start) {
9190 handle_stack_args (cfg, stack_start, sp - stack_start);
9192 CHECK_UNVERIFIABLE (cfg);
9195 MONO_ADD_INS (cfg->cbb, ins);
9197 start_new_bblock = 1;
9198 inline_costs += BRANCH_COST;
9205 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9206 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9207 guint32 opsize = is_short ? 1 : 4;
9209 CHECK_OPSIZE (opsize);
9211 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9214 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9219 GET_BBLOCK (cfg, tblock, target);
9220 link_bblock (cfg, cfg->cbb, tblock);
9221 GET_BBLOCK (cfg, tblock, ip);
9222 link_bblock (cfg, cfg->cbb, tblock);
9224 if (sp != stack_start) {
9225 handle_stack_args (cfg, stack_start, sp - stack_start);
9226 CHECK_UNVERIFIABLE (cfg);
9229 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9230 cmp->sreg1 = sp [0]->dreg;
9231 type_from_op (cfg, cmp, sp [0], NULL);
9234 #if SIZEOF_REGISTER == 4
9235 if (cmp->opcode == OP_LCOMPARE_IMM) {
9236 /* Convert it to OP_LCOMPARE */
9237 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9238 ins->type = STACK_I8;
9239 ins->dreg = alloc_dreg (cfg, STACK_I8);
9241 MONO_ADD_INS (cfg->cbb, ins);
9242 cmp->opcode = OP_LCOMPARE;
9243 cmp->sreg2 = ins->dreg;
9246 MONO_ADD_INS (cfg->cbb, cmp);
9248 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9249 type_from_op (cfg, ins, sp [0], NULL);
9250 MONO_ADD_INS (cfg->cbb, ins);
9251 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9252 GET_BBLOCK (cfg, tblock, target);
9253 ins->inst_true_bb = tblock;
9254 GET_BBLOCK (cfg, tblock, ip);
9255 ins->inst_false_bb = tblock;
9256 start_new_bblock = 2;
9259 inline_costs += BRANCH_COST;
9274 MONO_INST_NEW (cfg, ins, *ip);
9276 target = ip + 4 + (gint32)read32(ip);
9282 inline_costs += BRANCH_COST;
9286 MonoBasicBlock **targets;
9287 MonoBasicBlock *default_bblock;
9288 MonoJumpInfoBBTable *table;
9289 int offset_reg = alloc_preg (cfg);
9290 int target_reg = alloc_preg (cfg);
9291 int table_reg = alloc_preg (cfg);
9292 int sum_reg = alloc_preg (cfg);
9293 gboolean use_op_switch;
9297 n = read32 (ip + 1);
9300 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9304 CHECK_OPSIZE (n * sizeof (guint32));
9305 target = ip + n * sizeof (guint32);
9307 GET_BBLOCK (cfg, default_bblock, target);
9308 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9310 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9311 for (i = 0; i < n; ++i) {
9312 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9313 targets [i] = tblock;
9314 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9318 if (sp != stack_start) {
9320 * Link the current bb with the targets as well, so handle_stack_args
9321 * will set their in_stack correctly.
9323 link_bblock (cfg, cfg->cbb, default_bblock);
9324 for (i = 0; i < n; ++i)
9325 link_bblock (cfg, cfg->cbb, targets [i]);
9327 handle_stack_args (cfg, stack_start, sp - stack_start);
9329 CHECK_UNVERIFIABLE (cfg);
9331 /* Undo the links */
9332 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9333 for (i = 0; i < n; ++i)
9334 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9338 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9340 for (i = 0; i < n; ++i)
9341 link_bblock (cfg, cfg->cbb, targets [i]);
9343 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9344 table->table = targets;
9345 table->table_size = n;
9347 use_op_switch = FALSE;
9349 /* ARM implements SWITCH statements differently */
9350 /* FIXME: Make it use the generic implementation */
9351 if (!cfg->compile_aot)
9352 use_op_switch = TRUE;
9355 if (COMPILE_LLVM (cfg))
9356 use_op_switch = TRUE;
9358 cfg->cbb->has_jump_table = 1;
9360 if (use_op_switch) {
9361 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9362 ins->sreg1 = src1->dreg;
9363 ins->inst_p0 = table;
9364 ins->inst_many_bb = targets;
9365 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9366 MONO_ADD_INS (cfg->cbb, ins);
9368 if (sizeof (gpointer) == 8)
9369 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9371 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9373 #if SIZEOF_REGISTER == 8
9374 /* The upper word might not be zero, and we add it to a 64 bit address later */
9375 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9378 if (cfg->compile_aot) {
9379 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9381 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9382 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9383 ins->inst_p0 = table;
9384 ins->dreg = table_reg;
9385 MONO_ADD_INS (cfg->cbb, ins);
9388 /* FIXME: Use load_memindex */
9389 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9391 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9393 start_new_bblock = 1;
9394 inline_costs += (BRANCH_COST * 2);
9411 ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
9427 if (ins_flag & MONO_INST_VOLATILE) {
9428 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9429 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9432 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9433 ins->flags |= ins_flag;
9436 MONO_ADD_INS (cfg->cbb, ins);
9438 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9439 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9448 MONO_INST_NEW (cfg, ins, (*ip));
9450 ins->sreg1 = sp [0]->dreg;
9451 ins->sreg2 = sp [1]->dreg;
9452 type_from_op (cfg, ins, sp [0], sp [1]);
9454 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9456 /* Use the immediate opcodes if possible */
9457 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9458 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9459 if (imm_opcode != -1) {
9460 ins->opcode = imm_opcode;
9461 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9464 NULLIFY_INS (sp [1]);
9468 MONO_ADD_INS ((cfg)->cbb, (ins));
9470 *sp++ = mono_decompose_opcode (cfg, ins);
9487 MONO_INST_NEW (cfg, ins, (*ip));
9489 ins->sreg1 = sp [0]->dreg;
9490 ins->sreg2 = sp [1]->dreg;
9491 type_from_op (cfg, ins, sp [0], sp [1]);
9493 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9494 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9496 /* FIXME: Pass opcode to is_inst_imm */
9498 /* Use the immediate opcodes if possible */
9499 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9500 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9501 if (imm_opcode != -1) {
9502 ins->opcode = imm_opcode;
9503 if (sp [1]->opcode == OP_I8CONST) {
9504 #if SIZEOF_REGISTER == 8
9505 ins->inst_imm = sp [1]->inst_l;
9507 ins->inst_ls_word = sp [1]->inst_ls_word;
9508 ins->inst_ms_word = sp [1]->inst_ms_word;
9512 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9515 /* Might be followed by an instruction added by add_widen_op */
9516 if (sp [1]->next == NULL)
9517 NULLIFY_INS (sp [1]);
9520 MONO_ADD_INS ((cfg)->cbb, (ins));
9522 *sp++ = mono_decompose_opcode (cfg, ins);
9535 case CEE_CONV_OVF_I8:
9536 case CEE_CONV_OVF_U8:
9540 /* Special case this earlier so we have long constants in the IR */
9541 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9542 int data = sp [-1]->inst_c0;
9543 sp [-1]->opcode = OP_I8CONST;
9544 sp [-1]->type = STACK_I8;
9545 #if SIZEOF_REGISTER == 8
9546 if ((*ip) == CEE_CONV_U8)
9547 sp [-1]->inst_c0 = (guint32)data;
9549 sp [-1]->inst_c0 = data;
9551 sp [-1]->inst_ls_word = data;
9552 if ((*ip) == CEE_CONV_U8)
9553 sp [-1]->inst_ms_word = 0;
9555 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9557 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9564 case CEE_CONV_OVF_I4:
9565 case CEE_CONV_OVF_I1:
9566 case CEE_CONV_OVF_I2:
9567 case CEE_CONV_OVF_I:
9568 case CEE_CONV_OVF_U:
9571 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9572 ADD_UNOP (CEE_CONV_OVF_I8);
9579 case CEE_CONV_OVF_U1:
9580 case CEE_CONV_OVF_U2:
9581 case CEE_CONV_OVF_U4:
9584 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9585 ADD_UNOP (CEE_CONV_OVF_U8);
9592 case CEE_CONV_OVF_I1_UN:
9593 case CEE_CONV_OVF_I2_UN:
9594 case CEE_CONV_OVF_I4_UN:
9595 case CEE_CONV_OVF_I8_UN:
9596 case CEE_CONV_OVF_U1_UN:
9597 case CEE_CONV_OVF_U2_UN:
9598 case CEE_CONV_OVF_U4_UN:
9599 case CEE_CONV_OVF_U8_UN:
9600 case CEE_CONV_OVF_I_UN:
9601 case CEE_CONV_OVF_U_UN:
9608 CHECK_CFG_EXCEPTION;
9612 case CEE_ADD_OVF_UN:
9614 case CEE_MUL_OVF_UN:
9616 case CEE_SUB_OVF_UN:
9622 GSHAREDVT_FAILURE (*ip);
9625 token = read32 (ip + 1);
9626 klass = mini_get_class (method, token, generic_context);
9627 CHECK_TYPELOAD (klass);
9629 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9640 token = read32 (ip + 1);
9641 klass = mini_get_class (method, token, generic_context);
9642 CHECK_TYPELOAD (klass);
9644 /* Optimize the common ldobj+stloc combination */
9654 loc_index = ip [5] - CEE_STLOC_0;
9661 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9662 CHECK_LOCAL (loc_index);
9664 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9665 ins->dreg = cfg->locals [loc_index]->dreg;
9666 ins->flags |= ins_flag;
9669 if (ins_flag & MONO_INST_VOLATILE) {
9670 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9671 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9677 /* Optimize the ldobj+stobj combination */
9678 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) {
9683 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9690 ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
9699 CHECK_STACK_OVF (1);
9701 n = read32 (ip + 1);
9703 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9704 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9705 ins->type = STACK_OBJ;
9708 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9709 MonoInst *iargs [1];
9710 char *str = (char *)mono_method_get_wrapper_data (method, n);
9712 if (cfg->compile_aot)
9713 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9715 EMIT_NEW_PCONST (cfg, iargs [0], str);
9716 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9718 if (cfg->opt & MONO_OPT_SHARED) {
9719 MonoInst *iargs [3];
9721 if (cfg->compile_aot) {
9722 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9724 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9725 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9726 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9727 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
9728 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9731 if (cfg->cbb->out_of_line) {
9732 MonoInst *iargs [2];
9734 if (image == mono_defaults.corlib) {
9736 * Avoid relocations in AOT and save some space by using a
9737 * version of helper_ldstr specialized to mscorlib.
9739 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9740 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9742 /* Avoid creating the string object */
9743 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9744 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9745 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9749 if (cfg->compile_aot) {
9750 NEW_LDSTRCONST (cfg, ins, image, n);
9752 MONO_ADD_INS (cfg->cbb, ins);
9755 NEW_PCONST (cfg, ins, NULL);
9756 ins->type = STACK_OBJ;
9757 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9761 OUT_OF_MEMORY_FAILURE;
9764 MONO_ADD_INS (cfg->cbb, ins);
9773 MonoInst *iargs [2];
9774 MonoMethodSignature *fsig;
9777 MonoInst *vtable_arg = NULL;
9780 token = read32 (ip + 1);
9781 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9784 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9787 mono_save_token_info (cfg, image, token, cmethod);
9789 if (!mono_class_init (cmethod->klass))
9790 TYPE_LOAD_ERROR (cmethod->klass);
9792 context_used = mini_method_check_context_used (cfg, cmethod);
9794 if (!dont_verify && !cfg->skip_visibility) {
9795 MonoMethod *cil_method = cmethod;
9796 MonoMethod *target_method = cil_method;
9798 if (method->is_inflated) {
9799 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9803 if (!mono_method_can_access_method (method_definition, target_method) &&
9804 !mono_method_can_access_method (method, cil_method))
9805 emit_method_access_failure (cfg, method, cil_method);
9808 if (mono_security_core_clr_enabled ())
9809 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
9811 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9812 emit_class_init (cfg, cmethod->klass);
9813 CHECK_TYPELOAD (cmethod->klass);
9817 if (cfg->gsharedvt) {
9818 if (mini_is_gsharedvt_variable_signature (sig))
9819 GSHAREDVT_FAILURE (*ip);
9823 n = fsig->param_count;
9827 * Generate smaller code for the common newobj <exception> instruction in
9828 * argument checking code.
9830 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9831 is_exception_class (cmethod->klass) && n <= 2 &&
9832 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9833 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9834 MonoInst *iargs [3];
9838 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9841 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9845 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9850 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9853 g_assert_not_reached ();
9861 /* move the args to allow room for 'this' in the first position */
9867 /* check_call_signature () requires sp[0] to be set */
9868 this_ins.type = STACK_OBJ;
9870 if (check_call_signature (cfg, fsig, sp))
9875 if (mini_class_is_system_array (cmethod->klass)) {
9876 *sp = emit_get_rgctx_method (cfg, context_used,
9877 cmethod, MONO_RGCTX_INFO_METHOD);
9879 /* Avoid varargs in the common case */
9880 if (fsig->param_count == 1)
9881 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9882 else if (fsig->param_count == 2)
9883 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9884 else if (fsig->param_count == 3)
9885 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9886 else if (fsig->param_count == 4)
9887 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9889 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9890 } else if (cmethod->string_ctor) {
9891 g_assert (!context_used);
9892 g_assert (!vtable_arg);
9893 /* we simply pass a null pointer */
9894 EMIT_NEW_PCONST (cfg, *sp, NULL);
9895 /* now call the string ctor */
9896 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9898 if (cmethod->klass->valuetype) {
9899 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9900 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9901 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9906 * The code generated by mini_emit_virtual_call () expects
9907 * iargs [0] to be a boxed instance, but luckily the vcall
9908 * will be transformed into a normal call there.
9910 } else if (context_used) {
9911 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9914 MonoVTable *vtable = NULL;
9916 if (!cfg->compile_aot)
9917 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9918 CHECK_TYPELOAD (cmethod->klass);
9921 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9922 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9923 * As a workaround, we call class cctors before allocating objects.
9925 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9926 emit_class_init (cfg, cmethod->klass);
9927 if (cfg->verbose_level > 2)
9928 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9929 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9932 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9935 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9938 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9940 /* Now call the actual ctor */
9941 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
9942 CHECK_CFG_EXCEPTION;
9945 if (alloc == NULL) {
9947 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9948 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9956 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
9957 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9965 token = read32 (ip + 1);
9966 klass = mini_get_class (method, token, generic_context);
9967 CHECK_TYPELOAD (klass);
9968 if (sp [0]->type != STACK_OBJ)
9971 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
9972 ins->dreg = alloc_preg (cfg);
9973 ins->sreg1 = (*sp)->dreg;
9975 ins->type = STACK_OBJ;
9976 MONO_ADD_INS (cfg->cbb, ins);
9978 CHECK_CFG_EXCEPTION;
9982 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
9985 case CEE_UNBOX_ANY: {
9986 MonoInst *res, *addr;
9991 token = read32 (ip + 1);
9992 klass = mini_get_class (method, token, generic_context);
9993 CHECK_TYPELOAD (klass);
9995 mono_save_token_info (cfg, image, token, klass);
9997 context_used = mini_class_check_context_used (cfg, klass);
9999 if (mini_is_gsharedvt_klass (klass)) {
10000 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10002 } else if (generic_class_is_reference_type (cfg, klass)) {
10003 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10004 EMIT_NEW_PCONST (cfg, res, NULL);
10005 res->type = STACK_OBJ;
10007 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10008 res->dreg = alloc_preg (cfg);
10009 res->sreg1 = (*sp)->dreg;
10010 res->klass = klass;
10011 res->type = STACK_OBJ;
10012 MONO_ADD_INS (cfg->cbb, res);
10013 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10015 } else if (mono_class_is_nullable (klass)) {
10016 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10018 addr = handle_unbox (cfg, klass, sp, context_used);
10020 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10031 MonoClass *enum_class;
10032 MonoMethod *has_flag;
10038 token = read32 (ip + 1);
10039 klass = mini_get_class (method, token, generic_context);
10040 CHECK_TYPELOAD (klass);
10042 mono_save_token_info (cfg, image, token, klass);
10044 context_used = mini_class_check_context_used (cfg, klass);
10046 if (generic_class_is_reference_type (cfg, klass)) {
10052 if (klass == mono_defaults.void_class)
10054 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10056 /* frequent check in generic code: box (struct), brtrue */
10061 * <push int/long ptr>
10064 * constrained. MyFlags
10065 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10067 * If we find this sequence and the operand types on box and constrained
10068 * are equal, we can emit a specialized instruction sequence instead of
10069 * the very slow HasFlag () call.
10071 if ((cfg->opt & MONO_OPT_INTRINS) &&
10072 /* Cheap checks first. */
10073 ip + 5 + 6 + 5 < end &&
10074 ip [5] == CEE_PREFIX1 &&
10075 ip [6] == CEE_CONSTRAINED_ &&
10076 ip [11] == CEE_CALLVIRT &&
10077 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10078 mono_class_is_enum (klass) &&
10079 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10080 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10081 has_flag->klass == mono_defaults.enum_class &&
10082 !strcmp (has_flag->name, "HasFlag") &&
10083 has_flag->signature->hasthis &&
10084 has_flag->signature->param_count == 1) {
10085 CHECK_TYPELOAD (enum_class);
10087 if (enum_class == klass) {
10088 MonoInst *enum_this, *enum_flag;
10093 enum_this = sp [0];
10094 enum_flag = sp [1];
10096 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10101 // FIXME: LLVM can't handle the inconsistent bb linking
10102 if (!mono_class_is_nullable (klass) &&
10103 !mini_is_gsharedvt_klass (klass) &&
10104 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10105 (ip [5] == CEE_BRTRUE ||
10106 ip [5] == CEE_BRTRUE_S ||
10107 ip [5] == CEE_BRFALSE ||
10108 ip [5] == CEE_BRFALSE_S)) {
10109 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10111 MonoBasicBlock *true_bb, *false_bb;
10115 if (cfg->verbose_level > 3) {
10116 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10117 printf ("<box+brtrue opt>\n");
10122 case CEE_BRFALSE_S:
10125 target = ip + 1 + (signed char)(*ip);
10132 target = ip + 4 + (gint)(read32 (ip));
10136 g_assert_not_reached ();
10140 * We need to link both bblocks, since it is needed for handling stack
10141 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10142 * Branching to only one of them would lead to inconsistencies, so
10143 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10145 GET_BBLOCK (cfg, true_bb, target);
10146 GET_BBLOCK (cfg, false_bb, ip);
10148 mono_link_bblock (cfg, cfg->cbb, true_bb);
10149 mono_link_bblock (cfg, cfg->cbb, false_bb);
10151 if (sp != stack_start) {
10152 handle_stack_args (cfg, stack_start, sp - stack_start);
10154 CHECK_UNVERIFIABLE (cfg);
10157 if (COMPILE_LLVM (cfg)) {
10158 dreg = alloc_ireg (cfg);
10159 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10160 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10162 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10164 /* The JIT can't eliminate the iconst+compare */
10165 MONO_INST_NEW (cfg, ins, OP_BR);
10166 ins->inst_target_bb = is_true ? true_bb : false_bb;
10167 MONO_ADD_INS (cfg->cbb, ins);
10170 start_new_bblock = 1;
10174 *sp++ = handle_box (cfg, val, klass, context_used);
10176 CHECK_CFG_EXCEPTION;
10185 token = read32 (ip + 1);
10186 klass = mini_get_class (method, token, generic_context);
10187 CHECK_TYPELOAD (klass);
10189 mono_save_token_info (cfg, image, token, klass);
10191 context_used = mini_class_check_context_used (cfg, klass);
10193 if (mono_class_is_nullable (klass)) {
10196 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10197 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10201 ins = handle_unbox (cfg, klass, sp, context_used);
10214 MonoClassField *field;
10215 #ifndef DISABLE_REMOTING
10219 gboolean is_instance;
10221 gpointer addr = NULL;
10222 gboolean is_special_static;
10224 MonoInst *store_val = NULL;
10225 MonoInst *thread_ins;
10228 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10230 if (op == CEE_STFLD) {
10233 store_val = sp [1];
10238 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10240 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10243 if (op == CEE_STSFLD) {
10246 store_val = sp [0];
10251 token = read32 (ip + 1);
10252 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10253 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10254 klass = field->parent;
10257 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10260 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10261 FIELD_ACCESS_FAILURE (method, field);
10262 mono_class_init (klass);
10264 /* if the class is Critical then transparent code cannot access its fields */
10265 if (!is_instance && mono_security_core_clr_enabled ())
10266 ensure_method_is_allowed_to_access_field (cfg, method, field);
10268 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10269 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10270 if (mono_security_core_clr_enabled ())
10271 ensure_method_is_allowed_to_access_field (cfg, method, field);
10274 ftype = mono_field_get_type (field);
10277 * LDFLD etc. is usable on static fields as well, so convert those cases to
10280 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10292 g_assert_not_reached ();
10294 is_instance = FALSE;
10297 context_used = mini_class_check_context_used (cfg, klass);
10299 /* INSTANCE CASE */
10301 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10302 if (op == CEE_STFLD) {
10303 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10305 #ifndef DISABLE_REMOTING
10306 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10307 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10308 MonoInst *iargs [5];
10310 GSHAREDVT_FAILURE (op);
10312 iargs [0] = sp [0];
10313 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10314 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10315 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10317 iargs [4] = sp [1];
10319 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10320 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10321 iargs, ip, cfg->real_offset, TRUE);
10322 CHECK_CFG_EXCEPTION;
10323 g_assert (costs > 0);
10325 cfg->real_offset += 5;
10327 inline_costs += costs;
10329 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10334 MonoInst *store, *wbarrier_ptr_ins = NULL;
10336 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10338 if (ins_flag & MONO_INST_VOLATILE) {
10339 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10340 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10343 if (mini_is_gsharedvt_klass (klass)) {
10344 MonoInst *offset_ins;
10346 context_used = mini_class_check_context_used (cfg, klass);
10348 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10349 /* The value is offset by 1 */
10350 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10351 dreg = alloc_ireg_mp (cfg);
10352 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10353 wbarrier_ptr_ins = ins;
10354 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
10355 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10357 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10359 if (sp [0]->opcode != OP_LDADDR)
10360 store->flags |= MONO_INST_FAULT;
10362 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10363 if (mini_is_gsharedvt_klass (klass)) {
10364 g_assert (wbarrier_ptr_ins);
10365 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10367 /* insert call to write barrier */
10371 dreg = alloc_ireg_mp (cfg);
10372 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10373 mini_emit_write_barrier (cfg, ptr, sp [1]);
10377 store->flags |= ins_flag;
10384 #ifndef DISABLE_REMOTING
10385 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10386 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10387 MonoInst *iargs [4];
10389 GSHAREDVT_FAILURE (op);
10391 iargs [0] = sp [0];
10392 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10393 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10394 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10395 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10396 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10397 iargs, ip, cfg->real_offset, TRUE);
10398 CHECK_CFG_EXCEPTION;
10399 g_assert (costs > 0);
10401 cfg->real_offset += 5;
10405 inline_costs += costs;
10407 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10413 if (sp [0]->type == STACK_VTYPE) {
10416 /* Have to compute the address of the variable */
10418 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10420 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10422 g_assert (var->klass == klass);
10424 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10428 if (op == CEE_LDFLDA) {
10429 if (sp [0]->type == STACK_OBJ) {
10430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10431 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10434 dreg = alloc_ireg_mp (cfg);
10436 if (mini_is_gsharedvt_klass (klass)) {
10437 MonoInst *offset_ins;
10439 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10440 /* The value is offset by 1 */
10441 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10442 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10444 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10446 ins->klass = mono_class_from_mono_type (field->type);
10447 ins->type = STACK_MP;
10452 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10454 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10455 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10464 MonoInst *field_add_inst = sp [0];
10465 if (mini_is_gsharedvt_klass (klass)) {
10466 MonoInst *offset_ins;
10468 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10469 /* The value is offset by 1 */
10470 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10471 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
10475 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
10477 if (sp [0]->opcode != OP_LDADDR)
10478 load->flags |= MONO_INST_FAULT;
10490 context_used = mini_class_check_context_used (cfg, klass);
10492 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10493 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10497 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10498 * to be called here.
10500 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10501 mono_class_vtable (cfg->domain, klass);
10502 CHECK_TYPELOAD (klass);
10504 mono_domain_lock (cfg->domain);
10505 if (cfg->domain->special_static_fields)
10506 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10507 mono_domain_unlock (cfg->domain);
10509 is_special_static = mono_class_field_is_special_static (field);
10511 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10512 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10516 /* Generate IR to compute the field address */
10517 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10519 * Fast access to TLS data
10520 * Inline version of get_thread_static_data () in
10524 int idx, static_data_reg, array_reg, dreg;
10526 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10527 GSHAREDVT_FAILURE (op);
10529 static_data_reg = alloc_ireg (cfg);
10530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10532 if (cfg->compile_aot) {
10533 int offset_reg, offset2_reg, idx_reg;
10535 /* For TLS variables, this will return the TLS offset */
10536 EMIT_NEW_SFLDACONST (cfg, ins, field);
10537 offset_reg = ins->dreg;
10538 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10539 idx_reg = alloc_ireg (cfg);
10540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10542 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10543 array_reg = alloc_ireg (cfg);
10544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10545 offset2_reg = alloc_ireg (cfg);
10546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10548 dreg = alloc_ireg (cfg);
10549 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10551 offset = (gsize)addr & 0x7fffffff;
10552 idx = offset & 0x3f;
10554 array_reg = alloc_ireg (cfg);
10555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10556 dreg = alloc_ireg (cfg);
10557 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10559 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10560 (cfg->compile_aot && is_special_static) ||
10561 (context_used && is_special_static)) {
10562 MonoInst *iargs [2];
10564 g_assert (field->parent);
10565 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10566 if (context_used) {
10567 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10568 field, MONO_RGCTX_INFO_CLASS_FIELD);
10570 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10572 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10573 } else if (context_used) {
10574 MonoInst *static_data;
10577 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10578 method->klass->name_space, method->klass->name, method->name,
10579 depth, field->offset);
10582 if (mono_class_needs_cctor_run (klass, method))
10583 emit_class_init (cfg, klass);
10586 * The pointer we're computing here is
10588 * super_info.static_data + field->offset
10590 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10591 klass, MONO_RGCTX_INFO_STATIC_DATA);
10593 if (mini_is_gsharedvt_klass (klass)) {
10594 MonoInst *offset_ins;
10596 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10597 /* The value is offset by 1 */
10598 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10599 dreg = alloc_ireg_mp (cfg);
10600 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10601 } else if (field->offset == 0) {
10604 int addr_reg = mono_alloc_preg (cfg);
10605 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10607 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10608 MonoInst *iargs [2];
10610 g_assert (field->parent);
10611 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10612 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10613 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10615 MonoVTable *vtable = NULL;
10617 if (!cfg->compile_aot)
10618 vtable = mono_class_vtable (cfg->domain, klass);
10619 CHECK_TYPELOAD (klass);
10622 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10623 if (!(g_slist_find (class_inits, klass))) {
10624 emit_class_init (cfg, klass);
10625 if (cfg->verbose_level > 2)
10626 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10627 class_inits = g_slist_prepend (class_inits, klass);
10630 if (cfg->run_cctors) {
10631 /* This makes it so that inlining cannot trigger */
10632 /* .cctors: too many apps depend on them */
10633 /* running with a specific order... */
10635 if (! vtable->initialized)
10636 INLINE_FAILURE ("class init");
10637 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10638 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10639 goto exception_exit;
10643 if (cfg->compile_aot)
10644 EMIT_NEW_SFLDACONST (cfg, ins, field);
10647 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10649 EMIT_NEW_PCONST (cfg, ins, addr);
10652 MonoInst *iargs [1];
10653 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10654 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10658 /* Generate IR to do the actual load/store operation */
10660 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10661 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10662 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10665 if (op == CEE_LDSFLDA) {
10666 ins->klass = mono_class_from_mono_type (ftype);
10667 ins->type = STACK_PTR;
10669 } else if (op == CEE_STSFLD) {
10672 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10673 store->flags |= ins_flag;
10675 gboolean is_const = FALSE;
10676 MonoVTable *vtable = NULL;
10677 gpointer addr = NULL;
10679 if (!context_used) {
10680 vtable = mono_class_vtable (cfg->domain, klass);
10681 CHECK_TYPELOAD (klass);
10683 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10684 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10685 int ro_type = ftype->type;
10687 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10688 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10689 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10692 GSHAREDVT_FAILURE (op);
10694 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10697 case MONO_TYPE_BOOLEAN:
10699 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10703 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10706 case MONO_TYPE_CHAR:
10708 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10712 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10717 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10721 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10726 case MONO_TYPE_PTR:
10727 case MONO_TYPE_FNPTR:
10728 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10729 type_to_eval_stack_type ((cfg), field->type, *sp);
10732 case MONO_TYPE_STRING:
10733 case MONO_TYPE_OBJECT:
10734 case MONO_TYPE_CLASS:
10735 case MONO_TYPE_SZARRAY:
10736 case MONO_TYPE_ARRAY:
10737 if (!mono_gc_is_moving ()) {
10738 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10739 type_to_eval_stack_type ((cfg), field->type, *sp);
10747 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10752 case MONO_TYPE_VALUETYPE:
10762 CHECK_STACK_OVF (1);
10764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10765 load->flags |= ins_flag;
10771 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10772 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10773 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10784 token = read32 (ip + 1);
10785 klass = mini_get_class (method, token, generic_context);
10786 CHECK_TYPELOAD (klass);
10788 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10789 mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
10800 const char *data_ptr;
10802 guint32 field_token;
10808 token = read32 (ip + 1);
10810 klass = mini_get_class (method, token, generic_context);
10811 CHECK_TYPELOAD (klass);
10812 if (klass->byval_arg.type == MONO_TYPE_VOID)
10815 context_used = mini_class_check_context_used (cfg, klass);
10817 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10818 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10819 ins->sreg1 = sp [0]->dreg;
10820 ins->type = STACK_I4;
10821 ins->dreg = alloc_ireg (cfg);
10822 MONO_ADD_INS (cfg->cbb, ins);
10823 *sp = mono_decompose_opcode (cfg, ins);
10826 if (context_used) {
10827 MonoInst *args [3];
10828 MonoClass *array_class = mono_array_class_get (klass, 1);
10829 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10831 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10834 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
10835 array_class, MONO_RGCTX_INFO_VTABLE);
10840 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10842 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
10844 if (cfg->opt & MONO_OPT_SHARED) {
10845 /* Decompose now to avoid problems with references to the domainvar */
10846 MonoInst *iargs [3];
10848 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10849 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10850 iargs [2] = sp [0];
10852 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
10854 /* Decompose later since it is needed by abcrem */
10855 MonoClass *array_type = mono_array_class_get (klass, 1);
10856 mono_class_vtable (cfg->domain, array_type);
10857 CHECK_TYPELOAD (array_type);
10859 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10860 ins->dreg = alloc_ireg_ref (cfg);
10861 ins->sreg1 = sp [0]->dreg;
10862 ins->inst_newa_class = klass;
10863 ins->type = STACK_OBJ;
10864 ins->klass = array_type;
10865 MONO_ADD_INS (cfg->cbb, ins);
10866 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10867 cfg->cbb->has_array_access = TRUE;
10869 /* Needed so mono_emit_load_get_addr () gets called */
10870 mono_get_got_var (cfg);
10880 * we inline/optimize the initialization sequence if possible.
10881 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10882 * for small sizes open code the memcpy
10883 * ensure the rva field is big enough
10885 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10886 MonoMethod *memcpy_method = mini_get_memcpy_method ();
10887 MonoInst *iargs [3];
10888 int add_reg = alloc_ireg_mp (cfg);
10890 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10891 if (cfg->compile_aot) {
10892 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10894 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10896 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10897 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10906 if (sp [0]->type != STACK_OBJ)
10909 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10910 ins->dreg = alloc_preg (cfg);
10911 ins->sreg1 = sp [0]->dreg;
10912 ins->type = STACK_I4;
10913 /* This flag will be inherited by the decomposition */
10914 ins->flags |= MONO_INST_FAULT;
10915 MONO_ADD_INS (cfg->cbb, ins);
10916 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10917 cfg->cbb->has_array_access = TRUE;
10925 if (sp [0]->type != STACK_OBJ)
10928 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10930 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10931 CHECK_TYPELOAD (klass);
10932 /* we need to make sure that this array is exactly the type it needs
10933 * to be for correctness. the wrappers are lax with their usage
10934 * so we need to ignore them here
10936 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10937 MonoClass *array_class = mono_array_class_get (klass, 1);
10938 mini_emit_check_array_type (cfg, sp [0], array_class);
10939 CHECK_TYPELOAD (array_class);
10943 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10948 case CEE_LDELEM_I1:
10949 case CEE_LDELEM_U1:
10950 case CEE_LDELEM_I2:
10951 case CEE_LDELEM_U2:
10952 case CEE_LDELEM_I4:
10953 case CEE_LDELEM_U4:
10954 case CEE_LDELEM_I8:
10956 case CEE_LDELEM_R4:
10957 case CEE_LDELEM_R8:
10958 case CEE_LDELEM_REF: {
10964 if (*ip == CEE_LDELEM) {
10966 token = read32 (ip + 1);
10967 klass = mini_get_class (method, token, generic_context);
10968 CHECK_TYPELOAD (klass);
10969 mono_class_init (klass);
10972 klass = array_access_to_klass (*ip);
10974 if (sp [0]->type != STACK_OBJ)
10977 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10979 if (mini_is_gsharedvt_variable_klass (klass)) {
10980 // FIXME-VT: OP_ICONST optimization
10981 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10982 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10983 ins->opcode = OP_LOADV_MEMBASE;
10984 } else if (sp [1]->opcode == OP_ICONST) {
10985 int array_reg = sp [0]->dreg;
10986 int index_reg = sp [1]->dreg;
10987 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10989 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
10990 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
10992 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10993 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10995 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10996 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10999 if (*ip == CEE_LDELEM)
11006 case CEE_STELEM_I1:
11007 case CEE_STELEM_I2:
11008 case CEE_STELEM_I4:
11009 case CEE_STELEM_I8:
11010 case CEE_STELEM_R4:
11011 case CEE_STELEM_R8:
11012 case CEE_STELEM_REF:
11017 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11019 if (*ip == CEE_STELEM) {
11021 token = read32 (ip + 1);
11022 klass = mini_get_class (method, token, generic_context);
11023 CHECK_TYPELOAD (klass);
11024 mono_class_init (klass);
11027 klass = array_access_to_klass (*ip);
11029 if (sp [0]->type != STACK_OBJ)
11032 emit_array_store (cfg, klass, sp, TRUE);
11034 if (*ip == CEE_STELEM)
11041 case CEE_CKFINITE: {
11045 if (cfg->llvm_only) {
11046 MonoInst *iargs [1];
11048 iargs [0] = sp [0];
11049 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11051 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11052 ins->sreg1 = sp [0]->dreg;
11053 ins->dreg = alloc_freg (cfg);
11054 ins->type = STACK_R8;
11055 MONO_ADD_INS (cfg->cbb, ins);
11057 *sp++ = mono_decompose_opcode (cfg, ins);
11063 case CEE_REFANYVAL: {
11064 MonoInst *src_var, *src;
11066 int klass_reg = alloc_preg (cfg);
11067 int dreg = alloc_preg (cfg);
11069 GSHAREDVT_FAILURE (*ip);
11072 MONO_INST_NEW (cfg, ins, *ip);
11075 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11076 CHECK_TYPELOAD (klass);
11078 context_used = mini_class_check_context_used (cfg, klass);
11081 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11083 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11084 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11087 if (context_used) {
11088 MonoInst *klass_ins;
11090 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11091 klass, MONO_RGCTX_INFO_KLASS);
11094 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11095 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11097 mini_emit_class_check (cfg, klass_reg, klass);
11099 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11100 ins->type = STACK_MP;
11101 ins->klass = klass;
11106 case CEE_MKREFANY: {
11107 MonoInst *loc, *addr;
11109 GSHAREDVT_FAILURE (*ip);
11112 MONO_INST_NEW (cfg, ins, *ip);
11115 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11116 CHECK_TYPELOAD (klass);
11118 context_used = mini_class_check_context_used (cfg, klass);
11120 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11121 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11123 if (context_used) {
11124 MonoInst *const_ins;
11125 int type_reg = alloc_preg (cfg);
11127 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11132 int const_reg = alloc_preg (cfg);
11133 int type_reg = alloc_preg (cfg);
11135 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11136 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11138 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11140 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11142 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11143 ins->type = STACK_VTYPE;
11144 ins->klass = mono_defaults.typed_reference_class;
11149 case CEE_LDTOKEN: {
11151 MonoClass *handle_class;
11153 CHECK_STACK_OVF (1);
11156 n = read32 (ip + 1);
11158 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11159 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11160 handle = mono_method_get_wrapper_data (method, n);
11161 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11162 if (handle_class == mono_defaults.typehandle_class)
11163 handle = &((MonoClass*)handle)->byval_arg;
11166 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11171 mono_class_init (handle_class);
11172 if (cfg->gshared) {
11173 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11174 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11175 /* This case handles ldtoken
11176 of an open type, like for
11179 } else if (handle_class == mono_defaults.typehandle_class) {
11180 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11181 } else if (handle_class == mono_defaults.fieldhandle_class)
11182 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11183 else if (handle_class == mono_defaults.methodhandle_class)
11184 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11186 g_assert_not_reached ();
11189 if ((cfg->opt & MONO_OPT_SHARED) &&
11190 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11191 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11192 MonoInst *addr, *vtvar, *iargs [3];
11193 int method_context_used;
11195 method_context_used = mini_method_check_context_used (cfg, method);
11197 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11199 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11200 EMIT_NEW_ICONST (cfg, iargs [1], n);
11201 if (method_context_used) {
11202 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11203 method, MONO_RGCTX_INFO_METHOD);
11204 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11206 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11207 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11209 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11211 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11213 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11215 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11216 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11217 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11218 (cmethod->klass == mono_defaults.systemtype_class) &&
11219 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11220 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11222 mono_class_init (tclass);
11223 if (context_used) {
11224 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11225 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11226 } else if (cfg->compile_aot) {
11227 if (method->wrapper_type) {
11228 error_init (&error); //got to do it since there are multiple conditionals below
11229 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11230 /* Special case for static synchronized wrappers */
11231 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11233 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11234 /* FIXME: n is not a normal token */
11236 EMIT_NEW_PCONST (cfg, ins, NULL);
11239 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11242 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11244 EMIT_NEW_PCONST (cfg, ins, rt);
11246 ins->type = STACK_OBJ;
11247 ins->klass = cmethod->klass;
11250 MonoInst *addr, *vtvar;
11252 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11254 if (context_used) {
11255 if (handle_class == mono_defaults.typehandle_class) {
11256 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11257 mono_class_from_mono_type ((MonoType *)handle),
11258 MONO_RGCTX_INFO_TYPE);
11259 } else if (handle_class == mono_defaults.methodhandle_class) {
11260 ins = emit_get_rgctx_method (cfg, context_used,
11261 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11262 } else if (handle_class == mono_defaults.fieldhandle_class) {
11263 ins = emit_get_rgctx_field (cfg, context_used,
11264 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11266 g_assert_not_reached ();
11268 } else if (cfg->compile_aot) {
11269 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11271 EMIT_NEW_PCONST (cfg, ins, handle);
11273 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11274 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11275 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11285 if (sp [-1]->type != STACK_OBJ)
11288 MONO_INST_NEW (cfg, ins, OP_THROW);
11290 ins->sreg1 = sp [0]->dreg;
11292 cfg->cbb->out_of_line = TRUE;
11293 MONO_ADD_INS (cfg->cbb, ins);
11294 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11295 MONO_ADD_INS (cfg->cbb, ins);
11298 link_bblock (cfg, cfg->cbb, end_bblock);
11299 start_new_bblock = 1;
11300 /* This can complicate code generation for llvm since the return value might not be defined */
11301 if (COMPILE_LLVM (cfg))
11302 INLINE_FAILURE ("throw");
11304 case CEE_ENDFINALLY:
11305 if (!ip_in_finally_clause (cfg, ip - header->code))
11307 /* mono_save_seq_point_info () depends on this */
11308 if (sp != stack_start)
11309 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11310 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11311 MONO_ADD_INS (cfg->cbb, ins);
11313 start_new_bblock = 1;
11316 * Control will leave the method so empty the stack, otherwise
11317 * the next basic block will start with a nonempty stack.
11319 while (sp != stack_start) {
11324 case CEE_LEAVE_S: {
11327 if (*ip == CEE_LEAVE) {
11329 target = ip + 5 + (gint32)read32(ip + 1);
11332 target = ip + 2 + (signed char)(ip [1]);
11335 /* empty the stack */
11336 while (sp != stack_start) {
11341 * If this leave statement is in a catch block, check for a
11342 * pending exception, and rethrow it if necessary.
11343 * We avoid doing this in runtime invoke wrappers, since those are called
11344 * by native code which expects the wrapper to catch all exceptions.
11346 for (i = 0; i < header->num_clauses; ++i) {
11347 MonoExceptionClause *clause = &header->clauses [i];
11350 * Use <= in the final comparison to handle clauses with multiple
11351 * leave statements, like in bug #78024.
11352 * The ordering of the exception clauses guarantees that we find the
11353 * innermost clause.
11355 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11357 MonoBasicBlock *dont_throw;
11362 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11365 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11367 NEW_BBLOCK (cfg, dont_throw);
11370 * Currently, we always rethrow the abort exception, despite the
11371 * fact that this is not correct. See thread6.cs for an example.
11372 * But propagating the abort exception is more important than
11373 * getting the semantics right.
11375 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11377 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11379 MONO_START_BB (cfg, dont_throw);
11384 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11387 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11390 for (tmp = handlers; tmp; tmp = tmp->next) {
11391 MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
11392 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
11393 MonoBasicBlock *dont_throw;
11395 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11397 link_bblock (cfg, cfg->cbb, tblock);
11399 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
11401 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11402 ins->inst_target_bb = tblock;
11403 ins->inst_eh_block = clause;
11404 MONO_ADD_INS (cfg->cbb, ins);
11405 cfg->cbb->has_call_handler = 1;
11407 /* Throw exception if exvar is set */
11408 /* FIXME Do we need this for calls from catch/filter ? */
11409 NEW_BBLOCK (cfg, dont_throw);
11410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
11411 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11412 mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
11413 cfg->cbb->clause_hole = clause;
11415 MONO_START_BB (cfg, dont_throw);
11416 cfg->cbb->clause_hole = clause;
11418 if (COMPILE_LLVM (cfg)) {
11419 MonoBasicBlock *target_bb;
11422 * Link the finally bblock with the target, since it will
11423 * conceptually branch there.
11425 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11426 GET_BBLOCK (cfg, target_bb, target);
11427 link_bblock (cfg, tblock, target_bb);
11430 g_list_free (handlers);
11433 MONO_INST_NEW (cfg, ins, OP_BR);
11434 MONO_ADD_INS (cfg->cbb, ins);
11435 GET_BBLOCK (cfg, tblock, target);
11436 link_bblock (cfg, cfg->cbb, tblock);
11437 ins->inst_target_bb = tblock;
11439 start_new_bblock = 1;
11441 if (*ip == CEE_LEAVE)
11450 * Mono specific opcodes
11452 case MONO_CUSTOM_PREFIX: {
11454 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11458 case CEE_MONO_ICALL: {
11460 MonoJitICallInfo *info;
11462 token = read32 (ip + 2);
11463 func = mono_method_get_wrapper_data (method, token);
11464 info = mono_find_jit_icall_by_addr (func);
11466 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11469 CHECK_STACK (info->sig->param_count);
11470 sp -= info->sig->param_count;
11472 if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) {
11476 * This is called on unattached threads, so it cannot go through the trampoline
11477 * infrastructure. Use an indirect call through a got slot initialized at load time
11480 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
11481 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
11483 ins = mono_emit_jit_icall (cfg, info->func, sp);
11486 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11490 inline_costs += 10 * num_calls++;
11494 case CEE_MONO_LDPTR_CARD_TABLE:
11495 case CEE_MONO_LDPTR_NURSERY_START:
11496 case CEE_MONO_LDPTR_NURSERY_BITS:
11497 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11498 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
11499 CHECK_STACK_OVF (1);
11502 case CEE_MONO_LDPTR_CARD_TABLE:
11503 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11505 case CEE_MONO_LDPTR_NURSERY_START:
11506 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11508 case CEE_MONO_LDPTR_NURSERY_BITS:
11509 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11511 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11512 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11514 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
11515 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
11518 g_assert_not_reached ();
11524 inline_costs += 10 * num_calls++;
11527 case CEE_MONO_LDPTR: {
11530 CHECK_STACK_OVF (1);
11532 token = read32 (ip + 2);
11534 ptr = mono_method_get_wrapper_data (method, token);
11535 EMIT_NEW_PCONST (cfg, ins, ptr);
11538 inline_costs += 10 * num_calls++;
11539 /* Can't embed random pointers into AOT code */
11543 case CEE_MONO_JIT_ICALL_ADDR: {
11544 MonoJitICallInfo *callinfo;
11547 CHECK_STACK_OVF (1);
11549 token = read32 (ip + 2);
11551 ptr = mono_method_get_wrapper_data (method, token);
11552 callinfo = mono_find_jit_icall_by_addr (ptr);
11553 g_assert (callinfo);
11554 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11557 inline_costs += 10 * num_calls++;
11560 case CEE_MONO_ICALL_ADDR: {
11561 MonoMethod *cmethod;
11564 CHECK_STACK_OVF (1);
11566 token = read32 (ip + 2);
11568 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11570 if (cfg->compile_aot) {
11571 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11573 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11574 * before the call, it's not needed when using direct pinvoke.
11575 * This is not an optimization, but it's used to avoid looking up pinvokes
11576 * on platforms which don't support dlopen ().
11578 EMIT_NEW_PCONST (cfg, ins, NULL);
11580 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11583 ptr = mono_lookup_internal_call (cmethod);
11585 EMIT_NEW_PCONST (cfg, ins, ptr);
11591 case CEE_MONO_VTADDR: {
11592 MonoInst *src_var, *src;
11598 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11599 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11604 case CEE_MONO_NEWOBJ: {
11605 MonoInst *iargs [2];
11607 CHECK_STACK_OVF (1);
11609 token = read32 (ip + 2);
11610 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11611 mono_class_init (klass);
11612 NEW_DOMAINCONST (cfg, iargs [0]);
11613 MONO_ADD_INS (cfg->cbb, iargs [0]);
11614 NEW_CLASSCONST (cfg, iargs [1], klass);
11615 MONO_ADD_INS (cfg->cbb, iargs [1]);
11616 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11618 inline_costs += 10 * num_calls++;
11621 case CEE_MONO_OBJADDR:
11624 MONO_INST_NEW (cfg, ins, OP_MOVE);
11625 ins->dreg = alloc_ireg_mp (cfg);
11626 ins->sreg1 = sp [0]->dreg;
11627 ins->type = STACK_MP;
11628 MONO_ADD_INS (cfg->cbb, ins);
11632 case CEE_MONO_LDNATIVEOBJ:
11634 * Similar to LDOBJ, but instead load the unmanaged
11635 * representation of the vtype to the stack.
11640 token = read32 (ip + 2);
11641 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11642 g_assert (klass->valuetype);
11643 mono_class_init (klass);
11646 MonoInst *src, *dest, *temp;
11649 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11650 temp->backend.is_pinvoke = 1;
11651 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11652 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
11654 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11655 dest->type = STACK_VTYPE;
11656 dest->klass = klass;
11662 case CEE_MONO_RETOBJ: {
11664 * Same as RET, but return the native representation of a vtype
11667 g_assert (cfg->ret);
11668 g_assert (mono_method_signature (method)->pinvoke);
11673 token = read32 (ip + 2);
11674 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11676 if (!cfg->vret_addr) {
11677 g_assert (cfg->ret_var_is_local);
11679 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11681 EMIT_NEW_RETLOADA (cfg, ins);
11683 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
11685 if (sp != stack_start)
11688 MONO_INST_NEW (cfg, ins, OP_BR);
11689 ins->inst_target_bb = end_bblock;
11690 MONO_ADD_INS (cfg->cbb, ins);
11691 link_bblock (cfg, cfg->cbb, end_bblock);
11692 start_new_bblock = 1;
11696 case CEE_MONO_SAVE_LMF:
11697 case CEE_MONO_RESTORE_LMF:
11700 case CEE_MONO_CLASSCONST:
11701 CHECK_STACK_OVF (1);
11703 token = read32 (ip + 2);
11704 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11707 inline_costs += 10 * num_calls++;
11709 case CEE_MONO_NOT_TAKEN:
11710 cfg->cbb->out_of_line = TRUE;
11713 case CEE_MONO_TLS: {
11716 CHECK_STACK_OVF (1);
11718 key = (MonoTlsKey)read32 (ip + 2);
11719 g_assert (key < TLS_KEY_NUM);
11721 ins = mono_create_tls_get (cfg, key);
11723 ins->type = STACK_PTR;
11728 case CEE_MONO_DYN_CALL: {
11729 MonoCallInst *call;
11731 /* It would be easier to call a trampoline, but that would put an
11732 * extra frame on the stack, confusing exception handling. So
11733 * implement it inline using an opcode for now.
11736 if (!cfg->dyn_call_var) {
11737 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11738 /* prevent it from being register allocated */
11739 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11742 /* Has to use a call inst since the local regalloc expects it */
11743 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11744 ins = (MonoInst*)call;
11746 ins->sreg1 = sp [0]->dreg;
11747 ins->sreg2 = sp [1]->dreg;
11748 MONO_ADD_INS (cfg->cbb, ins);
11750 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
11753 inline_costs += 10 * num_calls++;
11757 case CEE_MONO_MEMORY_BARRIER: {
11759 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
11763 case CEE_MONO_ATOMIC_STORE_I4: {
11764 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
11770 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
11771 ins->dreg = sp [0]->dreg;
11772 ins->sreg1 = sp [1]->dreg;
11773 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
11774 MONO_ADD_INS (cfg->cbb, ins);
11779 case CEE_MONO_JIT_ATTACH: {
11780 MonoInst *args [16], *domain_ins;
11781 MonoInst *ad_ins, *jit_tls_ins;
11782 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11784 g_assert (!mono_threads_is_blocking_transition_enabled ());
11786 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11788 EMIT_NEW_PCONST (cfg, ins, NULL);
11789 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11791 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11792 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
11794 if (ad_ins && jit_tls_ins) {
11795 NEW_BBLOCK (cfg, next_bb);
11796 NEW_BBLOCK (cfg, call_bb);
11798 if (cfg->compile_aot) {
11799 /* AOT code is only used in the root domain */
11800 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11802 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11804 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11805 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11808 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11810 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11811 MONO_START_BB (cfg, call_bb);
11814 /* AOT code is only used in the root domain */
11815 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
11816 if (cfg->compile_aot) {
11820 * This is called on unattached threads, so it cannot go through the trampoline
11821 * infrastructure. Use an indirect call through a got slot initialized at load time
11824 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
11825 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
11827 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11829 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11832 MONO_START_BB (cfg, next_bb);
11837 case CEE_MONO_JIT_DETACH: {
11838 MonoInst *args [16];
11840 /* Restore the original domain */
11841 dreg = alloc_ireg (cfg);
11842 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11843 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11847 case CEE_MONO_CALLI_EXTRA_ARG: {
11849 MonoMethodSignature *fsig;
11853 * This is the same as CEE_CALLI, but passes an additional argument
11854 * to the called method in llvmonly mode.
11855 * This is only used by delegate invoke wrappers to call the
11856 * actual delegate method.
11858 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
11861 token = read32 (ip + 2);
11869 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
11872 if (cfg->llvm_only)
11873 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
11875 n = fsig->param_count + fsig->hasthis + 1;
11882 if (cfg->llvm_only) {
11884 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
11885 * cconv. This is set by mono_init_delegate ().
11887 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
11888 MonoInst *callee = addr;
11889 MonoInst *call, *localloc_ins;
11890 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
11891 int low_bit_reg = alloc_preg (cfg);
11893 NEW_BBLOCK (cfg, is_gsharedvt_bb);
11894 NEW_BBLOCK (cfg, end_bb);
11896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
11897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
11898 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
11900 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
11901 addr = emit_get_rgctx_sig (cfg, context_used,
11902 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
11904 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
11906 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
11907 ins->dreg = alloc_preg (cfg);
11908 ins->inst_imm = 2 * SIZEOF_VOID_P;
11909 MONO_ADD_INS (cfg->cbb, ins);
11910 localloc_ins = ins;
11911 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11912 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
11913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
11915 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
11916 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
11918 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
11919 MONO_START_BB (cfg, is_gsharedvt_bb);
11920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
11921 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
11922 ins->dreg = call->dreg;
11924 MONO_START_BB (cfg, end_bb);
11926 /* Caller uses a normal calling conv */
11928 MonoInst *callee = addr;
11929 MonoInst *call, *localloc_ins;
11930 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
11931 int low_bit_reg = alloc_preg (cfg);
11933 NEW_BBLOCK (cfg, is_gsharedvt_bb);
11934 NEW_BBLOCK (cfg, end_bb);
11936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
11937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
11938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
11940 /* Normal case: callee uses a normal cconv, no conversion is needed */
11941 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
11942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
11943 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
11944 MONO_START_BB (cfg, is_gsharedvt_bb);
11945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
11946 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
11947 MONO_ADD_INS (cfg->cbb, addr);
11949 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
11951 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
11952 ins->dreg = alloc_preg (cfg);
11953 ins->inst_imm = 2 * SIZEOF_VOID_P;
11954 MONO_ADD_INS (cfg->cbb, ins);
11955 localloc_ins = ins;
11956 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11957 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
11958 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
11960 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
11961 ins->dreg = call->dreg;
11962 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
11964 MONO_START_BB (cfg, end_bb);
11967 /* Same as CEE_CALLI */
11968 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
11970 * We pass the address to the gsharedvt trampoline in the rgctx reg
11972 MonoInst *callee = addr;
11974 addr = emit_get_rgctx_sig (cfg, context_used,
11975 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
11976 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
11978 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
11982 if (!MONO_TYPE_IS_VOID (fsig->ret))
11983 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
11985 CHECK_CFG_EXCEPTION;
11989 constrained_class = NULL;
11992 case CEE_MONO_LDDOMAIN:
11993 CHECK_STACK_OVF (1);
11994 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
11998 case CEE_MONO_GET_LAST_ERROR:
12000 CHECK_STACK_OVF (1);
12002 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12003 ins->dreg = alloc_dreg (cfg, STACK_I4);
12004 ins->type = STACK_I4;
12005 MONO_ADD_INS (cfg->cbb, ins);
12010 case CEE_MONO_GET_RGCTX_ARG:
12012 CHECK_STACK_OVF (1);
12014 mono_create_rgctx_var (cfg);
12016 MONO_INST_NEW (cfg, ins, OP_MOVE);
12017 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12018 ins->sreg1 = cfg->rgctx_var->dreg;
12019 ins->type = STACK_PTR;
12020 MONO_ADD_INS (cfg->cbb, ins);
12026 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12032 case CEE_PREFIX1: {
12035 case CEE_ARGLIST: {
12036 /* somewhat similar to LDTOKEN */
12037 MonoInst *addr, *vtvar;
12038 CHECK_STACK_OVF (1);
12039 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12041 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12042 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12044 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12045 ins->type = STACK_VTYPE;
12046 ins->klass = mono_defaults.argumenthandle_class;
12056 MonoInst *cmp, *arg1, *arg2;
12064 * The following transforms:
12065 * CEE_CEQ into OP_CEQ
12066 * CEE_CGT into OP_CGT
12067 * CEE_CGT_UN into OP_CGT_UN
12068 * CEE_CLT into OP_CLT
12069 * CEE_CLT_UN into OP_CLT_UN
12071 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12073 MONO_INST_NEW (cfg, ins, cmp->opcode);
12074 cmp->sreg1 = arg1->dreg;
12075 cmp->sreg2 = arg2->dreg;
12076 type_from_op (cfg, cmp, arg1, arg2);
12078 add_widen_op (cfg, cmp, &arg1, &arg2);
12079 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12080 cmp->opcode = OP_LCOMPARE;
12081 else if (arg1->type == STACK_R4)
12082 cmp->opcode = OP_RCOMPARE;
12083 else if (arg1->type == STACK_R8)
12084 cmp->opcode = OP_FCOMPARE;
12086 cmp->opcode = OP_ICOMPARE;
12087 MONO_ADD_INS (cfg->cbb, cmp);
12088 ins->type = STACK_I4;
12089 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12090 type_from_op (cfg, ins, arg1, arg2);
12092 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12094 * The backends expect the fceq opcodes to do the
12097 ins->sreg1 = cmp->sreg1;
12098 ins->sreg2 = cmp->sreg2;
12101 MONO_ADD_INS (cfg->cbb, ins);
12107 MonoInst *argconst;
12108 MonoMethod *cil_method;
12110 CHECK_STACK_OVF (1);
12112 n = read32 (ip + 2);
12113 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12116 mono_class_init (cmethod->klass);
12118 mono_save_token_info (cfg, image, n, cmethod);
12120 context_used = mini_method_check_context_used (cfg, cmethod);
12122 cil_method = cmethod;
12123 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12124 emit_method_access_failure (cfg, method, cil_method);
12126 if (mono_security_core_clr_enabled ())
12127 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12130 * Optimize the common case of ldftn+delegate creation
12132 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12133 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12134 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12135 MonoInst *target_ins, *handle_ins;
12136 MonoMethod *invoke;
12137 int invoke_context_used;
12139 invoke = mono_get_delegate_invoke (ctor_method->klass);
12140 if (!invoke || !mono_method_signature (invoke))
12143 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12145 target_ins = sp [-1];
12147 if (mono_security_core_clr_enabled ())
12148 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12150 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12151 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12152 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12154 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12158 /* FIXME: SGEN support */
12159 if (invoke_context_used == 0 || cfg->llvm_only) {
12161 if (cfg->verbose_level > 3)
12162 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12163 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12166 CHECK_CFG_EXCEPTION;
12176 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12177 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12181 inline_costs += 10 * num_calls++;
12184 case CEE_LDVIRTFTN: {
12185 MonoInst *args [2];
12189 n = read32 (ip + 2);
12190 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12193 mono_class_init (cmethod->klass);
12195 context_used = mini_method_check_context_used (cfg, cmethod);
12197 if (mono_security_core_clr_enabled ())
12198 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12201 * Optimize the common case of ldvirtftn+delegate creation
12203 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12204 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12205 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12206 MonoInst *target_ins, *handle_ins;
12207 MonoMethod *invoke;
12208 int invoke_context_used;
12209 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12211 invoke = mono_get_delegate_invoke (ctor_method->klass);
12212 if (!invoke || !mono_method_signature (invoke))
12215 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12217 target_ins = sp [-1];
12219 if (mono_security_core_clr_enabled ())
12220 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12222 /* FIXME: SGEN support */
12223 if (invoke_context_used == 0 || cfg->llvm_only) {
12225 if (cfg->verbose_level > 3)
12226 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12227 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12230 CHECK_CFG_EXCEPTION;
12243 args [1] = emit_get_rgctx_method (cfg, context_used,
12244 cmethod, MONO_RGCTX_INFO_METHOD);
12247 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12249 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12252 inline_costs += 10 * num_calls++;
12256 CHECK_STACK_OVF (1);
12258 n = read16 (ip + 2);
12260 EMIT_NEW_ARGLOAD (cfg, ins, n);
12265 CHECK_STACK_OVF (1);
12267 n = read16 (ip + 2);
12269 NEW_ARGLOADA (cfg, ins, n);
12270 MONO_ADD_INS (cfg->cbb, ins);
12278 n = read16 (ip + 2);
12280 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12282 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12286 CHECK_STACK_OVF (1);
12288 n = read16 (ip + 2);
12290 if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
12291 /* Avoid loading a struct just to load one of its fields */
12292 EMIT_NEW_LOCLOADA (cfg, ins, n);
12294 EMIT_NEW_LOCLOAD (cfg, ins, n);
12300 unsigned char *tmp_ip;
12301 CHECK_STACK_OVF (1);
12303 n = read16 (ip + 2);
12306 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12312 EMIT_NEW_LOCLOADA (cfg, ins, n);
12321 n = read16 (ip + 2);
12323 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12325 emit_stloc_ir (cfg, sp, header, n);
12329 case CEE_LOCALLOC: {
12331 MonoBasicBlock *non_zero_bb, *end_bb;
12332 int alloc_ptr = alloc_preg (cfg);
12334 if (sp != stack_start)
12336 if (cfg->method != method)
12338 * Inlining this into a loop in a parent could lead to
12339 * stack overflows which is different behavior than the
12340 * non-inlined case, thus disable inlining in this case.
12342 INLINE_FAILURE("localloc");
12344 NEW_BBLOCK (cfg, non_zero_bb);
12345 NEW_BBLOCK (cfg, end_bb);
12347 /* if size != zero */
12348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12351 //size is zero, so result is NULL
12352 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12355 MONO_START_BB (cfg, non_zero_bb);
12356 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12357 ins->dreg = alloc_ptr;
12358 ins->sreg1 = sp [0]->dreg;
12359 ins->type = STACK_PTR;
12360 MONO_ADD_INS (cfg->cbb, ins);
12362 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12364 ins->flags |= MONO_INST_INIT;
12366 MONO_START_BB (cfg, end_bb);
12367 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12368 ins->type = STACK_PTR;
12374 case CEE_ENDFILTER: {
12375 MonoExceptionClause *clause, *nearest;
12380 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12382 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12383 ins->sreg1 = (*sp)->dreg;
12384 MONO_ADD_INS (cfg->cbb, ins);
12385 start_new_bblock = 1;
12389 for (cc = 0; cc < header->num_clauses; ++cc) {
12390 clause = &header->clauses [cc];
12391 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12392 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12393 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12396 g_assert (nearest);
12397 if ((ip - header->code) != nearest->handler_offset)
12402 case CEE_UNALIGNED_:
12403 ins_flag |= MONO_INST_UNALIGNED;
12404 /* FIXME: record alignment? we can assume 1 for now */
12408 case CEE_VOLATILE_:
12409 ins_flag |= MONO_INST_VOLATILE;
12413 ins_flag |= MONO_INST_TAILCALL;
12414 cfg->flags |= MONO_CFG_HAS_TAIL;
12415 /* Can't inline tail calls at this time */
12416 inline_costs += 100000;
12423 token = read32 (ip + 2);
12424 klass = mini_get_class (method, token, generic_context);
12425 CHECK_TYPELOAD (klass);
12426 if (generic_class_is_reference_type (cfg, klass))
12427 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12429 mini_emit_initobj (cfg, *sp, NULL, klass);
12433 case CEE_CONSTRAINED_:
12435 token = read32 (ip + 2);
12436 constrained_class = mini_get_class (method, token, generic_context);
12437 CHECK_TYPELOAD (constrained_class);
12443 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12451 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12459 ins_flag |= MONO_INST_NOTYPECHECK;
12461 ins_flag |= MONO_INST_NORANGECHECK;
12462 /* we ignore the no-nullcheck for now since we
12463 * really do it explicitly only when doing callvirt->call
12467 case CEE_RETHROW: {
12469 int handler_offset = -1;
12471 for (i = 0; i < header->num_clauses; ++i) {
12472 MonoExceptionClause *clause = &header->clauses [i];
12473 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12474 handler_offset = clause->handler_offset;
12479 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12481 if (handler_offset == -1)
12484 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12485 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12486 ins->sreg1 = load->dreg;
12487 MONO_ADD_INS (cfg->cbb, ins);
12489 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12490 MONO_ADD_INS (cfg->cbb, ins);
12493 link_bblock (cfg, cfg->cbb, end_bblock);
12494 start_new_bblock = 1;
12502 CHECK_STACK_OVF (1);
12504 token = read32 (ip + 2);
12505 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12506 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12509 val = mono_type_size (type, &ialign);
12511 MonoClass *klass = mini_get_class (method, token, generic_context);
12512 CHECK_TYPELOAD (klass);
12514 val = mono_type_size (&klass->byval_arg, &ialign);
12516 if (mini_is_gsharedvt_klass (klass))
12517 GSHAREDVT_FAILURE (*ip);
12519 EMIT_NEW_ICONST (cfg, ins, val);
12524 case CEE_REFANYTYPE: {
12525 MonoInst *src_var, *src;
12527 GSHAREDVT_FAILURE (*ip);
12533 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12535 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12536 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12537 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12542 case CEE_READONLY_:
12555 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12565 g_warning ("opcode 0x%02x not handled", *ip);
12569 if (start_new_bblock != 1)
12572 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12573 if (cfg->cbb->next_bb) {
12574 /* This could already be set because of inlining, #693905 */
12575 MonoBasicBlock *bb = cfg->cbb;
12577 while (bb->next_bb)
12579 bb->next_bb = end_bblock;
12581 cfg->cbb->next_bb = end_bblock;
12584 if (cfg->method == method && cfg->domainvar) {
12586 MonoInst *get_domain;
12588 cfg->cbb = init_localsbb;
12590 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12591 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12592 MONO_ADD_INS (cfg->cbb, store);
12595 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12596 if (cfg->compile_aot)
12597 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12598 mono_get_got_var (cfg);
12601 if (cfg->method == method && cfg->got_var)
12602 mono_emit_load_got_addr (cfg);
12604 if (init_localsbb) {
12605 cfg->cbb = init_localsbb;
12607 for (i = 0; i < header->num_locals; ++i) {
12608 emit_init_local (cfg, i, header->locals [i], init_locals);
12612 if (cfg->init_ref_vars && cfg->method == method) {
12613 /* Emit initialization for ref vars */
12614 // FIXME: Avoid duplication initialization for IL locals.
12615 for (i = 0; i < cfg->num_varinfo; ++i) {
12616 MonoInst *ins = cfg->varinfo [i];
12618 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12619 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12623 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12624 cfg->cbb = init_localsbb;
12625 emit_push_lmf (cfg);
12628 cfg->cbb = init_localsbb;
12629 mini_profiler_emit_enter (cfg);
12632 MonoBasicBlock *bb;
12635 * Make seq points at backward branch targets interruptable.
12637 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12638 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12639 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12642 /* Add a sequence point for method entry/exit events */
12643 if (seq_points && cfg->gen_sdb_seq_points) {
12644 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12645 MONO_ADD_INS (init_localsbb, ins);
12646 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12647 MONO_ADD_INS (cfg->bb_exit, ins);
12651 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12652 * the code they refer to was dead (#11880).
12654 if (sym_seq_points) {
12655 for (i = 0; i < header->code_size; ++i) {
12656 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12659 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12660 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12667 if (cfg->method == method) {
12668 MonoBasicBlock *bb;
12669 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12670 if (bb == cfg->bb_init)
12673 bb->region = mono_find_block_region (cfg, bb->real_offset);
12675 mono_create_spvar_for_region (cfg, bb->region);
12676 if (cfg->verbose_level > 2)
12677 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12680 MonoBasicBlock *bb;
12681 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12682 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12683 bb->real_offset = inline_offset;
12687 if (inline_costs < 0) {
12690 /* Method is too large */
12691 mname = mono_method_full_name (method, TRUE);
12692 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12696 if ((cfg->verbose_level > 2) && (cfg->method == method))
12697 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12702 g_assert (!mono_error_ok (&cfg->error));
12706 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12710 set_exception_type_from_invalid_il (cfg, method, ip);
12714 g_slist_free (class_inits);
12715 mono_basic_block_free (original_bb);
12716 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12717 if (cfg->exception_type)
12720 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode to its immediate-source
 * counterpart, so a constant value can be stored without first being
 * loaded into a register.  Aborts on opcodes that have no IMM form.
 * NOTE(review): the return-type line, braces and the 'switch (opcode)'
 * header appear to be elided from this listing — confirm against the
 * full source before editing.
 */
12724 store_membase_reg_to_store_membase_imm (int opcode)
12727 case OP_STORE_MEMBASE_REG:
12728 return OP_STORE_MEMBASE_IMM;
12729 case OP_STOREI1_MEMBASE_REG:
12730 return OP_STOREI1_MEMBASE_IMM;
12731 case OP_STOREI2_MEMBASE_REG:
12732 return OP_STOREI2_MEMBASE_IMM;
12733 case OP_STOREI4_MEMBASE_REG:
12734 return OP_STOREI4_MEMBASE_IMM;
12735 case OP_STOREI8_MEMBASE_REG:
12736 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes. */
12738 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an ALU/compare/store/call opcode to the variant taking an
 * immediate operand, used when one source of the instruction is a
 * constant.
 * NOTE(review): the 'case' labels pairing each input opcode with its
 * returned *_IMM opcode (and the matching '#endif' lines) are elided
 * from this listing; the pairing cannot be verified from here.
 */
12745 mono_op_to_op_imm (int opcode)
/* 32 bit integer arithmetic, logical and shift ops. */
12749 return OP_IADD_IMM;
12751 return OP_ISUB_IMM;
12753 return OP_IDIV_IMM;
12755 return OP_IDIV_UN_IMM;
12757 return OP_IREM_IMM;
12759 return OP_IREM_UN_IMM;
12761 return OP_IMUL_IMM;
12763 return OP_IAND_IMM;
12767 return OP_IXOR_IMM;
12769 return OP_ISHL_IMM;
12771 return OP_ISHR_IMM;
12773 return OP_ISHR_UN_IMM;
/* 64 bit ops; shifts-by-immediate are available on 32 bit too. */
12776 return OP_LADD_IMM;
12778 return OP_LSUB_IMM;
12780 return OP_LAND_IMM;
12784 return OP_LXOR_IMM;
12786 return OP_LSHL_IMM;
12788 return OP_LSHR_IMM;
12790 return OP_LSHR_UN_IMM;
12791 #if SIZEOF_REGISTER == 8
/* 64 bit remainder only has an immediate form on 64 bit hosts. */
12793 return OP_LREM_IMM;
/* Compares. */
12797 return OP_COMPARE_IMM;
12799 return OP_ICOMPARE_IMM;
12801 return OP_LCOMPARE_IMM;
/* Stores of a constant value. */
12803 case OP_STORE_MEMBASE_REG:
12804 return OP_STORE_MEMBASE_IMM;
12805 case OP_STOREI1_MEMBASE_REG:
12806 return OP_STOREI1_MEMBASE_IMM;
12807 case OP_STOREI2_MEMBASE_REG:
12808 return OP_STOREI2_MEMBASE_IMM;
12809 case OP_STOREI4_MEMBASE_REG:
12810 return OP_STOREI4_MEMBASE_IMM;
12812 #if defined(TARGET_X86) || defined (TARGET_AMD64)
/* x86-family specific opcodes. */
12814 return OP_X86_PUSH_IMM;
12815 case OP_X86_COMPARE_MEMBASE_REG:
12816 return OP_X86_COMPARE_MEMBASE_IMM;
12818 #if defined(TARGET_AMD64)
12819 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12820 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Indirect calls through a constant address become direct calls. */
12822 case OP_VOIDCALL_REG:
12823 return OP_VOIDCALL;
12831 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the corresponding typed
 * LOAD*_MEMBASE IR opcode.  Aborts on anything else.
 * NOTE(review): most 'case CEE_LDIND_*' labels are elided from this
 * listing; only the CEE_LDIND_REF label is visible.
 */
12838 ldind_to_load_membase (int opcode)
12842 return OP_LOADI1_MEMBASE;
12844 return OP_LOADU1_MEMBASE;
12846 return OP_LOADI2_MEMBASE;
12848 return OP_LOADU2_MEMBASE;
12850 return OP_LOADI4_MEMBASE;
12852 return OP_LOADU4_MEMBASE;
12854 return OP_LOAD_MEMBASE;
/* Object references load as a full native word. */
12855 case CEE_LDIND_REF:
12856 return OP_LOAD_MEMBASE;
12858 return OP_LOADI8_MEMBASE;
12860 return OP_LOADR4_MEMBASE;
12862 return OP_LOADR8_MEMBASE;
12864 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the corresponding typed
 * STORE*_MEMBASE_REG IR opcode.  Aborts on anything else.
 * NOTE(review): most 'case CEE_STIND_*' labels are elided from this
 * listing; only the CEE_STIND_REF label is visible.
 */
12871 stind_to_store_membase (int opcode)
12875 return OP_STOREI1_MEMBASE_REG;
12877 return OP_STOREI2_MEMBASE_REG;
12879 return OP_STOREI4_MEMBASE_REG;
/* Object references store as a full native word. */
12881 case CEE_STIND_REF:
12882 return OP_STORE_MEMBASE_REG;
12884 return OP_STOREI8_MEMBASE_REG;
12886 return OP_STORER4_MEMBASE_REG;
12888 return OP_STORER8_MEMBASE_REG;
12890 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (LOAD*_MEMBASE) to the absolute-address
 * form (LOAD*_MEM), which only the x86/amd64 backends implement.
 * Presumably returns a sentinel (-1) on other targets / unhandled
 * opcodes — the fall-through lines are elided from this listing.
 */
12897 mono_load_membase_to_load_mem (int opcode)
12899 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12900 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12902 case OP_LOAD_MEMBASE:
12903 return OP_LOAD_MEM;
12904 case OP_LOADU1_MEMBASE:
12905 return OP_LOADU1_MEM;
12906 case OP_LOADU2_MEMBASE:
12907 return OP_LOADU2_MEM;
12908 case OP_LOADI4_MEMBASE:
12909 return OP_LOADI4_MEM;
12910 case OP_LOADU4_MEMBASE:
12911 return OP_LOADU4_MEM;
12912 #if SIZEOF_REGISTER == 8
/* 8-byte absolute loads only exist on 64 bit hosts. */
12913 case OP_LOADI8_MEMBASE:
12914 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back with
 * STORE_OPCODE, return the fused read-modify-write *_MEMBASE form so
 * the load+op+store can be collapsed into one instruction.  Only the
 * x86/amd64 backends support this; the guard below rejects store
 * widths the fused forms cannot express.
 * NOTE(review): the 'case' labels pairing each ALU opcode with its
 * returned _MEMBASE opcode are elided from this listing.
 */
12923 op_to_op_dest_membase (int store_opcode, int opcode)
12925 #if defined(TARGET_X86)
/* Only native-word and 4-byte stores can be fused on x86. */
12926 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12931 return OP_X86_ADD_MEMBASE_REG;
12933 return OP_X86_SUB_MEMBASE_REG;
12935 return OP_X86_AND_MEMBASE_REG;
12937 return OP_X86_OR_MEMBASE_REG;
12939 return OP_X86_XOR_MEMBASE_REG;
12942 return OP_X86_ADD_MEMBASE_IMM;
12945 return OP_X86_SUB_MEMBASE_IMM;
12948 return OP_X86_AND_MEMBASE_IMM;
12951 return OP_X86_OR_MEMBASE_IMM;
12954 return OP_X86_XOR_MEMBASE_IMM;
12960 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte stores to be fused. */
12961 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit operations reuse the X86_ opcodes ... */
12966 return OP_X86_ADD_MEMBASE_REG;
12968 return OP_X86_SUB_MEMBASE_REG;
12970 return OP_X86_AND_MEMBASE_REG;
12972 return OP_X86_OR_MEMBASE_REG;
12974 return OP_X86_XOR_MEMBASE_REG;
12976 return OP_X86_ADD_MEMBASE_IMM;
12978 return OP_X86_SUB_MEMBASE_IMM;
12980 return OP_X86_AND_MEMBASE_IMM;
12982 return OP_X86_OR_MEMBASE_IMM;
12984 return OP_X86_XOR_MEMBASE_IMM;
/* ... while 64 bit operations get dedicated AMD64_ opcodes. */
12986 return OP_AMD64_ADD_MEMBASE_REG;
12988 return OP_AMD64_SUB_MEMBASE_REG;
12990 return OP_AMD64_AND_MEMBASE_REG;
12992 return OP_AMD64_OR_MEMBASE_REG;
12994 return OP_AMD64_XOR_MEMBASE_REG;
12997 return OP_AMD64_ADD_MEMBASE_IMM;
13000 return OP_AMD64_SUB_MEMBASE_IMM;
13003 return OP_AMD64_AND_MEMBASE_IMM;
13006 return OP_AMD64_OR_MEMBASE_IMM;
13009 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with the byte store of its
 * result, yielding an x86 SETcc-to-memory form.  The visible cases
 * cover equality/inequality; labels and the fall-through return are
 * elided from this listing.
 */
13019 op_to_op_store_membase (int store_opcode, int opcode)
13021 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only 1-byte stores can be fused. */
13024 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13025 return OP_X86_SETEQ_MEMBASE;
13027 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13028 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When the first source of OPCODE is produced by LOAD_OPCODE (a
 * membase load), return a fused opcode that reads the operand from
 * memory directly, avoiding the intermediate register.  x86/amd64
 * only; returns a sentinel otherwise (elided here).
 */
13036 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13039 /* FIXME: This has sign extension issues */
13041 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13042 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only full-width loads can be fused below. */
13045 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13050 return OP_X86_PUSH_MEMBASE;
13051 case OP_COMPARE_IMM:
13052 case OP_ICOMPARE_IMM:
13053 return OP_X86_COMPARE_MEMBASE_IMM;
13056 return OP_X86_COMPARE_MEMBASE_REG;
13060 #ifdef TARGET_AMD64
13061 /* FIXME: This has sign extension issues */
13063 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13064 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Native-word loads (outside ILP32) and 8-byte loads push 64 bits. */
13069 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13070 return OP_X86_PUSH_MEMBASE;
/* NOTE(review): the next comment opens a block whose closing
 * delimiter is elided from this listing; the enclosed cases look
 * intentionally disabled upstream — confirm before re-enabling. */
13072 /* FIXME: This only works for 32 bit immediates
13073 case OP_COMPARE_IMM:
13074 case OP_LCOMPARE_IMM:
13075 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13076 return OP_AMD64_COMPARE_MEMBASE_IMM;
13078 case OP_ICOMPARE_IMM:
13079 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13080 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick the 32 vs 64 bit form by load width,
 * treating OP_LOAD_MEMBASE as 32 bit under ILP32. */
13084 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13085 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13086 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13087 return OP_AMD64_COMPARE_MEMBASE_REG;
13090 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13091 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   When the second source of OPCODE is produced by LOAD_OPCODE,
 * return the fused reg-op-memory form so the operand is read
 * straight from memory.  x86/amd64 only; returns a sentinel
 * otherwise (elided here).
 */
13100 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only full-width loads can be fused. */
13103 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13109 return OP_X86_COMPARE_REG_MEMBASE;
13111 return OP_X86_ADD_REG_MEMBASE;
13113 return OP_X86_SUB_REG_MEMBASE;
13115 return OP_X86_AND_REG_MEMBASE;
13117 return OP_X86_OR_REG_MEMBASE;
13119 return OP_X86_XOR_REG_MEMBASE;
13123 #ifdef TARGET_AMD64
/* 32 bit loads (incl. native-word loads under ILP32) use the 32 bit
 * fused forms; note the X86_ opcodes are shared for the ALU cases. */
13124 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13127 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13129 return OP_X86_ADD_REG_MEMBASE;
13131 return OP_X86_SUB_REG_MEMBASE;
13133 return OP_X86_AND_REG_MEMBASE;
13135 return OP_X86_OR_REG_MEMBASE;
13137 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit loads use the dedicated AMD64_ fused forms. */
13139 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13143 return OP_AMD64_COMPARE_REG_MEMBASE;
13145 return OP_AMD64_ADD_REG_MEMBASE;
13147 return OP_AMD64_SUB_REG_MEMBASE;
13149 return OP_AMD64_AND_REG_MEMBASE;
13151 return OP_AMD64_OR_REG_MEMBASE;
13153 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first rejects opcodes whose
 * immediate form would be software-emulated on this target (long
 * shifts on 32 bit hosts without native support, and mul/div/rem
 * under the EMULATE_* configs) — the rejecting case labels are
 * elided from this listing.
 */
13162 mono_op_to_op_imm_noemul (int opcode)
13165 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13171 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13178 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can use the regular immediate mapping. */
13183 return mono_op_to_op_imm (opcode);
/*
 * Pass overview: scan every instruction once, recording for each vreg
 * the bblock that uses it; vregs seen in two different bblocks become
 * 'global' (backed by a MonoInst variable), while variables confined
 * to one bblock are demoted back to plain local vregs, and the
 * varinfo/vars tables are compacted afterwards.
 */
13188 * mono_handle_global_vregs:
13190 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13194 mono_handle_global_vregs (MonoCompile *cfg)
13196 gint32 *vreg_to_bb;
13197 MonoBasicBlock *bb;
/* NOTE(review): element type here is gint32, so sizeof (gint32*)
 * over-allocates on 64 bit hosts, and the '+ 1' binds outside the
 * multiplication — presumably sizeof (gint32) * (cfg->next_vreg + 1)
 * was intended.  Harmless (over-allocation only), but confirm. */
13200 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13202 #ifdef MONO_ARCH_SIMD_INTRINSICS
13203 if (cfg->uses_simd_intrinsics)
13204 mono_simd_simplify_indirection (cfg);
13207 /* Find local vregs used in more than one bb */
13208 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13209 MonoInst *ins = bb->code;
13210 int block_num = bb->block_num;
13212 if (cfg->verbose_level > 2)
13213 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13216 for (; ins; ins = ins->next) {
13217 const char *spec = INS_INFO (ins->opcode);
13218 int regtype = 0, regindex;
13221 if (G_UNLIKELY (cfg->verbose_level > 2))
13222 mono_print_ins (ins);
/* IL opcodes must all have been lowered to IR opcodes by now. */
13224 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (index 0) then sreg1..sreg3; ' ' in the spec string
 * means the instruction has no register at that position. */
13226 for (regindex = 0; regindex < 4; regindex ++) {
13229 if (regindex == 0) {
13230 regtype = spec [MONO_INST_DEST];
13231 if (regtype == ' ')
13234 } else if (regindex == 1) {
13235 regtype = spec [MONO_INST_SRC1];
13236 if (regtype == ' ')
13239 } else if (regindex == 2) {
13240 regtype = spec [MONO_INST_SRC2];
13241 if (regtype == ' ')
13244 } else if (regindex == 3) {
13245 regtype = spec [MONO_INST_SRC3];
13246 if (regtype == ' ')
13251 #if SIZEOF_REGISTER == 4
13252 /* In the LLVM case, the long opcodes are not decomposed */
13253 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13255 * Since some instructions reference the original long vreg,
13256 * and some reference the two component vregs, it is quite hard
13257 * to determine when it needs to be global. So be conservative.
13259 if (!get_vreg_to_inst (cfg, vreg)) {
13260 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13262 if (cfg->verbose_level > 2)
13263 printf ("LONG VREG R%d made global.\n", vreg);
13267 * Make the component vregs volatile since the optimizations can
13268 * get confused otherwise.
13270 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13271 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13275 g_assert (vreg != -1);
/* vreg_to_bb encoding: 0 = unseen, -1 = used in multiple bblocks,
 * otherwise block_num + 1 of the single bblock seen so far. */
13277 prev_bb = vreg_to_bb [vreg];
13278 if (prev_bb == 0) {
13279 /* 0 is a valid block num */
13280 vreg_to_bb [vreg] = block_num + 1;
13281 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never made global. */
13282 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13285 if (!get_vreg_to_inst (cfg, vreg)) {
13286 if (G_UNLIKELY (cfg->verbose_level > 2))
13287 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the backing variable with a type matching regtype. */
13291 if (vreg_is_ref (cfg, vreg))
13292 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13294 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13297 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13300 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13304 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13307 g_assert_not_reached ();
13311 /* Flag as having been used in more than one bb */
13312 vreg_to_bb [vreg] = -1;
13318 /* If a variable is used in only one bblock, convert it into a local vreg */
13319 for (i = 0; i < cfg->num_varinfo; i++) {
13320 MonoInst *var = cfg->varinfo [i];
13321 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13323 switch (var->type) {
13329 #if SIZEOF_REGISTER == 8
13332 #if !defined(TARGET_X86)
13333 /* Enabling this screws up the fp stack on x86 */
13336 if (mono_arch_is_soft_float ())
13340 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13344 /* Arguments are implicitly global */
13345 /* Putting R4 vars into registers doesn't work currently */
13346 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13347 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13349 * Make that the variable's liveness interval doesn't contain a call, since
13350 * that would cause the lvreg to be spilled, making the whole optimization
13353 /* This is too slow for JIT compilation */
/* NOTE(review): this AOT-only slow path indexes vreg_to_bb (a gint32
 * array) as a pointer below — it looks like it was written against
 * an older MonoBasicBlock** representation and is likely disabled
 * (e.g. '#if 0') in the elided lines; confirm before relying on it. */
13355 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13357 int def_index, call_index, ins_index;
13358 gboolean spilled = FALSE;
13363 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13364 const char *spec = INS_INFO (ins->opcode);
13366 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13367 def_index = ins_index;
/* NOTE(review): both clauses below test SRC1/sreg1 — the second is a
 * byte-for-byte duplicate and was presumably meant to test
 * SRC2/sreg2; as written, uses through sreg2 are missed. */
13369 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13370 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13371 if (call_index > def_index) {
13377 if (MONO_IS_CALL (ins))
13378 call_index = ins_index;
13388 if (G_UNLIKELY (cfg->verbose_level > 2))
13389 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: drop the variable and keep the plain vreg. */
13390 var->flags |= MONO_INST_IS_DEAD;
13391 cfg->vreg_to_inst [var->dreg] = NULL;
13398 * Compress the varinfo and vars tables so the liveness computation is faster and
13399 * takes up less space.
13402 for (i = 0; i < cfg->num_varinfo; ++i) {
13403 MonoInst *var = cfg->varinfo [i];
13404 if (pos < i && cfg->locals_start == i)
13405 cfg->locals_start = pos;
13406 if (!(var->flags & MONO_INST_IS_DEAD)) {
13408 cfg->varinfo [pos] = cfg->varinfo [i];
13409 cfg->varinfo [pos]->inst_c0 = pos;
13410 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13411 cfg->vars [pos].idx = pos;
13412 #if SIZEOF_REGISTER == 4
13413 if (cfg->varinfo [pos]->type == STACK_I8) {
13414 /* Modify the two component vars too */
13417 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13418 var1->inst_c0 = pos;
13419 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13420 var1->inst_c0 = pos;
13427 cfg->num_varinfo = pos;
13428 if (cfg->locals_start > cfg->num_varinfo)
13429 cfg->locals_start = cfg->num_varinfo;
13433 * mono_allocate_gsharedvt_vars:
13435 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13436 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13439 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* Mapping is offset by one so 0 can mean "no gsharedvt entry". */
13443 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13445 for (i = 0; i < cfg->num_varinfo; ++i) {
13446 MonoInst *ins = cfg->varinfo [i];
13449 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get a runtime-info slot; earlier entries (arguments)
 * are marked with -1 and addressed via their register offset. */
13450 if (i >= cfg->locals_start) {
13452 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13453 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13454 ins->opcode = OP_GSHAREDVT_LOCAL;
13455 ins->inst_imm = idx;
13458 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13459 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13466 * mono_spill_global_vars:
13468 * Generate spill code for variables which are not allocated to registers,
13469 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13470 * code is generated which could be optimized by the local optimization passes.
13473 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13475 MonoBasicBlock *bb;
13477 int orig_next_vreg;
13478 guint32 *vreg_to_lvreg;
13480 guint32 i, lvregs_len, lvregs_size;
13481 gboolean dest_has_lvreg = FALSE;
13482 MonoStackType stacktypes [128];
13483 MonoInst **live_range_start, **live_range_end;
13484 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13486 *need_local_opts = FALSE;
13488 memset (spec2, 0, sizeof (spec2));
13490 /* FIXME: Move this function to mini.c */
13491 stacktypes ['i'] = STACK_PTR;
13492 stacktypes ['l'] = STACK_I8;
13493 stacktypes ['f'] = STACK_R8;
13494 #ifdef MONO_ARCH_SIMD_INTRINSICS
13495 stacktypes ['x'] = STACK_VTYPE;
13498 #if SIZEOF_REGISTER == 4
13499 /* Create MonoInsts for longs */
13500 for (i = 0; i < cfg->num_varinfo; i++) {
13501 MonoInst *ins = cfg->varinfo [i];
13503 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13504 switch (ins->type) {
13509 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13512 g_assert (ins->opcode == OP_REGOFFSET);
13514 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13516 tree->opcode = OP_REGOFFSET;
13517 tree->inst_basereg = ins->inst_basereg;
13518 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13520 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13522 tree->opcode = OP_REGOFFSET;
13523 tree->inst_basereg = ins->inst_basereg;
13524 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13534 if (cfg->compute_gc_maps) {
13535 /* registers need liveness info even for !non refs */
13536 for (i = 0; i < cfg->num_varinfo; i++) {
13537 MonoInst *ins = cfg->varinfo [i];
13539 if (ins->opcode == OP_REGVAR)
13540 ins->flags |= MONO_INST_GC_TRACK;
13544 /* FIXME: widening and truncation */
13547 * As an optimization, when a variable allocated to the stack is first loaded into
13548 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13549 * the variable again.
13551 orig_next_vreg = cfg->next_vreg;
13552 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13553 lvregs_size = 1024;
13554 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13558 * These arrays contain the first and last instructions accessing a given
13560 * Since we emit bblocks in the same order we process them here, and we
13561 * don't split live ranges, these will precisely describe the live range of
13562 * the variable, i.e. the instruction range where a valid value can be found
13563 * in the variables location.
13564 * The live range is computed using the liveness info computed by the liveness pass.
13565 * We can't use vmv->range, since that is an abstract live range, and we need
13566 * one which is instruction precise.
13567 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13569 /* FIXME: Only do this if debugging info is requested */
13570 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13571 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13572 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13573 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13575 /* Add spill loads/stores */
13576 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13579 if (cfg->verbose_level > 2)
13580 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13582 /* Clear vreg_to_lvreg array */
13583 for (i = 0; i < lvregs_len; i++)
13584 vreg_to_lvreg [lvregs [i]] = 0;
13588 MONO_BB_FOR_EACH_INS (bb, ins) {
13589 const char *spec = INS_INFO (ins->opcode);
13590 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13591 gboolean store, no_lvreg;
13592 int sregs [MONO_MAX_SRC_REGS];
13594 if (G_UNLIKELY (cfg->verbose_level > 2))
13595 mono_print_ins (ins);
13597 if (ins->opcode == OP_NOP)
13601 * We handle LDADDR here as well, since it can only be decomposed
13602 * when variable addresses are known.
13604 if (ins->opcode == OP_LDADDR) {
13605 MonoInst *var = (MonoInst *)ins->inst_p0;
13607 if (var->opcode == OP_VTARG_ADDR) {
13608 /* Happens on SPARC/S390 where vtypes are passed by reference */
13609 MonoInst *vtaddr = var->inst_left;
13610 if (vtaddr->opcode == OP_REGVAR) {
13611 ins->opcode = OP_MOVE;
13612 ins->sreg1 = vtaddr->dreg;
13614 else if (var->inst_left->opcode == OP_REGOFFSET) {
13615 ins->opcode = OP_LOAD_MEMBASE;
13616 ins->inst_basereg = vtaddr->inst_basereg;
13617 ins->inst_offset = vtaddr->inst_offset;
13620 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13621 /* gsharedvt arg passed by ref */
13622 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13624 ins->opcode = OP_LOAD_MEMBASE;
13625 ins->inst_basereg = var->inst_basereg;
13626 ins->inst_offset = var->inst_offset;
13627 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13628 MonoInst *load, *load2, *load3;
13629 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13630 int reg1, reg2, reg3;
13631 MonoInst *info_var = cfg->gsharedvt_info_var;
13632 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13636 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13639 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13641 g_assert (info_var);
13642 g_assert (locals_var);
13644 /* Mark the instruction used to compute the locals var as used */
13645 cfg->gsharedvt_locals_var_ins = NULL;
13647 /* Load the offset */
13648 if (info_var->opcode == OP_REGOFFSET) {
13649 reg1 = alloc_ireg (cfg);
13650 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13651 } else if (info_var->opcode == OP_REGVAR) {
13653 reg1 = info_var->dreg;
13655 g_assert_not_reached ();
13657 reg2 = alloc_ireg (cfg);
13658 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13659 /* Load the locals area address */
13660 reg3 = alloc_ireg (cfg);
13661 if (locals_var->opcode == OP_REGOFFSET) {
13662 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13663 } else if (locals_var->opcode == OP_REGVAR) {
13664 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13666 g_assert_not_reached ();
13668 /* Compute the address */
13669 ins->opcode = OP_PADD;
13673 mono_bblock_insert_before_ins (bb, ins, load3);
13674 mono_bblock_insert_before_ins (bb, load3, load2);
13676 mono_bblock_insert_before_ins (bb, load2, load);
13678 g_assert (var->opcode == OP_REGOFFSET);
13680 ins->opcode = OP_ADD_IMM;
13681 ins->sreg1 = var->inst_basereg;
13682 ins->inst_imm = var->inst_offset;
13685 *need_local_opts = TRUE;
13686 spec = INS_INFO (ins->opcode);
13689 if (ins->opcode < MONO_CEE_LAST) {
13690 mono_print_ins (ins);
13691 g_assert_not_reached ();
13695 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13699 if (MONO_IS_STORE_MEMBASE (ins)) {
13700 tmp_reg = ins->dreg;
13701 ins->dreg = ins->sreg2;
13702 ins->sreg2 = tmp_reg;
13705 spec2 [MONO_INST_DEST] = ' ';
13706 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13707 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13708 spec2 [MONO_INST_SRC3] = ' ';
13710 } else if (MONO_IS_STORE_MEMINDEX (ins))
13711 g_assert_not_reached ();
13716 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13717 printf ("\t %.3s %d", spec, ins->dreg);
13718 num_sregs = mono_inst_get_src_registers (ins, sregs);
13719 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13720 printf (" %d", sregs [srcindex]);
13727 regtype = spec [MONO_INST_DEST];
13728 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13731 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13732 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13733 MonoInst *store_ins;
13735 MonoInst *def_ins = ins;
13736 int dreg = ins->dreg; /* The original vreg */
13738 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13740 if (var->opcode == OP_REGVAR) {
13741 ins->dreg = var->dreg;
13742 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13744 * Instead of emitting a load+store, use a _membase opcode.
13746 g_assert (var->opcode == OP_REGOFFSET);
13747 if (ins->opcode == OP_MOVE) {
13751 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13752 ins->inst_basereg = var->inst_basereg;
13753 ins->inst_offset = var->inst_offset;
13756 spec = INS_INFO (ins->opcode);
13760 g_assert (var->opcode == OP_REGOFFSET);
13762 prev_dreg = ins->dreg;
13764 /* Invalidate any previous lvreg for this vreg */
13765 vreg_to_lvreg [ins->dreg] = 0;
13769 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13771 store_opcode = OP_STOREI8_MEMBASE_REG;
13774 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13776 #if SIZEOF_REGISTER != 8
13777 if (regtype == 'l') {
13778 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
13779 mono_bblock_insert_after_ins (bb, ins, store_ins);
13780 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
13781 mono_bblock_insert_after_ins (bb, ins, store_ins);
13782 def_ins = store_ins;
13787 g_assert (store_opcode != OP_STOREV_MEMBASE);
13789 /* Try to fuse the store into the instruction itself */
13790 /* FIXME: Add more instructions */
13791 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13792 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13793 ins->inst_imm = ins->inst_c0;
13794 ins->inst_destbasereg = var->inst_basereg;
13795 ins->inst_offset = var->inst_offset;
13796 spec = INS_INFO (ins->opcode);
13797 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
13798 ins->opcode = store_opcode;
13799 ins->inst_destbasereg = var->inst_basereg;
13800 ins->inst_offset = var->inst_offset;
13804 tmp_reg = ins->dreg;
13805 ins->dreg = ins->sreg2;
13806 ins->sreg2 = tmp_reg;
13809 spec2 [MONO_INST_DEST] = ' ';
13810 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13811 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13812 spec2 [MONO_INST_SRC3] = ' ';
13814 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13815 // FIXME: The backends expect the base reg to be in inst_basereg
13816 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13818 ins->inst_basereg = var->inst_basereg;
13819 ins->inst_offset = var->inst_offset;
13820 spec = INS_INFO (ins->opcode);
13822 /* printf ("INS: "); mono_print_ins (ins); */
13823 /* Create a store instruction */
13824 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13826 /* Insert it after the instruction */
13827 mono_bblock_insert_after_ins (bb, ins, store_ins);
13829 def_ins = store_ins;
13832 * We can't assign ins->dreg to var->dreg here, since the
13833 * sregs could use it. So set a flag, and do it after
13836 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13837 dest_has_lvreg = TRUE;
13842 if (def_ins && !live_range_start [dreg]) {
13843 live_range_start [dreg] = def_ins;
13844 live_range_start_bb [dreg] = bb;
13847 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13850 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13851 tmp->inst_c1 = dreg;
13852 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13859 num_sregs = mono_inst_get_src_registers (ins, sregs);
13860 for (srcindex = 0; srcindex < 3; ++srcindex) {
13861 regtype = spec [MONO_INST_SRC1 + srcindex];
13862 sreg = sregs [srcindex];
13864 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13865 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13866 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13867 MonoInst *use_ins = ins;
13868 MonoInst *load_ins;
13869 guint32 load_opcode;
13871 if (var->opcode == OP_REGVAR) {
13872 sregs [srcindex] = var->dreg;
13873 //mono_inst_set_src_registers (ins, sregs);
13874 live_range_end [sreg] = use_ins;
13875 live_range_end_bb [sreg] = bb;
13877 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13880 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13881 /* var->dreg is a hreg */
13882 tmp->inst_c1 = sreg;
13883 mono_bblock_insert_after_ins (bb, ins, tmp);
13889 g_assert (var->opcode == OP_REGOFFSET);
13891 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13893 g_assert (load_opcode != OP_LOADV_MEMBASE);
13895 if (vreg_to_lvreg [sreg]) {
13896 g_assert (vreg_to_lvreg [sreg] != -1);
13898 /* The variable is already loaded to an lvreg */
13899 if (G_UNLIKELY (cfg->verbose_level > 2))
13900 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13901 sregs [srcindex] = vreg_to_lvreg [sreg];
13902 //mono_inst_set_src_registers (ins, sregs);
13906 /* Try to fuse the load into the instruction */
13907 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
13908 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
13909 sregs [0] = var->inst_basereg;
13910 //mono_inst_set_src_registers (ins, sregs);
13911 ins->inst_offset = var->inst_offset;
13912 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
13913 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
13914 sregs [1] = var->inst_basereg;
13915 //mono_inst_set_src_registers (ins, sregs);
13916 ins->inst_offset = var->inst_offset;
13918 if (MONO_IS_REAL_MOVE (ins)) {
13919 ins->opcode = OP_NOP;
13922 //printf ("%d ", srcindex); mono_print_ins (ins);
13924 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13926 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13927 if (var->dreg == prev_dreg) {
13929 * sreg refers to the value loaded by the load
13930 * emitted below, but we need to use ins->dreg
13931 * since it refers to the store emitted earlier.
13935 g_assert (sreg != -1);
13936 vreg_to_lvreg [var->dreg] = sreg;
13937 if (lvregs_len >= lvregs_size) {
13938 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
13939 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
13940 lvregs = new_lvregs;
13943 lvregs [lvregs_len ++] = var->dreg;
13947 sregs [srcindex] = sreg;
13948 //mono_inst_set_src_registers (ins, sregs);
13950 #if SIZEOF_REGISTER != 8
13951 if (regtype == 'l') {
13952 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13953 mono_bblock_insert_before_ins (bb, ins, load_ins);
13954 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13955 mono_bblock_insert_before_ins (bb, ins, load_ins);
13956 use_ins = load_ins;
13961 #if SIZEOF_REGISTER == 4
13962 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13964 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13965 mono_bblock_insert_before_ins (bb, ins, load_ins);
13966 use_ins = load_ins;
13970 if (var->dreg < orig_next_vreg) {
13971 live_range_end [var->dreg] = use_ins;
13972 live_range_end_bb [var->dreg] = bb;
13975 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13978 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13979 tmp->inst_c1 = var->dreg;
13980 mono_bblock_insert_after_ins (bb, ins, tmp);
13984 mono_inst_set_src_registers (ins, sregs);
13986 if (dest_has_lvreg) {
13987 g_assert (ins->dreg != -1);
13988 vreg_to_lvreg [prev_dreg] = ins->dreg;
13989 if (lvregs_len >= lvregs_size) {
13990 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
13991 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
13992 lvregs = new_lvregs;
13995 lvregs [lvregs_len ++] = prev_dreg;
13996 dest_has_lvreg = FALSE;
14000 tmp_reg = ins->dreg;
14001 ins->dreg = ins->sreg2;
14002 ins->sreg2 = tmp_reg;
14005 if (MONO_IS_CALL (ins)) {
14006 /* Clear vreg_to_lvreg array */
14007 for (i = 0; i < lvregs_len; i++)
14008 vreg_to_lvreg [lvregs [i]] = 0;
14010 } else if (ins->opcode == OP_NOP) {
14012 MONO_INST_NULLIFY_SREGS (ins);
14015 if (cfg->verbose_level > 2)
14016 mono_print_ins_index (1, ins);
14019 /* Extend the live range based on the liveness info */
14020 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14021 for (i = 0; i < cfg->num_varinfo; i ++) {
14022 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14024 if (vreg_is_volatile (cfg, vi->vreg))
14025 /* The liveness info is incomplete */
14028 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14029 /* Live from at least the first ins of this bb */
14030 live_range_start [vi->vreg] = bb->code;
14031 live_range_start_bb [vi->vreg] = bb;
14034 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14035 /* Live at least until the last ins of this bb */
14036 live_range_end [vi->vreg] = bb->last_ins;
14037 live_range_end_bb [vi->vreg] = bb;
14044 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14045 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14047 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14048 for (i = 0; i < cfg->num_varinfo; ++i) {
14049 int vreg = MONO_VARINFO (cfg, i)->vreg;
14052 if (live_range_start [vreg]) {
14053 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14055 ins->inst_c1 = vreg;
14056 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14058 if (live_range_end [vreg]) {
14059 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14061 ins->inst_c1 = vreg;
14062 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14063 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14065 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14070 if (cfg->gsharedvt_locals_var_ins) {
14071 /* Nullify if unused */
14072 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14073 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14076 g_free (live_range_start);
14077 g_free (live_range_end);
14078 g_free (live_range_start_bb);
14079 g_free (live_range_end_bb);
14085 * - use 'iadd' instead of 'int_add'
14086 * - handling ovf opcodes: decompose in method_to_ir.
14087 * - unify iregs/fregs
14088 * -> partly done, the missing parts are:
14089 * - a more complete unification would involve unifying the hregs as well, so
14090 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14091 * would no longer map to the machine hregs, so the code generators would need to
14092 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14093 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14094 * fp/non-fp branches speeds it up by about 15%.
14095 * - use sext/zext opcodes instead of shifts
14097 * - get rid of TEMPLOADs if possible and use vregs instead
14098 * - clean up usage of OP_P/OP_ opcodes
14099 * - cleanup usage of DUMMY_USE
14100 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14102 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14103 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14104 * - make sure handle_stack_args () is called before the branch is emitted
14105 * - when the new IR is done, get rid of all unused stuff
14106 * - COMPARE/BEQ as separate instructions or unify them ?
14107 * - keeping them separate allows specialized compare instructions like
14108 * compare_imm, compare_membase
14109 * - most back ends unify fp compare+branch, fp compare+ceq
14110 * - integrate mono_save_args into inline_method
14111 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14112 * - handle long shift opts on 32 bit platforms somehow: they require
14113 * 3 sregs (2 for arg1 and 1 for arg2)
14114 * - make byref a 'normal' type.
14115 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14116 * variable if needed.
14117 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14118 * like inline_method.
14119 * - remove inlining restrictions
14120 * - fix LNEG and enable cfold of INEG
14121 * - generalize x86 optimizations like ldelema as a peephole optimization
14122 * - add store_mem_imm for amd64
14123 * - optimize the loading of the interruption flag in the managed->native wrappers
14124 * - avoid special handling of OP_NOP in passes
14125 * - move code inserting instructions into one function/macro.
14126 * - try a coalescing phase after liveness analysis
14127 * - add float -> vreg conversion + local optimizations on !x86
14128 * - figure out how to handle decomposed branches during optimizations, ie.
14129 * compare+branch, op_jump_table+op_br etc.
14130 * - promote RuntimeXHandles to vregs
14131 * - vtype cleanups:
14132 * - add a NEW_VARLOADA_VREG macro
14133 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14134 * accessing vtype fields.
14135 * - get rid of I8CONST on 64 bit platforms
14136 * - dealing with the increase in code size due to branches created during opcode
14138 * - use extended basic blocks
14139 * - all parts of the JIT
14140 * - handle_global_vregs () && local regalloc
14141 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14142 * - sources of increase in code size:
14145 * - isinst and castclass
14146 * - lvregs not allocated to global registers even if used multiple times
14147 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14149 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14150 * - add all micro optimizations from the old JIT
14151 * - put tree optimizations into the deadce pass
14152 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14153 * specific function.
14154 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14155 * fcompare + branchCC.
14156 * - create a helper function for allocating a stack slot, taking into account
14157 * MONO_CFG_HAS_SPILLUP.
14159 * - optimize mono_regstate2_alloc_int/float.
14160 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14161 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14162 * parts of the tree could be separated by other instructions, killing the tree
14163 * arguments, or stores killing loads etc. Also, should we fold loads into other
14164 * instructions if the result of the load is used multiple times ?
14165 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14166 * - LAST MERGE: 108395.
14167 * - when returning vtypes in registers, generate IR and append it to the end of the
14168 * last bb instead of doing it in the epilog.
14169 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14177 - When to decompose opcodes:
14178 - earlier: this makes some optimizations hard to implement, since the low level IR
14179 no longer contains the necessary information. But it is easier to do.
14180 - later: harder to implement, enables more optimizations.
14181 - Branches inside bblocks:
14182 - created when decomposing complex opcodes.
14183 - branches to another bblock: harmless, but not tracked by the branch
14184 optimizations, so need to branch to a label at the start of the bblock.
14185 - branches to inside the same bblock: very problematic, trips up the local
14186 reg allocator. Can be fixed by splitting the current bblock, but that is a
14187 complex operation, since some local vregs can become global vregs etc.
14188 - Local/global vregs:
14189 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14190 local register allocator.
14191 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14192 structure, created by mono_create_var (). Assigned to hregs or the stack by
14193 the global register allocator.
14194 - When to do optimizations like alu->alu_imm:
14195 - earlier -> saves work later on since the IR will be smaller/simpler
14196 - later -> can work on more instructions
14197 - Handling of valuetypes:
14198 - When a vtype is pushed on the stack, a new temporary is created, an
14199 instruction computing its address (LDADDR) is emitted and pushed on
14200 the stack. Need to optimize cases when the vtype is used immediately as in
14201 argument passing, stloc etc.
14202 - Instead of the to_end stuff in the old JIT, simply call the function handling
14203 the values on the stack before emitting the last instruction of the bb.
14206 #endif /* !DISABLE_JIT */