3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/* Inliner tuning knobs: relative cost charged per branch, and the maximum
 * IL size (in bytes) a callee may have to be considered for inlining. */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
/* Record an inline failure; aborts compilation (goto exception_exit) only
 * when we are currently compiling an inlined body of a non-wrapper method. */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whether 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode translation helpers defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 /* helper methods signatures */
/* Cached icall signatures, created once in mono_create_helper_signatures (). */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 /* type loading helpers */
/* Generate cached corlib class lookup helpers used later in this file. */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
164 * Instruction metadata
/* Expand mini-ops.h into per-opcode dest/src register-kind descriptors. */
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Re-expand mini-ops.h, this time computing the source register count per opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Thin public wrappers over the static vreg allocators (alloc_ireg & co.). */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
/* Allocate a vreg suited to hold a value of eval-stack type STACK_TYPE. */
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC kind (ref / managed pointer / plain int) of VREG. */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/* Return the OP_*MOVE opcode used to copy a value of TYPE between vregs. */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* R4 values use dedicated r4 moves when cfg->r4fp is set. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
313 if (type->data.klass->enumtype) {
/* Enums move like their underlying integral type. */
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
325 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only reachable under generic sharing. */
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print BB's in/out edges and its instruction list, prefixed by MSG. */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
355 g_print ("%s", str->str);
356 g_string_free (str, TRUE);
/* Dump every instruction in the block. */
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/* Create the cached signatures for the JIT's internal icall helpers
 * (fills in the helper_sig_* globals declared above). */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debugger hook: a convenient breakpoint target, reached when unverified IL
 * is encountered and the 'break_on_unverified' debug option is set. */
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
376 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException on CFG for FIELD accessed from METHOD. */
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
/* Mark CFG as failed to inline, logging MSG when verbose. */
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 if (cfg->verbose_level > 2) \
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt compilation failure: store a diagnostic message on CFG
 * and mark it MONO_EXCEPTION_GENERIC_SHARING_FAILED so compilation can fall
 * back to a concrete instantiation. */
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
417 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
453 /* Emit conversions so both operands of a binary opcode are of the same type */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
/* Mixed r4/r8 operands: widen the r4 side to r8. */
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
/* On 64-bit, sign-extend an I4 operand that is mixed with a pointer-sized one. */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
554 * link_bblock: Links two basic blocks
556 * Links two basic blocks in the control flow graph: the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * control flow transfers to after 'from'. Duplicate edges are not added.
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
/* Verbose edge tracing. */
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in from->out_bb; else grow the array. */
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
/* Mirror the edge in to->in_bb, again avoiding duplicates. */
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
/* Public wrapper for link_bblock (). */
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
/* First pass: filter/handler bodies take precedence over try ranges. */
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: try ranges themselves. */
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
/* Return whether OFFSET falls inside a finally or fault handler body. */
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
/* Only finally/fault clauses are of interest here. */
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE whose try range contains IP but not
 * TARGET, i.e. the protected regions a branch from IP to TARGET exits. */
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
/* Get or lazily create the per-region 'spvar' (int-sized local, kept off
 * the register allocator), cached in cfg->spvars keyed by REGION. */
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception var for the handler at OFFSET. */
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception object var for the handler at OFFSET. */
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
797 if (type->data.klass->enumtype) {
/* Enums are loaded as their underlying integral type. */
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing. */
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
828 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of binary numeric ops, indexed [src1->type][src2->type].
 * Rows/columns follow the STACK_* order; rows with 9 initializers include the
 * trailing r4 column, shorter rows rely on implicit zero fill (presumably
 * STACK_INV == 0 -- confirm against the STACK_* enum in mini.h). */
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation, indexed by operand stack type. */
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
/* Result type of binary integer-only ops (and/or/xor...). */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity; non-zero means comparable (values >1 appear to encode
 * conditionally-valid pairs -- see uses in type_from_op). */
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
/* Result type of shift ops; shift amount (second index) must be i4/native int. */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
/* Binary arithmetic: result type from bin_num_table, then specialize the opcode. */
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the L/R/F/I variant from the first operand's width. */
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
/* Immediate compare: both table indices use src1 (the other operand is an imm). */
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: bit 0 of bin_comp_table gates validity. */
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned integer to floating point. */
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit. */
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int, width depends on the source. */
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floating point. */
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
/* Loads: result type is determined by the load width. */
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers without a better class default to object. */
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
/* Map a CEE_LDIND_* opcode to the MonoClass of the value it loads. */
1165 ldind_to_type (int op)
1168 case CEE_LDIND_I1: return mono_defaults.sbyte_class;
1169 case CEE_LDIND_U1: return mono_defaults.byte_class;
1170 case CEE_LDIND_I2: return mono_defaults.int16_class;
1171 case CEE_LDIND_U2: return mono_defaults.uint16_class;
1172 case CEE_LDIND_I4: return mono_defaults.int32_class;
1173 case CEE_LDIND_U4: return mono_defaults.uint32_class;
1174 case CEE_LDIND_I8: return mono_defaults.int64_class;
1175 case CEE_LDIND_I: return mono_defaults.int_class;
1176 case CEE_LDIND_R4: return mono_defaults.single_class;
1177 case CEE_LDIND_R8: return mono_defaults.double_class;
1178 case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1179 default: g_error ("Unknown ldind type %d", op);
1186 param_table [STACK_MAX] [STACK_MAX] = {
/* NOTE(review): appears to validate the eval-stack argument types in ARGS
 * against SIG (returning 0 on a mismatch) -- confirm against callers. */
1191 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1196 switch (args->type) {
1206 for (i = 0; i < sig->param_count; ++i) {
1207 switch (args [i].type) {
1211 if (!sig->params [i]->byref)
1215 if (sig->params [i]->byref)
1217 switch (sig->params [i]->type) {
1218 case MONO_TYPE_CLASS:
1219 case MONO_TYPE_STRING:
1220 case MONO_TYPE_OBJECT:
1221 case MONO_TYPE_SZARRAY:
1222 case MONO_TYPE_ARRAY:
/* Floating point args must map to R4/R8 parameters. */
1229 if (sig->params [i]->byref)
1231 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1240 /*if (!param_table [args [i].type] [sig->params [i]->type])
1248 * When we need a pointer to the current domain many times in a method, we
1249 * call mono_domain_get() once and we store the result in a local variable.
1250 * This function returns the variable that represents the MonoDomain*.
1252 inline static MonoInst *
1253 mono_get_domainvar (MonoCompile *cfg)
1255 if (!cfg->domainvar)
1256 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 return cfg->domainvar;
1261 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Returns NULL unless AOT-compiling on a backend that needs a GOT var
 * (and not in llvm-only mode); otherwise lazily creates cfg->got_var.
 */
1265 mono_get_got_var (MonoCompile *cfg)
1267 if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1269 if (!cfg->got_var) {
1270 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1272 return cfg->got_var;
/*
 * mono_create_rgctx_var:
 * Lazily create the variable holding the runtime generic context (rgctx).
 * Marked MONO_INST_VOLATILE so it is stack-allocated, not put in a register.
 */
1276 mono_create_rgctx_var (MonoCompile *cfg)
1278 if (!cfg->rgctx_var) {
1279 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1280 /* force the var to be stack allocated */
1281 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/*
 * mono_get_vtable_var:
 * Return the rgctx variable; only valid for gshared (generic-shared) methods.
 */
1286 mono_get_vtable_var (MonoCompile *cfg)
1288 g_assert (cfg->gshared);
1290 mono_create_rgctx_var (cfg);
1292 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType*.
 * For vtypes the instruction's klass supplies the concrete type; errors out
 * on an unhandled stack type.
 */
1296 type_from_stack_type (MonoInst *ins) {
1297 switch (ins->type) {
1298 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1299 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1300 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1301 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1302 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* NOTE(review): case label missing from extraction — this return uses the
 * managed-pointer form (this_arg) of the instruction's class. */
1304 return &ins->klass->this_arg;
1305 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1306 case STACK_VTYPE: return &ins->klass->byval_arg;
1308 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType to the evaluation-stack type (STACK_*) it occupies.
 * Underlying type is resolved first so enums map to their base type.
 * NOTE(review): most return statements were dropped by the extraction;
 * code text left byte-identical.
 */
1313 static G_GNUC_UNUSED int
1314 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1316 t = mono_type_get_underlying_type (t);
1328 case MONO_TYPE_FNPTR:
1330 case MONO_TYPE_CLASS:
1331 case MONO_TYPE_STRING:
1332 case MONO_TYPE_OBJECT:
1333 case MONO_TYPE_SZARRAY:
1334 case MONO_TYPE_ARRAY:
/* R4 maps to cfg->r4_stack_type (depends on soft-float configuration) */
1340 return cfg->r4_stack_type;
1343 case MONO_TYPE_VALUETYPE:
1344 case MONO_TYPE_TYPEDBYREF:
1346 case MONO_TYPE_GENERICINST:
1347 if (mono_type_generic_inst_is_valuetype (t))
1353 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass accessed.
 * NOTE(review): the case labels were dropped by the extraction; the returns
 * follow the usual ldelem/stelem element-type ordering. Code text untouched.
 */
1360 array_access_to_klass (int opcode)
1364 return mono_defaults.byte_class;
1366 return mono_defaults.uint16_class;
1369 return mono_defaults.int_class;
1372 return mono_defaults.sbyte_class;
1375 return mono_defaults.int16_class;
1378 return mono_defaults.int32_class;
1380 return mono_defaults.uint32_class;
1383 return mono_defaults.int64_class;
1386 return mono_defaults.single_class;
1389 return mono_defaults.double_class;
1390 case CEE_LDELEM_REF:
1391 case CEE_STELEM_REF:
1392 return mono_defaults.object_class;
1394 g_assert_not_reached ();
1400 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable used to carry stack slot SLOT across basic-block
 * boundaries, reusing a previously allocated one (cached in cfg->intvars,
 * keyed by slot and stack type) when possible.
 */
1403 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1408 /* inlining can result in deeper stacks */
1409 if (slot >= cfg->header->max_stack)
1410 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* cache index: one entry per (stack type, slot) pair */
1412 pos = ins->type - 1 + slot * STACK_MAX;
1414 switch (ins->type) {
1421 if ((vnum = cfg->intvars [pos]))
1422 return cfg->varinfo [vnum];
1423 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1424 cfg->intvars [pos] = res->inst_c0;
1427 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When AOT-compiling, record the (image, token) pair for KEY in
 * cfg->token_info_hash so the AOT compiler can look the item up later.
 * Skipped when a generic context is set (token alone is not enough then).
 */
1433 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1436 * Don't use this if a generic_context is set, since that means AOT can't
1437 * look up the method using just the image+token.
1438 * table == 0 means this is a reference made from a wrapper.
1440 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1441 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1442 jump_info_token->image = image;
1443 jump_info_token->token = token;
1444 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1449 * This function is called to handle items that are left on the evaluation stack
1450 * at basic block boundaries. What happens is that we save the values to local variables
1451 * and we reload them later when first entering the target basic block (with the
1452 * handle_loaded_temps () function).
1453 * A single joint point will use the same variables (stored in the array bb->out_stack or
1454 * bb->in_stack, if the basic block is before or after the joint point).
1456 * This function needs to be called _before_ emitting the last instruction of
1457 * the bb (i.e. before emitting a branch).
1458 * If the stack merge fails at a join point, cfg->unverifiable is set.
1461 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1464 MonoBasicBlock *bb = cfg->cbb;
1465 MonoBasicBlock *outb;
1466 MonoInst *inst, **locals;
1471 if (cfg->verbose_level > 3)
1472 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or create) the out_stack variables. */
1473 if (!bb->out_scount) {
1474 bb->out_scount = count;
1475 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor, if one already has it. */
1477 for (i = 0; i < bb->out_count; ++i) {
1478 outb = bb->out_bb [i];
1479 /* exception handlers are linked, but they should not be considered for stack args */
1480 if (outb->flags & BB_EXCEPTION_HANDLER)
1482 //printf (" %d", outb->block_num);
1483 if (outb->in_stack) {
1485 bb->out_stack = outb->in_stack;
1491 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1492 for (i = 0; i < count; ++i) {
1494 * try to reuse temps already allocated for this purpouse, if they occupy the same
1495 * stack slot and if they are of the same type.
1496 * This won't cause conflicts since if 'local' is used to
1497 * store one of the values in the in_stack of a bblock, then
1498 * the same variable will be used for the same outgoing stack
1500 * This doesn't work when inlining methods, since the bblocks
1501 * in the inlined methods do not inherit their in_stack from
1502 * the bblock they are inlined to. See bug #58863 for an
1505 if (cfg->inlined_method)
1506 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1508 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet;
 * a mismatched in_scount at a join point makes the method unverifiable. */
1513 for (i = 0; i < bb->out_count; ++i) {
1514 outb = bb->out_bb [i];
1515 /* exception handlers are linked, but they should not be considered for stack args */
1516 if (outb->flags & BB_EXCEPTION_HANDLER)
1518 if (outb->in_scount) {
1519 if (outb->in_scount != bb->out_scount) {
1520 cfg->unverifiable = TRUE;
1523 continue; /* check they are the same locals */
1525 outb->in_scount = count;
1526 outb->in_stack = bb->out_stack;
1529 locals = bb->out_stack;
/* Spill the current stack values into the shared locals. */
1531 for (i = 0; i < count; ++i) {
1532 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1533 inst->cil_code = sp [i]->cil_code;
1534 sp [i] = locals [i];
1535 if (cfg->verbose_level > 3)
1536 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1540 * It is possible that the out bblocks already have in_stack assigned, and
1541 * the in_stacks differ. In this case, we will store to all the different
1548 /* Find a bblock which has a different in_stack */
1550 while (bindex < bb->out_count) {
1551 outb = bb->out_bb [bindex];
1552 /* exception handlers are linked, but they should not be considered for stack args */
1553 if (outb->flags & BB_EXCEPTION_HANDLER) {
1557 if (outb->in_stack != locals) {
1558 for (i = 0; i < count; ++i) {
1559 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1560 inst->cil_code = sp [i]->cil_code;
1561 sp [i] = locals [i];
1562 if (cfg->verbose_level > 3)
1563 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1565 locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 * Emit IR loading a runtime constant described by (PATCH_TYPE, DATA):
 * an AOTCONST when AOT-compiling, otherwise the patch target is resolved
 * immediately and emitted as a PCONST.
 */
1575 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1579 if (cfg->compile_aot) {
1580 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1586 ji.type = patch_type;
1587 ji.data.target = data;
1588 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1589 mono_error_assert_ok (&error);
1591 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mono_create_fast_tls_getter:
 * Create an OP_TLS_GET instruction reading KEY directly via its TLS offset.
 * Not available when AOT-compiling or when the arch lacks fast TLS
 * (presumably returns NULL then — the early-return body is missing from
 * this extraction).
 */
1597 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1599 int tls_offset = mono_tls_get_tls_offset (key);
1601 if (cfg->compile_aot)
1604 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1606 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1607 ins->dreg = mono_alloc_preg (cfg);
1608 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Create an OP_TLS_SET instruction storing VALUE to KEY via its TLS offset.
 * Same availability constraints as mono_create_fast_tls_getter ().
 */
1615 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1617 int tls_offset = mono_tls_get_tls_offset (key);
1619 if (cfg->compile_aot)
1622 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1624 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1625 ins->sreg1 = value->dreg;
1626 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR reading the TLS slot KEY. Tries the fast inline path first
 * (unless the fallback-TLS debug option is set); otherwise falls back to
 * calling a getter: via a GET_TLS_TRAMP calli when AOT-compiling, or a
 * JIT icall wrapping the resolved getter function at JIT time.
 */
1634 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1636 MonoInst *fast_tls = NULL;
1638 if (!mini_get_debug_options ()->use_fallback_tls)
1639 fast_tls = mono_create_fast_tls_getter (cfg, key);
1642 MONO_ADD_INS (cfg->cbb, fast_tls);
1646 if (cfg->compile_aot) {
1649 * tls getters are critical pieces of code and we don't want to resolve them
1650 * through the standard plt/tramp mechanism since we might expose ourselves
1651 * to crashes and infinite recursions.
1653 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1654 return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1656 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1657 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR storing VALUE to the TLS slot KEY; mirrors mono_create_tls_get ():
 * fast inline store when possible, otherwise a SET_TLS_TRAMP calli (AOT)
 * or a JIT icall on the resolved setter.
 */
1662 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1664 MonoInst *fast_tls = NULL;
1666 if (!mini_get_debug_options ()->use_fallback_tls)
1667 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1670 MONO_ADD_INS (cfg->cbb, fast_tls);
1674 if (cfg->compile_aot) {
1676 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1677 return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1679 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1680 return mono_emit_jit_icall (cfg, setter, &value);
1687 * Emit IR to push the current LMF onto the LMF stack.
/* Links this frame's LMF into the thread's LMF list via the TLS lmf_addr. */
1690 emit_push_lmf (MonoCompile *cfg)
1693 * Emit IR to push the LMF:
1694 * lmf_addr = <lmf_addr from tls>
1695 * lmf->lmf_addr = lmf_addr
1696 * lmf->prev_lmf = *lmf_addr
1699 MonoInst *ins, *lmf_ins;
1704 int lmf_reg, prev_lmf_reg;
1706 * Store lmf_addr in a variable, so it can be allocated to a global register.
1708 if (!cfg->lmf_addr_var)
1709 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from the jit_tls struct, the other reads it
 * straight from TLS_KEY_LMF_ADDR (which path runs depends on conditions
 * dropped by this extraction). */
1712 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1714 int jit_tls_dreg = ins->dreg;
1716 lmf_reg = alloc_preg (cfg);
1717 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1719 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1722 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1724 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1725 lmf_reg = ins->dreg;
1727 prev_lmf_reg = alloc_preg (cfg);
1728 /* Save previous_lmf */
1729 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1730 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this LMF the head of the list: *lmf_addr = lmf */
1732 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1738 * Emit IR to pop the current LMF from the LMF stack.
/* Unlinks this frame's LMF: *(lmf->lmf_addr) = lmf->prev_lmf. */
1741 emit_pop_lmf (MonoCompile *cfg)
1743 int lmf_reg, lmf_addr_reg;
1749 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1750 lmf_reg = ins->dreg;
1754 * Emit IR to pop the LMF:
1755 * *(lmf->lmf_addr) = lmf->prev_lmf
1757 /* This could be called before emit_push_lmf () */
1758 if (!cfg->lmf_addr_var)
1759 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1760 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1762 prev_lmf_reg = alloc_preg (cfg);
1763 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1764 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 * Pick the call opcode for a call returning TYPE: the VOID/int/long/float/
 * r4/vtype variant, crossed with the _REG (indirect/calli), _MEMBASE
 * (virtual) or plain direct form. Enums and generic insts are first lowered
 * to their underlying/container type.
 */
1768 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1771 type = mini_get_underlying_type (type);
1772 switch (type->type) {
1773 case MONO_TYPE_VOID:
1774 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1781 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1785 case MONO_TYPE_FNPTR:
1786 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1787 case MONO_TYPE_CLASS:
1788 case MONO_TYPE_STRING:
1789 case MONO_TYPE_OBJECT:
1790 case MONO_TYPE_SZARRAY:
1791 case MONO_TYPE_ARRAY:
1792 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1795 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1798 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1800 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1802 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1803 case MONO_TYPE_VALUETYPE:
/* enums re-dispatch on their base type (loop/goto dropped by extraction) */
1804 if (type->data.klass->enumtype) {
1805 type = mono_class_enum_basetype (type->data.klass);
1808 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1809 case MONO_TYPE_TYPEDBYREF:
1810 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1811 case MONO_TYPE_GENERICINST:
1812 type = &type->data.generic_class->container_class->byval_arg;
1815 case MONO_TYPE_MVAR:
1817 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1819 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1824 //XXX this ignores if t is byref
/* True for the fixed-size integer scalars (BOOLEAN..U8) and native I/U. */
1825 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1828 * target_type_is_incompatible:
1829 * @cfg: MonoCompile context
1831 * Check that the item @arg on the evaluation stack can be stored
1832 * in the target type (can be a local, or field, etc).
1833 * The cfg arg can be used to check if we need verification or just
1836 * Returns: non-0 value if arg can't be stored on a target.
1839 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1841 MonoType *simple_type;
/* byref targets: compare the pointed-to (lowered) classes */
1844 if (target->byref) {
1845 /* FIXME: check that the pointed to types match */
1846 if (arg->type == STACK_MP) {
1847 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1848 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1849 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1851 /* if the target is native int& or same type */
1852 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1855 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1856 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1857 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1861 if (arg->type == STACK_PTR)
/* non-byref: dispatch on the lowered target type and match stack kinds */
1866 simple_type = mini_get_underlying_type (target);
1867 switch (simple_type->type) {
1868 case MONO_TYPE_VOID:
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1880 /* STACK_MP is needed when setting pinned locals */
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1886 case MONO_TYPE_FNPTR:
1888 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1889 * in native int. (#688008).
1891 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1894 case MONO_TYPE_CLASS:
1895 case MONO_TYPE_STRING:
1896 case MONO_TYPE_OBJECT:
1897 case MONO_TYPE_SZARRAY:
1898 case MONO_TYPE_ARRAY:
1899 if (arg->type != STACK_OBJ)
1901 /* FIXME: check type compatibility */
1905 if (arg->type != STACK_I8)
1909 if (arg->type != cfg->r4_stack_type)
1913 if (arg->type != STACK_R8)
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 MonoClass *target_class;
1933 if (arg->type != STACK_VTYPE)
1935 klass = mono_class_from_mono_type (simple_type);
1936 target_class = mono_class_from_mono_type (target);
1937 /* The second cases is needed when doing partial sharing */
1938 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1942 if (arg->type != STACK_OBJ)
1944 /* FIXME: check type compatibility */
1948 case MONO_TYPE_MVAR:
1949 g_assert (cfg->gshared);
1950 if (mini_type_var_is_vt (simple_type)) {
1951 if (arg->type != STACK_VTYPE)
1954 if (arg->type != STACK_OBJ)
1959 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1965 * Prepare arguments for passing to a function call.
1966 * Return a non-zero value if the arguments can't be passed to the given
1968 * The type checks are not yet complete and some conversions may need
1969 * casts on 32 or 64 bit architectures.
1971 * FIXME: implement this using target_type_is_incompatible ()
1974 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1976 MonoType *simple_type;
/* the implicit 'this' must be an object/managed pointer/native pointer */
1980 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1984 for (i = 0; i < sig->param_count; ++i) {
1985 if (sig->params [i]->byref) {
1986 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1990 simple_type = mini_get_underlying_type (sig->params [i]);
1992 switch (simple_type->type) {
1993 case MONO_TYPE_VOID:
2002 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2008 case MONO_TYPE_FNPTR:
2009 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2012 case MONO_TYPE_CLASS:
2013 case MONO_TYPE_STRING:
2014 case MONO_TYPE_OBJECT:
2015 case MONO_TYPE_SZARRAY:
2016 case MONO_TYPE_ARRAY:
2017 if (args [i]->type != STACK_OBJ)
2022 if (args [i]->type != STACK_I8)
2026 if (args [i]->type != cfg->r4_stack_type)
2030 if (args [i]->type != STACK_R8)
2033 case MONO_TYPE_VALUETYPE:
/* enums are checked against their base type */
2034 if (simple_type->data.klass->enumtype) {
2035 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2038 if (args [i]->type != STACK_VTYPE)
2041 case MONO_TYPE_TYPEDBYREF:
2042 if (args [i]->type != STACK_VTYPE)
2045 case MONO_TYPE_GENERICINST:
2046 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2049 case MONO_TYPE_MVAR:
2051 if (args [i]->type != STACK_VTYPE)
2055 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALL_MEMBASE (virtual call) opcode to its direct-call form.
 * NOTE(review): the return statements between the case labels were dropped
 * by the extraction; code text left byte-identical.
 */
2063 callvirt_to_call (int opcode)
2066 case OP_CALL_MEMBASE:
2068 case OP_VOIDCALL_MEMBASE:
2070 case OP_FCALL_MEMBASE:
2072 case OP_RCALL_MEMBASE:
2074 case OP_VCALL_MEMBASE:
2076 case OP_LCALL_MEMBASE:
2079 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Map an OP_*CALL_MEMBASE (virtual call) opcode to its indirect (_REG)
 * form, used when the call target address is in a register.
 */
2086 callvirt_to_call_reg (int opcode)
2089 case OP_CALL_MEMBASE:
2091 case OP_VOIDCALL_MEMBASE:
2092 return OP_VOIDCALL_REG;
2093 case OP_FCALL_MEMBASE:
2094 return OP_FCALL_REG;
2095 case OP_RCALL_MEMBASE:
2096 return OP_RCALL_REG;
2097 case OP_VCALL_MEMBASE:
2098 return OP_VCALL_REG;
2099 case OP_LCALL_MEMBASE:
2100 return OP_LCALL_REG;
2102 g_assert_not_reached ();
2108 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Load the IMT argument (either a copy of IMT_ARG's register or a
 * METHODCONST for METHOD) and attach it to CALL — via call->imt_arg_reg on
 * the LLVM path, or MONO_ARCH_IMT_REG on the JIT path.
 */
2110 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2114 if (COMPILE_LLVM (cfg)) {
2116 method_reg = alloc_preg (cfg);
2117 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2119 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2120 method_reg = ins->dreg;
2124 call->imt_arg_reg = method_reg;
2126 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path: same value, passed in the arch-specific IMT register */
2131 method_reg = alloc_preg (cfg);
2132 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2134 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2135 method_reg = ins->dreg;
2138 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its type/ip/target fields.
 */
2141 static MonoJumpInfo *
2142 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2144 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2148 ji->data.target = target;
/* Wrapper around mono_class_check_context_used (); the cfg-dependent
 * early-out visible in the full source was dropped by this extraction. */
2154 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2157 return mono_class_check_context_used (klass);
/* Wrapper around mono_method_check_context_used (); the cfg-dependent
 * early-out visible in the full source was dropped by this extraction. */
2163 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2166 return mono_method_check_context_used (method);
2172 * check_method_sharing:
2174 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Outputs are optional: either out pointer may be NULL. */
2177 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2179 gboolean pass_vtable = FALSE;
2180 gboolean pass_mrgctx = FALSE;
/* vtable: static or valuetype methods on generic (inst or definition) classes */
2182 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2183 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2184 gboolean sharable = FALSE;
2186 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2190 * Pass vtable iff target method might
2191 * be shared, which means that sharing
2192 * is enabled for its class and its
2193 * context is sharable (and it's not a
2196 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx: methods with their own method_inst (generic method instantiation) */
2200 if (mini_method_get_context (cmethod) &&
2201 mini_method_get_context (cmethod)->method_inst) {
2202 g_assert (!pass_vtable);
2204 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2207 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2212 if (out_pass_vtable)
2213 *out_pass_vtable = pass_vtable;
2214 if (out_pass_mrgctx)
2215 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS: pick the call opcode (tailcall or one
 * chosen by ret_type_to_call_opcode), set up the vtype return (vret_var or
 * an OP_OUTARG_VTRETADDR temp), perform the soft-float r8->r4 argument
 * conversion where needed, and let the backend (LLVM or arch) emit the
 * out-args. Does NOT add the call instruction to the bblock.
 */
2218 inline static MonoCallInst *
2219 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2220 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2224 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls raise the method-leave profiler event before leaving */
2232 mini_profiler_emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE, NULL, NULL);
2234 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2236 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2239 call->signature = sig;
2240 call->rgctx_reg = rgctx;
2241 sig_ret = mini_get_underlying_type (sig->ret);
2243 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype returns: either reuse cfg->vret_addr or allocate a temp and emit
 * OP_OUTARG_VTRETADDR (which path runs depends on a condition dropped by
 * this extraction). */
2246 if (mini_type_is_vtype (sig_ret)) {
2247 call->vret_var = cfg->vret_addr;
2248 //g_assert_not_reached ();
2250 } else if (mini_type_is_vtype (sig_ret)) {
2251 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2254 temp->backend.is_pinvoke = sig->pinvoke;
2257 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2258 * address of return value to increase optimization opportunities.
2259 * Before vtype decomposition, the dreg of the call ins itself represents the
2260 * fact the call modifies the return value. After decomposition, the call will
2261 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2262 * will be transformed into an LDADDR.
2264 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2265 loada->dreg = alloc_preg (cfg);
2266 loada->inst_p0 = temp;
2267 /* We reference the call too since call->dreg could change during optimization */
2268 loada->inst_p1 = call;
2269 MONO_ADD_INS (cfg->cbb, loada);
2271 call->inst.dreg = temp->dreg;
2273 call->vret_var = loada;
2274 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2275 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2277 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2278 if (COMPILE_SOFT_FLOAT (cfg)) {
2280 * If the call has a float argument, we would need to do an r8->r4 conversion using
2281 * an icall, but that cannot be done during the call sequence since it would clobber
2282 * the call registers + the stack. So we do it before emitting the call.
2284 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2286 MonoInst *in = call->args [i];
2288 if (i >= sig->hasthis)
2289 t = sig->params [i - sig->hasthis];
2291 t = &mono_defaults.int_class->byval_arg;
2292 t = mono_type_get_underlying_type (t);
2294 if (!t->byref && t->type == MONO_TYPE_R4) {
2295 MonoInst *iargs [1];
2299 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2301 /* The result will be in an int vreg */
2302 call->args [i] = conv;
2308 call->need_unbox_trampoline = unbox_trampoline;
2311 if (COMPILE_LLVM (cfg))
2312 mono_llvm_emit_call (cfg, call);
2314 mono_arch_emit_call (cfg, call);
2316 mono_arch_emit_call (cfg, call);
/* track the maximum outgoing-args area and mark the method as having calls */
2319 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2320 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx argument to CALL: pass RGCTX_REG in MONO_ARCH_RGCTX_REG
 * and record the usage on the cfg and the call.
 */
2326 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2328 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2329 cfg->uses_rgctx_reg = TRUE;
2330 call->rgctx_reg = TRUE;
2332 call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, optionally passing
 * IMT_ARG and RGCTX_ARG. For pinvoke wrappers with calling-convention
 * checking enabled, the stack pointer is saved before and compared after
 * the call; a mismatch raises ExecutionEngineException.
 */
2337 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2342 gboolean check_sp = FALSE;
2344 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2345 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2347 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value out early; it is attached to the call at the end */
2352 rgctx_reg = mono_alloc_preg (cfg);
2353 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2357 if (!cfg->stack_inbalance_var)
2358 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2360 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2361 ins->dreg = cfg->stack_inbalance_var->dreg;
2362 MONO_ADD_INS (cfg->cbb, ins);
2365 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2367 call->inst.sreg1 = addr->dreg;
2370 emit_imt_argument (cfg, call, NULL, imt_arg);
2372 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* post-call: re-read SP, restore it, and trap on imbalance */
2377 sp_reg = mono_alloc_preg (cfg);
2379 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2381 MONO_ADD_INS (cfg->cbb, ins);
2383 /* Restore the stack so we don't crash when throwing the exception */
2384 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2385 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2386 MONO_ADD_INS (cfg->cbb, ins);
2388 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2389 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2393 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2395 return (MonoInst*)call;
/* Forward declaration; defined later in this file. */
2399 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual, possibly tail) call to METHOD. Handles remoting
 * proxies, string ctors, llvm-only virtual calls, delegate Invoke fast
 * paths, devirtualization of non-virtual/final methods, and IMT vs. vtable
 * slot dispatch. Returns the call instruction.
 */
2402 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2403 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2405 #ifndef DISABLE_REMOTING
2406 gboolean might_be_remote = FALSE;
2408 gboolean virtual_ = this_ins != NULL;
2409 gboolean enable_for_aot = TRUE;
2412 MonoInst *call_target = NULL;
2414 gboolean need_unbox_trampoline;
2417 sig = mono_method_signature (method);
2419 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2420 g_assert_not_reached ();
2423 rgctx_reg = mono_alloc_preg (cfg);
2424 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* string ctors are called with a signature returning string */
2427 if (method->string_ctor) {
2428 /* Create the real signature */
2429 /* FIXME: Cache these */
2430 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2431 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2436 context_used = mini_method_check_context_used (cfg, method);
2438 #ifndef DISABLE_REMOTING
2439 might_be_remote = this_ins && sig->hasthis &&
2440 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2441 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
/* gshared + remoting: dispatch through an rgctx-provided invoke-with-check */
2443 if (might_be_remote && context_used) {
2446 g_assert (cfg->gshared);
2448 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2450 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2454 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2455 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2457 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2459 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2461 #ifndef DISABLE_REMOTING
2462 if (might_be_remote)
2463 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2466 call->method = method;
2467 call->inst.flags |= MONO_INST_HAS_METHOD;
2468 call->inst.inst_left = this_ins;
2469 call->tail_call = tail;
/* virtual dispatch paths */
2472 int vtable_reg, slot_reg, this_reg;
2475 this_reg = this_ins->dreg;
/* fast path: MulticastDelegate.Invoke via delegate->invoke_impl */
2477 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2478 MonoInst *dummy_use;
2480 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2482 /* Make a call to delegate->invoke_impl */
2483 call->inst.inst_basereg = this_reg;
2484 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2485 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2487 /* We must emit a dummy use here because the delegate trampoline will
2488 replace the 'this' argument with the delegate target making this activation
2489 no longer a root for the delegate.
2490 This is an issue for delegates that target collectible code such as dynamic
2491 methods of GC'able assemblies.
2493 For a test case look into #667921.
2495 FIXME: a dummy use is not the best way to do it as the local register allocator
2496 will put it on a caller save register and spil it around the call.
2497 Ideally, we would either put it on a callee save register or only do the store part.
2499 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2501 return (MonoInst*)call;
/* devirtualize non-virtual or final methods into a direct call */
2504 if ((!cfg->compile_aot || enable_for_aot) &&
2505 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2506 (MONO_METHOD_IS_FINAL (method) &&
2507 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2508 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2510 * the method is not virtual, we just need to ensure this is not null
2511 * and then we can call the method directly.
2513 #ifndef DISABLE_REMOTING
2514 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2516 * The check above ensures method is not gshared, this is needed since
2517 * gshared methods can't have wrappers.
2519 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2523 if (!method->string_ctor)
2524 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2526 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2527 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2529 * the method is virtual, but we can statically dispatch since either
2530 * it's class or the method itself are sealed.
2531 * But first we need to ensure it's not a null reference.
2533 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2535 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* explicit call target supplied: indirect call through call_target */
2536 } else if (call_target) {
2537 vtable_reg = alloc_preg (cfg);
2538 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2540 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2541 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '&= !' clears the flags only when MONO_INST_HAS_METHOD is 1;
 * '&= ~MONO_INST_HAS_METHOD' reads as the intent — verify upstream. */
2542 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* general case: dispatch through the vtable (IMT slot for interfaces) */
2544 vtable_reg = alloc_preg (cfg);
2545 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2546 if (mono_class_is_interface (method->klass)) {
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
2550 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2552 slot_reg = vtable_reg;
2553 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2554 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2556 g_assert (mono_method_signature (method)->generic_param_count);
2557 emit_imt_argument (cfg, call, call->method, imt_arg);
2561 call->inst.sreg1 = slot_reg;
2562 call->inst.inst_offset = offset;
2563 call->is_virtual = TRUE;
2567 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2570 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2572 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a direct (non-virtual) call to METHOD with
 * ARGS and THIS_INS, using the method's own signature and no imt/rgctx arg.
 */
2576 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2578 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a call to the native function FUNC with signature SIG and append
 * the call instruction to the current basic block; the call inst is
 * returned. (NOTE(review): the rest of the parameter list and the local
 * declarations are on lines elided from this view.)
 */
2582 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2589 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2592 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2594 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered at address FUNC, going through
 * the icall's managed wrapper (looked up via the icall's address).
 */
2598 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2600 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2604 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2608 * mono_emit_abs_call:
2610 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2612 inline static MonoInst*
2613 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2614 MonoMethodSignature *sig, MonoInst **args)
2616 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2620 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info "addresses" back to their infos. */
2623 if (cfg->abs_patches == NULL)
2624 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2625 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The MonoJumpInfo pointer itself stands in for the call address; mark the call so. */
2626 ins = mono_emit_native_call (cfg, ji, sig, args);
2627 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2631 static MonoMethodSignature*
2632 sig_to_rgctx_sig (MonoMethodSignature *sig)
2634 // FIXME: memory allocation
2635 MonoMethodSignature *res;
2638 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2639 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2640 res->param_count = sig->param_count + 1;
2641 for (i = 0; i < sig->param_count; ++i)
2642 res->params [i] = sig->params [i];
2643 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2647 /* Make an indirect call to FSIG passing an additional argument */
2649 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2651 MonoMethodSignature *csig;
2652 MonoInst *args_buf [16];
2654 int i, pindex, tmp_reg;
/* Small argument counts use the on-stack buffer; larger ones a mempool array. */
2656 /* Make a call with an rgctx/extra arg */
2657 if (fsig->param_count + 2 < 16)
2660 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* Copy 'this' (if any) and the explicit arguments, then append ARG_REG last. */
2663 args [pindex ++] = orig_args [0];
2664 for (i = 0; i < fsig->param_count; ++i)
2665 args [pindex ++] = orig_args [fsig->hasthis + i];
2666 tmp_reg = alloc_preg (cfg);
2667 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* The extended signature has one extra trailing native-int parameter. */
2668 csig = sig_to_rgctx_sig (fsig);
2669 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2672 /* Emit an indirect call to the function descriptor ADDR */
/* llvm-only: descriptors are <code addr, extra arg> pairs in memory. */
2674 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2676 int addr_reg, arg_reg;
2677 MonoInst *call_target;
2679 g_assert (cfg->llvm_only);
2682 * addr points to a <addr, arg> pair, load both of them, and
2683 * make a call to addr, passing arg as an extra arg.
2685 addr_reg = alloc_preg (cfg);
2686 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2687 arg_reg = alloc_preg (cfg);
/* Second word of the descriptor is the extra argument. */
2688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2690 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly instead of through their managed
 * wrapper. (NOTE(review): the return statements and platform #ifdef guards
 * are on lines elided from this view.)
 */
2694 direct_icalls_enabled (MonoCompile *cfg)
2698 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2700 if (cfg->compile_llvm && !cfg->llvm_only)
/* Wrappers are needed for sdb stack walks; honor the explicit opt-out too. */
2703 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, the wrapper body is inlined instead of
 * being called, avoiding the wrapper call overhead.
 */
2709 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2712 * Call the jit icall without a wrapper if possible.
2713 * The wrapper is needed for the following reasons:
2714 * - to handle exceptions thrown using mono_raise_exceptions () from the
2715 * icall function. The EH code needs the lmf frame pushed by the
2716 * wrapper to be able to unwind back to managed code.
2717 * - to be able to do stack walks for asynchronously suspended
2718 * threads when debugging.
2720 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Create the wrapper method lazily, publishing it with a barrier. */
2724 if (!info->wrapper_method) {
2725 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2726 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2728 mono_memory_barrier ();
2732 * Inline the wrapper method, which is basically a call to the C icall, and
2733 * an exception check.
2735 costs = inline_method (cfg, info->wrapper_method, NULL,
2736 args, NULL, il_offset, TRUE);
2737 g_assert (costs > 0);
2738 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Slow path: go through the wrapper. */
2742 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee may
 * return a small integer without initialized upper bits (pinvoke, or LLVM
 * compiled code). Returns the (possibly replaced) result instruction.
 */
2747 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2749 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2750 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2754 * Native code might return non register sized integers
2755 * without initializing the upper bits.
/* Pick the sign/zero-extension matching the return type's load opcode. */
2757 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2758 case OP_LOADI1_MEMBASE:
2759 widen_op = OP_ICONV_TO_I1;
2761 case OP_LOADU1_MEMBASE:
2762 widen_op = OP_ICONV_TO_U1;
2764 case OP_LOADI2_MEMBASE:
2765 widen_op = OP_ICONV_TO_I2;
2767 case OP_LOADU2_MEMBASE:
2768 widen_op = OP_ICONV_TO_U2;
2774 if (widen_op != -1) {
2775 int dreg = alloc_preg (cfg);
2778 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2779 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit code which throws a MethodAccessException for CALLEE being called
 * from CALLER; both are passed as runtime method handles via the rgctx.
 */
2790 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2792 MonoInst *args [16];
2794 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2795 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2797 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2801 mini_get_memcpy_method (void)
2803 static MonoMethod *memcpy_method = NULL;
2804 if (!memcpy_method) {
2805 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2807 g_error ("Old corlib found. Install a new one");
2809 return memcpy_method;
/*
 * mini_emit_write_barrier:
 *
 *   Emit a GC write barrier for storing the reference VALUE through PTR.
 * Picks, in order of preference: a dedicated backend opcode, inline card
 * table marking, or a call to the GC's write barrier method. No-op when
 * write barriers are disabled for this compile.
 */
2813 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2815 int card_table_shift_bits;
2816 gpointer card_table_mask;
2818 MonoInst *dummy_use;
2819 int nursery_shift_bits;
2820 size_t nursery_size;
2822 if (!cfg->gen_write_barriers)
2825 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2827 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2829 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fastest path: single backend opcode which hides the barrier internals. */
2831 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2834 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2835 wbarrier->sreg1 = ptr->dreg;
2836 wbarrier->sreg2 = value->dreg;
2837 MONO_ADD_INS (cfg->cbb, wbarrier);
2838 } else if (card_table) {
2839 int offset_reg = alloc_preg (cfg);
2844 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2845 * collector case, so, for the serial collector, it might slightly slow down nursery
2846 * collections. We also expect that the host system and the target system have the same card
2847 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked. */
2850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2851 if (card_table_mask)
2852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2854 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2855 * IMM's larger than 32bits.
2857 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2858 card_reg = ins->dreg;
2860 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* Mark the card dirty with a single byte store. */
2861 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier. */
2863 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2864 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
2867 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2871 mini_get_memset_method (void)
2873 static MonoMethod *memset_method = NULL;
2874 if (!memset_method) {
2875 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2877 g_error ("Old corlib found. Install a new one");
2879 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the value of type KLASS stored at DEST.
 * Uses an rgctx-provided bzero for gsharedvt types, inline stores for small
 * fixed-size types, and a call to the managed memset helper otherwise.
 */
2883 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2885 MonoInst *iargs [3];
2888 MonoMethod *memset_method;
2889 MonoInst *size_ins = NULL;
2890 MonoInst *bzero_ins = NULL;
2891 static MonoMethod *bzero_method;
2893 /* FIXME: Optimize this for the case when dest is an LDADDR */
2894 mono_class_init (klass);
/* gsharedvt: the size is only known at runtime; fetch size + bzero helper. */
2895 if (mini_is_gsharedvt_klass (klass)) {
2896 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2897 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2899 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
2900 g_assert (bzero_method);
2902 iargs [1] = size_ins;
2903 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
2907 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
2909 n = mono_class_value_size (klass, &align);
/* Small values are zeroed with inline stores instead of a call. */
2911 if (n <= sizeof (gpointer) * 8) {
2912 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2915 memset_method = mini_get_memset_method ();
2917 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2918 EMIT_NEW_ICONST (cfg, iargs [2], n);
2919 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2926 * Emit IR to return either the this pointer for instance method,
2927 * or the mrgctx for static methods.
2930 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2932 MonoInst *this_ins = NULL;
2934 g_assert (cfg->gshared);
/* Instance methods without method-level sharing get the rgctx via 'this'. */
2936 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2937 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2938 !method->klass->valuetype)
2939 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-level sharing: the mrgctx was stashed in the vtable var on entry. */
2941 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2942 MonoInst *mrgctx_loc, *mrgctx_var;
2944 g_assert (!this_ins);
2945 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2947 mrgctx_loc = mono_get_vtable_var (cfg);
2948 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2951 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
2952 MonoInst *mrgctx_loc, *mrgctx_var;
2954 /* Default interface methods need an mrgctx since the vtabke at runtime points at an implementing class */
2955 mrgctx_loc = mono_get_vtable_var (cfg);
2956 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2958 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static / valuetype methods: the vtable (or mrgctx) is passed explicitly. */
2961 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2962 MonoInst *vtable_loc, *vtable_var;
2964 g_assert (!this_ins);
2966 vtable_loc = mono_get_vtable_var (cfg);
2967 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If an mrgctx was passed, dereference it to get the class vtable. */
2969 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2970 MonoInst *mrgctx_var = vtable_var;
2973 vtable_reg = alloc_preg (cfg);
2974 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2975 vtable_var->type = STACK_PTR;
/* Otherwise load the vtable out of 'this'. */
2983 vtable_reg = alloc_preg (cfg);
2984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2989 static MonoJumpInfoRgctxEntry *
2990 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2992 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2993 res->method = method;
2994 res->in_mrgctx = in_mrgctx;
2995 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2996 res->data->type = patch_type;
2997 res->data->data.target = patch_data;
2998 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fast path for fetching ENTRY from RGCTX: walk the rgctx
 * slot array chain, falling back to the fill icall when any link or the
 * slot itself is still null. Two variants: llvm-only (icall only, no
 * fastpath) and the full inline walk.
 */
3003 static inline MonoInst*
3004 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3006 MonoInst *args [16];
/* llvm-only variant: always call the fill icall. */
3009 // FIXME: No fastpath since the slot is not a compile time constant
3011 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3012 if (entry->in_mrgctx)
3013 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3015 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3019 * FIXME: This can be called during decompose, which is a problem since it creates
3021 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3023 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3025 MonoBasicBlock *is_null_bb, *end_bb;
3026 MonoInst *res, *ins, *call;
3029 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the slot into array depth + index within the final array. */
3031 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3032 index = MONO_RGCTX_SLOT_INDEX (slot);
3034 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3035 for (depth = 0; ; ++depth) {
3036 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3038 if (index < size - 1)
3043 NEW_BBLOCK (cfg, end_bb);
3044 NEW_BBLOCK (cfg, is_null_bb);
3047 rgctx_reg = rgctx->dreg;
3049 rgctx_reg = alloc_preg (cfg);
3051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3052 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3053 NEW_BBLOCK (cfg, is_null_bb);
3055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Follow the chain of arrays, bailing to the slow path on any null link. */
3059 for (i = 0; i < depth; ++i) {
3060 int array_reg = alloc_preg (cfg);
3062 /* load ptr to next array */
3063 if (mrgctx && i == 0)
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3067 rgctx_reg = array_reg;
3068 /* is the ptr null? */
3069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3070 /* if yes, jump to actual trampoline */
3071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3075 val_reg = alloc_preg (cfg);
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3077 /* is the slot null? */
3078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3079 /* if yes, jump to actual trampoline */
3080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path hit: move the cached value into the result register. */
3083 res_reg = alloc_preg (cfg);
3084 MONO_INST_NEW (cfg, ins, OP_MOVE);
3085 ins->dreg = res_reg;
3086 ins->sreg1 = val_reg;
3087 MONO_ADD_INS (cfg->cbb, ins);
3089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: call the fill icall to compute and cache the slot. */
3092 MONO_START_BB (cfg, is_null_bb);
3094 EMIT_NEW_ICONST (cfg, args [1], index);
3096 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3098 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3099 MONO_INST_NEW (cfg, ins, OP_MOVE);
3100 ins->dreg = res_reg;
3101 ins->sreg1 = call->dreg;
3102 MONO_ADD_INS (cfg->cbb, ins);
3103 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3105 MONO_START_BB (cfg, end_bb);
3114 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/* Either inline the fetch, or emit a call to the lazy-fetch trampoline. */
3117 static inline MonoInst*
3118 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3121 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3123 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info (vtable, klass, ...) of KLASS from
 * the current method's rgctx.
 */
3127 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3128 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3130 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3131 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3133 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of signature SIG from the rgctx.
 */
3137 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3138 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3140 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3141 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3143 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of a gsharedvt call described by
 * SIG/CMETHOD from the rgctx.
 */
3147 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3148 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3150 MonoJumpInfoGSharedVtCall *call_info;
3151 MonoJumpInfoRgctxEntry *entry;
3154 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3155 call_info->sig = sig;
3156 call_info->method = cmethod;
3158 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3159 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3161 return emit_rgctx_fetch (cfg, rgctx, entry);
3165 * emit_get_rgctx_virt_method:
3167 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3170 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3171 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3173 MonoJumpInfoVirtMethod *info;
3174 MonoJumpInfoRgctxEntry *entry;
/* Pack the (klass, method) pair into a mempool-owned descriptor. */
3177 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3178 info->klass = klass;
3179 info->method = virt_method;
3181 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3182 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3184 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt info descriptor for CMETHOD from the
 * rgctx.
 */
3188 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3189 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3191 MonoJumpInfoRgctxEntry *entry;
3194 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3195 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3197 return emit_rgctx_fetch (cfg, rgctx, entry);
3201 * emit_get_rgctx_method:
3203 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3204 * normal constants, else emit a load from the rgctx.
3207 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3208 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the value is a compile-time constant. */
3210 if (!context_used) {
3213 switch (rgctx_type) {
3214 case MONO_RGCTX_INFO_METHOD:
3215 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3217 case MONO_RGCTX_INFO_METHOD_RGCTX:
3218 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3221 g_assert_not_reached ();
/* Shared case: go through the rgctx fetch machinery. */
3224 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3225 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3227 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of FIELD from the rgctx.
 */
3232 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3233 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3235 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3236 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3238 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt-info entry for (DATA, RGCTX_TYPE),
 * reusing an existing entry when possible and growing the entry array
 * (mempool-backed, doubling) when it is full.
 */
3242 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3244 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3245 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing entry first (LOCAL_OFFSET entries are never shared). */
3250 for (i = 0; i < info->num_entries; ++i) {
3251 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3253 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array when full; old storage stays in the mempool. */
3257 if (info->num_entries == info->count_entries) {
3258 MonoRuntimeGenericContextInfoTemplate *new_entries;
3259 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3261 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3263 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3264 info->entries = new_entries;
3265 info->count_entries = new_count_entries;
3268 idx = info->num_entries;
3269 template_ = &info->entries [idx];
3270 template_->info_type = rgctx_type;
3271 template_->data = data;
3273 info->num_entries ++;
3279 * emit_get_gsharedvt_info:
3281 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3284 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Resolve (data, rgctx_type) to a slot, then load entries [idx]. */
3289 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3290 /* Load info->entries [idx] */
3291 dreg = alloc_preg (cfg);
3292 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * mini_emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch the RGCTX_TYPE gsharedvt info of KLASS.
 */
3298 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3300 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3304 * On return the caller must check @klass for load errors.
/* Emit IR ensuring the static constructor / class init of KLASS has run. */
3307 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3309 MonoInst *vtable_arg;
3312 context_used = mini_class_check_context_used (cfg, klass);
/* Obtain the vtable: via rgctx when shared, as a constant otherwise. */
3315 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3316 klass, MONO_RGCTX_INFO_VTABLE);
3318 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3322 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3325 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3329 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3330 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3332 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3333 ins->sreg1 = vtable_arg->dreg;
3334 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline initialized-flag check + icall on the slow path. */
3337 MonoBasicBlock *inited_bb;
3338 MonoInst *args [16];
3340 inited_reg = alloc_ireg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3344 NEW_BBLOCK (cfg, inited_bb);
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3349 args [0] = vtable_arg;
3350 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3352 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IP, but only when sequence points are
 * enabled and METHOD is the method actually being compiled (not inlined).
 */
3357 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3361 if (cfg->gen_seq_points && cfg->method == method) {
3362 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3364 ins->flags |= MONO_INST_NONEMPTY_STACK;
3365 MONO_ADD_INS (cfg->cbb, ins);
3370 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3372 if (mini_get_debug_options ()->better_cast_details) {
3373 int vtable_reg = alloc_preg (cfg);
3374 int klass_reg = alloc_preg (cfg);
3375 MonoBasicBlock *is_null_bb = NULL;
3377 int to_klass_reg, context_used;
3380 NEW_BBLOCK (cfg, is_null_bb);
3382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3386 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3388 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3397 context_used = mini_class_check_context_used (cfg, klass);
3399 MonoInst *class_ins;
3401 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3402 to_klass_reg = class_ins->dreg;
3404 to_klass_reg = alloc_preg (cfg);
3405 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3410 MONO_START_BB (cfg, is_null_bb);
3415 mini_reset_cast_details (MonoCompile *cfg)
3417 /* Reset the variables holding the cast details */
3418 if (mini_get_debug_options ()->better_cast_details) {
3419 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3420 /* It is enough to reset the from field */
3421 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3426 * On return the caller must check @array_class for load errors
/* Emit IR checking that OBJ's vtable matches ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stelem covariance checks). */
3429 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3431 int vtable_reg = alloc_preg (cfg);
3434 context_used = mini_class_check_context_used (cfg, array_class);
3436 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
3438 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code compares classes; otherwise compare vtables directly. */
3440 if (cfg->opt & MONO_OPT_SHARED) {
3441 int class_reg = alloc_preg (cfg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3445 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3447 } else if (context_used) {
3448 MonoInst *vtable_ins;
3450 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3451 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3453 if (cfg->compile_aot) {
3457 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3459 vt_reg = alloc_preg (cfg);
3460 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3461 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: the vtable address can be embedded as an immediate. */
3464 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3470 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3472 mini_reset_cast_details (cfg);
3476 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3477 * generic code is generated.
3480 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3482 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3485 MonoInst *rgctx, *addr;
/* Shared code: call Nullable.Unbox through an rgctx-provided address. */
3487 /* FIXME: What if the class is shared? We might not
3488 have to get the address of the method from the
3490 addr = emit_get_rgctx_method (cfg, context_used, method,
3491 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3492 if (cfg->llvm_only) {
3493 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3494 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3496 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3498 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, passing the vtable when required. */
3501 gboolean pass_vtable, pass_mrgctx;
3502 MonoInst *rgctx_arg = NULL;
3504 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3505 g_assert (!pass_mrgctx);
3508 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3511 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3514 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to value type
 * KLASS: type-check the box (rank 0, matching element class) and return the
 * address of the payload (object + sizeof (MonoObject)).
 */
3519 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3523 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3524 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3525 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3526 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3528 obj_reg = sp [0]->dreg;
3529 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3532 /* FIXME: generics */
3533 g_assert (klass->rank == 0);
/* A boxed value type is never an array: reject non-zero rank. */
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the rgctx-provided element class. */
3543 MonoInst *element_class;
3545 /* This assertion is from the unboxcast insn */
3546 g_assert (klass->rank == 0);
3548 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3549 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: inline class check with cast-details bookkeeping. */
3554 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3555 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3556 mini_reset_cast_details (cfg);
/* The unboxed payload starts right after the object header. */
3559 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3560 MONO_ADD_INS (cfg->cbb, add);
3561 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ to the gsharedvt type KLASS. Since KLASS may be
 * instantiated as either a reference or a value type at runtime, branch on
 * the rgctx-provided box kind: value types unbox in place, reference types
 * are spilled to a temporary, and nullables go through the runtime helper.
 * Returns a load of the resulting value.
 */
3568 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3570 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3571 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3575 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type-check the cast via the castclass_unbox icall. */
3581 args [1] = klass_inst;
3584 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3586 NEW_BBLOCK (cfg, is_ref_bb);
3587 NEW_BBLOCK (cfg, is_nullable_bb);
3588 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box kind of KLASS. */
3589 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3596 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3597 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Value type case: the payload follows the object header. */
3601 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3602 MONO_ADD_INS (cfg->cbb, addr);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3607 MONO_START_BB (cfg, is_ref_bb);
3609 /* Save the ref to a temporary */
3610 dreg = alloc_ireg (cfg);
3611 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3612 addr->dreg = addr_reg;
3613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3617 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call the rgctx-provided Nullable unbox helper. */
3620 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3621 MonoInst *unbox_call;
3622 MonoMethodSignature *unbox_sig;
3624 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3625 unbox_sig->ret = &klass->byval_arg;
3626 unbox_sig->param_count = 1;
3627 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3630 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3632 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3634 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3635 addr->dreg = addr_reg;
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3641 MONO_START_BB (cfg, end_bb);
/* All paths left the address of the value in ADDR_REG; load the value. */
3644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR allocating an instance of KLASS.  FOR_BOX is TRUE when the
 * allocation is part of a box operation; CONTEXT_USED is non-zero for
 * shared generic code.  Chooses between managed (inline GC) allocators,
 * domain-shared icalls and vtable-specialized allocation functions.
 * NOTE(review): elided extraction — brace/else structure between the
 * numbered lines is not fully visible here.
 */
3650 * Returns NULL and set the cfg exception on error.
3653 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3655 MonoInst *iargs [2];
/* --- shared-code (context_used) path: go through the rgctx --- */
3660 MonoRgctxInfoType rgctx_info;
3661 MonoInst *iargs [2];
/* gsharedvt classes have no statically known instance size */
3662 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3664 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3666 if (cfg->opt & MONO_OPT_SHARED)
3667 rgctx_info = MONO_RGCTX_INFO_KLASS;
3669 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3670 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3672 if (cfg->opt & MONO_OPT_SHARED) {
3673 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3675 alloc_ftn = ves_icall_object_new;
3678 alloc_ftn = ves_icall_object_new_specific;
/* Prefer the managed allocator when the size is known at JIT time */
3681 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3682 if (known_instance_size) {
3683 int size = mono_class_instance_size (klass);
3684 if (size < sizeof (MonoObject))
3685 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3687 EMIT_NEW_ICONST (cfg, iargs [1], size);
3689 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3692 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
3695 if (cfg->opt & MONO_OPT_SHARED) {
3696 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3697 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3699 alloc_ftn = ves_icall_object_new;
3700 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3701 /* This happens often in argument checking code, eg. throw new FooException... */
3702 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3703 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3704 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3706 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3707 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: record a type-load error on the cfg */
3711 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3712 cfg->exception_ptr = klass;
3716 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3718 if (managed_alloc) {
3719 int size = mono_class_instance_size (klass);
3720 if (size < sizeof (MonoObject))
3721 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3723 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3724 EMIT_NEW_ICONST (cfg, iargs [1], size);
3725 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3727 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: allocator expects the instance size in pointer-sized words
 * (rounded up) as its first argument */
3729 guint32 lw = vtable->klass->instance_size;
3730 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3731 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3732 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3735 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3739 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing VAL (a value of class KLASS).  Special-cases
 * Nullable<T> (calls its Box method) and gsharedvt classes (runtime
 * branch on ref/vtype/nullable); otherwise allocates and stores the
 * vtype payload after the object header.
 * NOTE(review): elided extraction — some intervening lines are missing.
 */
3743 * Returns NULL and set the cfg exception on error.
3746 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3748 MonoInst *alloc, *ins;
/* Nullable<T>: boxing is implemented by Nullable<T>.Box () */
3750 if (mono_class_is_nullable (klass)) {
3751 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3754 if (cfg->llvm_only && cfg->gsharedvt) {
3755 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3756 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3757 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3759 /* FIXME: What if the class is shared? We might not
3760 have to get the method address from the RGCTX. */
3761 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3762 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3763 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3765 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable case: call Box directly, passing the vtable if
 * method sharing requires it */
3768 gboolean pass_vtable, pass_mrgctx;
3769 MonoInst *rgctx_arg = NULL;
3771 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3772 g_assert (!pass_mrgctx);
3775 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3778 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3781 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: box type only known at runtime, branch three ways */
3785 if (mini_is_gsharedvt_klass (klass)) {
3786 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3787 MonoInst *res, *is_ref, *src_var, *addr;
3790 dreg = alloc_ireg (cfg);
3792 NEW_BBLOCK (cfg, is_ref_bb);
3793 NEW_BBLOCK (cfg, is_nullable_bb);
3794 NEW_BBLOCK (cfg, end_bb);
3795 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3797 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3800 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype path: allocate and copy the payload after the header */
3803 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3806 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3807 ins->opcode = OP_STOREV_MEMBASE;
3809 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3810 res->type = STACK_OBJ;
3812 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3815 MONO_START_BB (cfg, is_ref_bb);
3817 /* val is a vtype, so has to load the value manually */
3818 src_var = get_vreg_to_inst (cfg, val->dreg);
3820 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3821 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3823 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3826 MONO_START_BB (cfg, is_nullable_bb);
3829 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3830 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3832 MonoMethodSignature *box_sig;
3835 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3836 * construct that method at JIT time, so have to do things by hand.
3838 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3839 box_sig->ret = &mono_defaults.object_class->byval_arg;
3840 box_sig->param_count = 1;
3841 box_sig->params [0] = &klass->byval_arg;
/* llvm-only vs normal calli; selector line elided in this extract */
3844 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
3846 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3847 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3848 res->type = STACK_OBJ;
3852 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3854 MONO_START_BB (cfg, end_bb);
/* Plain vtype: allocate, then store VAL after the MonoObject header */
3858 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3862 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily built set of corlib type names whose icalls may be called
 * directly (no wrapper); published once via a memory barrier */
3867 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD can be called directly rather than
 * through its wrapper.  Only a whitelist of corlib types is accepted.
 */
3870 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
3872 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3873 if (!direct_icalls_enabled (cfg))
3877 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
3878 * Whitelist a few icalls for now.
3880 if (!direct_icall_type_hash) {
3881 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
3883 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
3884 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
3885 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
3886 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Barrier makes the fully populated hash visible before the pointer */
3887 mono_memory_barrier ();
3888 direct_icall_type_hash = h;
3891 if (cmethod->klass == mono_defaults.math_class)
3893 /* No locking needed */
3894 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether calls to CMETHOD must keep the caller's frame
 * walkable.  Visible check: System.Type's "GetType" — presumably
 * because it inspects the calling assembly; confirm in full source.
 */
3900 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3902 if (cmethod->klass == mono_defaults.systemtype_class) {
3903 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion of Enum.HasFlag: computes
 * (*enum_this & enum_flag) == enum_flag without boxing.  Uses 32-bit or
 * 64-bit opcodes depending on the underlying enum type.
 */
3909 static G_GNUC_UNUSED MonoInst*
3910 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
3912 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
3913 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Decide is_i4 from the underlying type; 64-bit cases depend on
 * SIZEOF_REGISTER (case labels elided in this extract) */
3916 switch (enum_type->type) {
3919 #if SIZEOF_REGISTER == 8
3931 MonoInst *load, *and_, *cmp, *ceq;
3932 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3933 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3934 int dest_reg = alloc_ireg (cfg);
/* load value; and with flag; equal to flag <=> all flag bits set */
3936 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3937 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3938 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3939 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3941 ceq->type = STACK_I4;
/* Decompose for backends that lack the composite opcodes */
3944 load = mono_decompose_opcode (cfg, load);
3945 and_ = mono_decompose_opcode (cfg, and_);
3946 cmp = mono_decompose_opcode (cfg, cmp);
3947 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor: allocate the delegate object,
 * fill its target/method/method_code/invoke_impl fields, and install the
 * (possibly virtual) delegate trampoline.  VIRTUAL_ selects the virtual
 * delegate flavor; CONTEXT_USED is non-zero in shared generic code.
 * NOTE(review): elided extraction — some branch/brace lines are missing.
 */
3955 * Returns NULL and set the cfg exception on error.
3957 static G_GNUC_UNUSED MonoInst*
3958 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
3962 gpointer trampoline;
3963 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out (path elided) when no virtual-invoke impl exists */
3967 if (virtual_ && !cfg->llvm_only) {
3968 MonoMethod *invoke = mono_get_delegate_invoke (klass);
3971 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
3975 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
3979 /* Inline the contents of mono_delegate_ctor */
3981 /* Set target field */
3982 /* Optimize away setting of NULL target */
3983 if (!MONO_INS_IS_PCONST_NULL (target)) {
3984 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* GC write barrier for the managed target reference */
3985 if (cfg->gen_write_barriers) {
3986 dreg = alloc_preg (cfg);
3987 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3988 mini_emit_write_barrier (cfg, ptr, target);
3992 /* Set method field */
3993 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3994 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3997 * To avoid looking up the compiled code belonging to the target method
3998 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3999 * store it, and we fill it after the method has been compiled.
4001 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4002 MonoInst *code_slot_ins;
4005 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Per-domain code-slot cache, keyed by method, guarded by domain lock */
4007 domain = mono_domain_get ();
4008 mono_domain_lock (domain);
4009 if (!domain_jit_info (domain)->method_code_hash)
4010 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4011 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4013 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4014 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4016 mono_domain_unlock (domain);
4018 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4020 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize the delegate via runtime icalls instead of
 * trampolines */
4023 if (cfg->llvm_only) {
4024 MonoInst *args [16];
4029 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4030 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4033 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: emit a patchable delegate-trampoline constant */
4039 if (cfg->compile_aot) {
4040 MonoDelegateClassMethodPair *del_tramp;
4042 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4043 del_tramp->klass = klass;
4044 del_tramp->method = context_used ? NULL : method;
4045 del_tramp->is_virtual = virtual_;
4046 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4049 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4051 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4052 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4055 /* Set invoke_impl field */
4057 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4059 dreg = alloc_preg (cfg);
4060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4061 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4063 dreg = alloc_preg (cfg);
4064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4065 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field) */
4068 dreg = alloc_preg (cfg);
4069 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4072 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call creating a multi-dimensional array of rank RANK with the
 * dimension arguments in SP, via the vararg mono_array_new_va icall.
 * The vararg convention disables LLVM compilation for this method.
 */
4078 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4080 MonoJitICallInfo *info;
4082 /* Need to register the icall so it gets an icall wrapper */
4083 info = mono_get_array_new_va_icall (rank);
4085 cfg->flags |= MONO_CFG_HAS_VARARGS;
4087 /* mono_array_new_va () needs a vararg calling convention */
4088 cfg->exception_message = g_strdup ("array-new");
4089 cfg->disable_llvm = TRUE;
4091 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4092 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4096 * handle_constrained_gsharedvt_call:
4098 * Handle constrained calls where the receiver is a gsharedvt type.
4099 * Return the instruction representing the call. Set the cfg exception on failure.
4102 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4103 gboolean *ref_emit_widen)
4105 MonoInst *ins = NULL;
4106 gboolean emit_widen = *ref_emit_widen;
4109 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4110 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4111 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a restricted shape of signatures is supported; everything else
 * falls through to GSHAREDVT_FAILURE below */
4113 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4114 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4115 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4116 MonoInst *args [16];
4119 * This case handles calls to
4120 * - object:ToString()/Equals()/GetHashCode(),
4121 * - System.IComparable<T>:CompareTo()
4122 * - System.IEquatable<T>:Equals ()
4123 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args[1] = target method (through rgctx if generic context is used) */
4127 if (mono_method_check_context_used (cmethod))
4128 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4130 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4131 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4133 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4134 if (fsig->hasthis && fsig->param_count) {
4135 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4136 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4137 ins->dreg = alloc_preg (cfg);
4138 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4139 MONO_ADD_INS (cfg->cbb, ins);
4142 if (mini_is_gsharedvt_type (fsig->params [0])) {
4143 int addr_reg, deref_arg_reg;
4145 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4146 deref_arg_reg = alloc_preg (cfg);
4147 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4148 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4150 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4151 addr_reg = ins->dreg;
4152 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4154 EMIT_NEW_ICONST (cfg, args [3], 0);
4155 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4158 EMIT_NEW_ICONST (cfg, args [3], 0);
4159 EMIT_NEW_ICONST (cfg, args [4], 0);
/* Do the actual call out of line */
4161 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/unwrap per return type */
4164 if (mini_is_gsharedvt_type (fsig->ret)) {
4165 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4166 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4170 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4171 MONO_ADD_INS (cfg->cbb, add);
4173 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4174 MONO_ADD_INS (cfg->cbb, ins);
4175 /* ins represents the call result */
4178 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4181 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Prepend an OP_LOAD_GOTADDR to the entry basic block so got_var holds
 * the GOT address, and add a dummy use in the exit block to keep the
 * variable live for backend-generated uses.  No-op if there is no
 * got_var or it was already allocated.
 */
4190 mono_emit_load_got_addr (MonoCompile *cfg)
4192 MonoInst *getaddr, *dummy_use;
4194 if (!cfg->got_var || cfg->got_var_allocated)
4197 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4198 getaddr->cil_code = cfg->header->code;
4199 getaddr->dreg = cfg->got_var->dreg;
4201 /* Add it to the start of the first bblock */
4202 if (cfg->bb_entry->code) {
4203 getaddr->next = cfg->bb_entry->code;
4204 cfg->bb_entry->code = getaddr;
4207 MONO_ADD_INS (cfg->bb_entry, getaddr);
4209 cfg->got_var_allocated = TRUE;
4212 * Add a dummy use to keep the got_var alive, since real uses might
4213 * only be generated by the back ends.
4214 * Add it to end_bblock, so the variable's lifetime covers the whole
4216 * It would be better to make the usage of the got var explicit in all
4217 * cases when the backend needs it (i.e. calls, throw etc.), so this
4218 * wouldn't be needed.
4220 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4221 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily initialized inline size limit; overridable via MONO_INLINELIMIT */
4224 static int inline_limit;
4225 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects noinline/synchronized/MBR methods, over-limit bodies, deep
 * inline chains, methods whose class needs an un-runnable cctor, soft
 * float R4 signatures, and profiler-instrumented methods.
 * NOTE(review): elided extraction — the return statements between the
 * numbered lines are not visible here.
 */
4228 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4230 MonoMethodHeaderSummary header;
4232 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4233 MonoMethodSignature *sig = mono_method_signature (method);
4237 if (cfg->disable_inline)
/* Cap on recursive inlining depth */
4242 if (cfg->inline_depth > 10)
4245 if (!mono_method_get_header_summary (method, &header))
4248 /*runtime, icall and pinvoke are checked by summary call*/
4249 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4250 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4251 (mono_class_is_marshalbyref (method->klass)) ||
4255 /* also consider num_locals? */
4256 /* Do the size check early to avoid creating vtables */
4257 if (!inline_limit_inited) {
4259 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4260 inline_limit = atoi (inlinelimit);
4261 g_free (inlinelimit);
4263 inline_limit = INLINE_LENGTH_LIMIT;
4264 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit */
4266 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4270 * if we can initialize the class of the method right away, we do,
4271 * otherwise we don't allow inlining if the class needs initialization,
4272 * since it would mean inserting a call to mono_runtime_class_init()
4273 * inside the inlined code
4275 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4278 if (!(cfg->opt & MONO_OPT_SHARED)) {
4279 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4280 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4281 if (method->klass->has_cctor) {
4282 vtable = mono_class_vtable (cfg->domain, method->klass);
4285 if (!cfg->compile_aot) {
4287 if (!mono_runtime_class_init_full (vtable, &error)) {
4288 mono_error_cleanup (&error);
4293 } else if (mono_class_is_before_field_init (method->klass)) {
4294 if (cfg->run_cctors && method->klass->has_cctor) {
4295 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4296 if (!method->klass->runtime_info)
4297 /* No vtable created yet */
4299 vtable = mono_class_vtable (cfg->domain, method->klass);
4302 /* This makes so that inline cannot trigger */
4303 /* .cctors: too many apps depend on them */
4304 /* running with a specific order... */
4305 if (! vtable->initialized)
4308 if (!mono_runtime_class_init_full (vtable, &error)) {
4309 mono_error_cleanup (&error);
4313 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4314 if (!method->klass->runtime_info)
4315 /* No vtable created yet */
4317 vtable = mono_class_vtable (cfg->domain, method->klass);
4320 if (!vtable->initialized)
4325 * If we're compiling for shared code
4326 * the cctor will need to be run at aot method load time, for example,
4327 * or at the end of the compilation of the inlining method.
4329 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft float: cannot inline methods with R4 in the signature */
4333 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4334 if (mono_arch_is_soft_float ()) {
4336 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4338 for (i = 0; i < sig->param_count; ++i)
4339 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compilation blacklist (e.g. to break inline cycles) */
4344 if (g_list_find (cfg->dont_inline, method))
/* Profiler enter/leave instrumentation is incompatible with inlining */
4347 if (mono_profiler_get_call_instrumentation_flags (method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access on KLASS from METHOD requires
 * emitting a class-init check.  JIT-time initialized classes, beforefieldinit
 * self-access and static self-access are exempt.
 */
4354 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4356 if (!cfg->compile_aot) {
4358 if (vtable->initialized)
4362 if (mono_class_is_before_field_init (klass)) {
4363 if (cfg->method == method)
4367 if (!mono_class_needs_cctor_run (klass, method))
4370 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4371 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of the one-dimensional array ARR
 * with element class KLASS; BCHECK adds a bounds check.  Uses an LEA on
 * x86/amd64 for power-of-two sizes and an rgctx-provided element size
 * for gsharedvt element classes.
 */
4378 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4382 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size element: size must come from the rgctx below */
4385 if (mini_is_gsharedvt_variable_klass (klass)) {
4388 mono_class_init (klass);
4389 size = mono_class_array_element_size (klass);
4392 mult_reg = alloc_preg (cfg);
4393 array_reg = arr->dreg;
4394 index_reg = index->dreg;
4396 #if SIZEOF_REGISTER == 8
4397 /* The array reg is 64 bits but the index reg is only 32 */
4398 if (COMPILE_LLVM (cfg)) {
4400 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4401 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4402 * of OP_X86_LEA for llvm.
4404 index2_reg = index_reg;
4406 index2_reg = alloc_preg (cfg);
4407 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index */
4410 if (index->type == STACK_I8) {
4411 index2_reg = alloc_preg (cfg);
4412 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4414 index2_reg = index_reg;
4419 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: single LEA for 1/2/4/8-byte elements */
4421 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4422 if (size == 1 || size == 2 || size == 4 || size == 8) {
4423 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4425 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4426 ins->klass = mono_class_get_element_class (klass);
4427 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof(vector) */
4433 add_reg = alloc_ireg_mp (cfg);
4436 MonoInst *rgctx_ins;
4439 g_assert (cfg->gshared);
4440 context_used = mini_class_check_context_used (cfg, klass);
4441 g_assert (context_used);
4442 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4443 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4447 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4448 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4449 ins->klass = mono_class_get_element_class (klass);
4450 ins->type = STACK_MP;
4451 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [i,j] of a rank-2 array: load the bounds
 * array, range-check both indexes against their lower bound and length,
 * then compute addr = arr + ((i' * len2) + j') * size + offsetof(vector).
 * Throws IndexOutOfRangeException on out-of-range indexes.
 */
4457 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4459 int bounds_reg = alloc_preg (cfg);
4460 int add_reg = alloc_ireg_mp (cfg);
4461 int mult_reg = alloc_preg (cfg);
4462 int mult2_reg = alloc_preg (cfg);
4463 int low1_reg = alloc_preg (cfg);
4464 int low2_reg = alloc_preg (cfg);
4465 int high1_reg = alloc_preg (cfg);
4466 int high2_reg = alloc_preg (cfg);
4467 int realidx1_reg = alloc_preg (cfg);
4468 int realidx2_reg = alloc_preg (cfg);
4469 int sum_reg = alloc_preg (cfg);
4470 int index1, index2, tmpreg;
4474 mono_class_init (klass);
4475 size = mono_class_array_element_size (klass);
4477 index1 = index_ins1->dreg;
4478 index2 = index_ins2->dreg;
4480 #if SIZEOF_REGISTER == 8
4481 /* The array reg is 64 bits but the index reg is only 32 */
4482 if (COMPILE_LLVM (cfg)) {
4485 tmpreg = alloc_preg (cfg);
4486 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4488 tmpreg = alloc_preg (cfg);
4489 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4493 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4497 /* range checking */
4498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4499 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* realidx = index - lower_bound; unsigned compare against length */
4501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4502 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4503 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4504 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4505 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4506 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4507 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds[1] lives one MonoArrayBounds further on */
4509 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4510 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4511 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4513 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4514 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4515 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address computation */
4517 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4520 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4521 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4523 ins->type = STACK_MP;
4525 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element address for an array Address () call CMETHOD with
 * the array + indexes in SP.  Rank 1 and (non-gsharedvt) rank 2 are
 * inlined; other ranks call the marshal-generated address helper.
 * IS_SET excludes the trailing value argument from the rank count.
 */
4531 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4535 MonoMethod *addr_method;
4537 MonoClass *eclass = cmethod->klass->element_class;
4539 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4542 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4544 /* emit_ldelema_2 depends on OP_LMUL */
4545 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4546 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4549 if (mini_is_gsharedvt_variable_klass (eclass))
4552 element_size = mono_class_array_element_size (eclass);
4553 addr_method = mono_marshal_get_array_address (rank, element_size);
4554 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4559 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address
 * (no bounds check — callers already did it) and copy between it and
 * the byref value argument; emits a write barrier for reference
 * element types on the store path.
 */
4561 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4563 MonoInst *addr, *store, *load;
4564 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4566 /* the bounds check is already done by the callers */
4567 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> array element (with barrier for refs) */
4569 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4570 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4571 if (mini_type_is_reference (&eklass->byval_arg))
4572 mini_emit_write_barrier (cfg, addr, load);
/* get path: array element -> value */
4574 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4575 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving type variables
 * via mini_type_is_reference on its byval type.
 */
4582 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4584 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem: SP = { array, index, value }.  Reference-type stores
 * with safety checks go through the virtual stelemref helper (covariance
 * check); gsharedvt elements use OP_STOREV_MEMBASE; constant indexes
 * store at a precomputed offset; otherwise the address is computed via
 * ldelema.  Write barriers are emitted for reference elements.
 */
4588 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Reference store with checks: virtual stelemref does the type check */
4590 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4591 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4592 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4593 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4594 MonoInst *iargs [3];
4597 mono_class_setup_vtable (obj_array);
4598 g_assert (helper->slot);
4600 if (sp [0]->type != STACK_OBJ)
4602 if (sp [2]->type != STACK_OBJ)
4609 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4613 if (mini_is_gsharedvt_variable_klass (klass)) {
4616 // FIXME-VT: OP_ICONST optimization
4617 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4618 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4619 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset at JIT time */
4620 } else if (sp [1]->opcode == OP_ICONST) {
4621 int array_reg = sp [0]->dreg;
4622 int index_reg = sp [1]->dreg;
4623 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4625 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4626 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4629 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4630 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4632 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4633 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4634 if (generic_class_is_reference_type (cfg, klass))
4635 mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Emit IR for Array.UnsafeStore/UnsafeLoad: IS_SET picks a store
 * (delegated to emit_array_store with safety checks disabled) or a raw
 * element load. The element class comes from the signature: params [2]
 * for stores, the return type for loads.
 * NOTE(review): branch headers/returns are missing from this extraction.
 */
4642 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4647 eklass = mono_class_from_mono_type (fsig->params [2]);
4649 eklass = mono_class_from_mono_type (fsig->ret);
/* FALSE -> no bounds/covariance checks: that is the "unsafe" contract. */
4652 return emit_array_store (cfg, eklass, args, FALSE);
4654 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4655 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether Array.UnsafeMov<S,R> can be compiled as a plain move
 * from PARAM_KLASS to RETURN_KLASS. Rejects: value/reference mixes,
 * reference types, types containing GC references, struct/scalar mixes
 * and floats. Accepts equal sizes, or distinct sizes when both operands
 * fit in a 32 bit register (no widening needed, see comment below).
 * Fix: the four '¶m_klass' occurrences were the HTML entity '&para;'
 * mis-decoded out of '&param_klass' (cf. the parallel '&return_klass'
 * expressions on the same lines); restored to '&param_klass'.
 * NOTE(review): the TRUE/FALSE return lines between the checks are
 * missing from this extraction.
 */
4661 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4664 int param_size, return_size;
/* Normalize enums/generic instantiations to their underlying types. */
4666 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
4667 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4669 if (cfg->verbose_level > 3)
4670 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4672 //Don't allow mixing reference types with value types
4673 if (param_klass->valuetype != return_klass->valuetype) {
4674 if (cfg->verbose_level > 3)
4675 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
4679 if (!param_klass->valuetype) {
4680 if (cfg->verbose_level > 3)
4681 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types containing GC references would need write barriers on the move. */
4686 if (param_klass->has_references || return_klass->has_references)
4689 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
4690 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4691 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4692 if (cfg->verbose_level > 3)
4693 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in different registers; a plain move would be wrong. */
4697 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4698 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4699 if (cfg->verbose_level > 3)
4700 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4704 param_size = mono_class_value_size (param_klass, &align);
4705 return_size = mono_class_value_size (return_klass, &align);
4707 //We can do it if sizes match
4708 if (param_size == return_size) {
4709 if (cfg->verbose_level > 3)
4710 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4714 //No simple way to handle struct if sizes don't match
4715 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
4716 if (cfg->verbose_level > 3)
4717 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4722 * Same reg size category.
4723 * A quick note on why we don't require widening here.
4724 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4726 * Since the source value comes from a function argument, the JIT will already have
4727 * the value in a VREG and performed any widening needed before (say, when loading from a field).
4729 if (param_size <= 4 && return_size <= 4) {
4730 if (cfg->verbose_level > 3)
4731 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Emit IR for Array.UnsafeMov<S,R>: a reinterpreting move from the
 * signature's parameter type to its return type. Bails out for
 * gsharedvt variable return types; otherwise succeeds when the types
 * themselves, or (for rank-1 arrays) their element classes, pass
 * is_unsafe_mov_compatible.
 * NOTE(review): the actual move-emission/return lines are missing from
 * this extraction.
 */
4739 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4741 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4742 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
4744 if (mini_is_gsharedvt_variable_type (fsig->ret))
4747 //Valuetypes that are semantically equivalent or numbers than can be widened to
4748 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
4751 //Arrays of valuetypes that are semantically equivalent
4752 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR: SIMD ctor
 * intrinsics first (when MONO_OPT_SIMD is enabled and the arch supports
 * them), then native-types intrinsics. Returns NULL-or-instruction per
 * the usual intrinsic convention (exact fallthrough lines missing here).
 */
4759 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4761 #ifdef MONO_ARCH_SIMD_INTRINSICS
4762 MonoInst *ins = NULL;
4764 if (cfg->opt & MONO_OPT_SIMD) {
4765 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4771 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant, stored in the instruction's
 * backend data) to the current basic block.
 */
4775 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4777 MonoInst *ins = NULL;
4778 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4779 MONO_ADD_INS (cfg->cbb, ins);
4780 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics used only by the LLVM backend. For System.Math:
 * Sin/Cos/Sqrt and double Abs map to unary float opcodes (one R8
 * argument, R8 result); Min/Max of I4/U4/I8/U8 map to the conditional
 * move opcodes when MONO_OPT_CMOV is enabled.
 * NOTE(review): the opcode-assignment lines for several branches (e.g.
 * what Sin/Cos/Sqrt assign) are missing from this extraction.
 */
4786 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4788 MonoInst *ins = NULL;
4791 /* The LLVM backend supports these intrinsics */
4792 if (cmethod->klass == mono_defaults.math_class) {
4793 if (strcmp (cmethod->name, "Sin") == 0) {
4795 } else if (strcmp (cmethod->name, "Cos") == 0) {
4797 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4799 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsics: one R8 source, fresh R8 destination. */
4803 if (opcode && fsig->param_count == 1) {
4804 MONO_INST_NEW (cfg, ins, opcode);
4805 ins->type = STACK_R8;
4806 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4807 ins->sreg1 = args [0]->dreg;
4808 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max become cmov-style opcodes, selected by operand type. */
4812 if (cfg->opt & MONO_OPT_CMOV) {
4813 if (strcmp (cmethod->name, "Min") == 0) {
4814 if (fsig->params [0]->type == MONO_TYPE_I4)
4816 if (fsig->params [0]->type == MONO_TYPE_U4)
4817 opcode = OP_IMIN_UN;
4818 else if (fsig->params [0]->type == MONO_TYPE_I8)
4820 else if (fsig->params [0]->type == MONO_TYPE_U8)
4821 opcode = OP_LMIN_UN;
4822 } else if (strcmp (cmethod->name, "Max") == 0) {
4823 if (fsig->params [0]->type == MONO_TYPE_I4)
4825 if (fsig->params [0]->type == MONO_TYPE_U4)
4826 opcode = OP_IMAX_UN;
4827 else if (fsig->params [0]->type == MONO_TYPE_I8)
4829 else if (fsig->params [0]->type == MONO_TYPE_U8)
4830 opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the operand width. */
4834 if (opcode && fsig->param_count == 2) {
4835 MONO_INST_NEW (cfg, ins, opcode);
4836 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4837 ins->dreg = mono_alloc_dreg (cfg, ins->type);
4838 ins->sreg1 = args [0]->dreg;
4839 ins->sreg2 = args [1]->dreg;
4840 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe in shared (generic-sharing) code: the
 * System.Array UnsafeStore/UnsafeLoad/UnsafeMov helpers, dispatched by
 * method name to the corresponding emit_array_* routine.
 */
4848 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4850 if (cmethod->klass == mono_defaults.array_class) {
4851 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4852 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4853 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4854 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4855 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
4856 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mono_type_is_native_blittable:
 *
 *   Whether T has the same representation managed and native, so
 * PtrToStructure-style conversions can become a plain type load.
 * Reference types and primitive scalars are decided immediately; other
 * types must be blittable AND have matching native/managed sizes.
 * NOTE(review): the TRUE/FALSE return lines are missing from this
 * extraction.
 */
4864 mono_type_is_native_blittable (MonoType *t)
4866 if (MONO_TYPE_IS_REFERENCE (t))
4869 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
4872 MonoClass *klass = mono_class_from_mono_type (t);
4874 //MonoClass::blitable depends on mono_class_setup_fields being done.
4875 mono_class_setup_fields (klass);
4876 if (!klass->blittable)
4879 // If the native marshal size is different we can't convert PtrToStructure to a type load
4880 if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
4888 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4890 MonoInst *ins = NULL;
4891 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
4893 if (cmethod->klass == mono_defaults.string_class) {
4894 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
4895 int dreg = alloc_ireg (cfg);
4896 int index_reg = alloc_preg (cfg);
4897 int add_reg = alloc_preg (cfg);
4899 #if SIZEOF_REGISTER == 8
4900 if (COMPILE_LLVM (cfg)) {
4901 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
4903 /* The array reg is 64 bits but the index reg is only 32 */
4904 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4907 index_reg = args [1]->dreg;
4909 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4911 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4912 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
4913 add_reg = ins->dreg;
4914 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4917 int mult_reg = alloc_preg (cfg);
4918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4919 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4920 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4921 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
4923 type_from_op (cfg, ins, NULL, NULL);
4925 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
4926 int dreg = alloc_ireg (cfg);
4927 /* Decompose later to allow more optimizations */
4928 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4929 ins->type = STACK_I4;
4930 ins->flags |= MONO_INST_FAULT;
4931 cfg->cbb->has_array_access = TRUE;
4932 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4937 } else if (cmethod->klass == mono_defaults.object_class) {
4938 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
4939 int dreg = alloc_ireg_ref (cfg);
4940 int vt_reg = alloc_preg (cfg);
4941 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
4943 type_from_op (cfg, ins, NULL, NULL);
4946 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
4947 int dreg = alloc_ireg (cfg);
4948 int t1 = alloc_ireg (cfg);
4950 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4951 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4952 ins->type = STACK_I4;
4955 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
4956 MONO_INST_NEW (cfg, ins, OP_NOP);
4957 MONO_ADD_INS (cfg->cbb, ins);
4961 } else if (cmethod->klass == mono_defaults.array_class) {
4962 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4963 return emit_array_generic_access (cfg, fsig, args, FALSE);
4964 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
4965 return emit_array_generic_access (cfg, fsig, args, TRUE);
4967 #ifndef MONO_BIG_ARRAYS
4969 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4972 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
4973 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
4974 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4975 int dreg = alloc_ireg (cfg);
4976 int bounds_reg = alloc_ireg_mp (cfg);
4977 MonoBasicBlock *end_bb, *szarray_bb;
4978 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4980 NEW_BBLOCK (cfg, end_bb);
4981 NEW_BBLOCK (cfg, szarray_bb);
4983 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4984 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4985 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4986 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4987 /* Non-szarray case */
4989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4990 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4992 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4993 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4994 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4995 MONO_START_BB (cfg, szarray_bb);
4998 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4999 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5001 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5002 MONO_START_BB (cfg, end_bb);
5004 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5005 ins->type = STACK_I4;
5011 if (cmethod->name [0] != 'g')
5014 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5015 int dreg = alloc_ireg (cfg);
5016 int vtable_reg = alloc_preg (cfg);
5017 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5018 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5019 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5020 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5021 type_from_op (cfg, ins, NULL, NULL);
5024 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5025 int dreg = alloc_ireg (cfg);
5027 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5028 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5029 type_from_op (cfg, ins, NULL, NULL);
5034 } else if (cmethod->klass == runtime_helpers_class) {
5035 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5036 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5038 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5039 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5041 g_assert (ctx->method_inst);
5042 g_assert (ctx->method_inst->type_argc == 1);
5043 MonoType *arg_type = ctx->method_inst->type_argv [0];
5049 /* Resolve the argument class as possible so we can handle common cases fast */
5050 t = mini_get_underlying_type (arg_type);
5051 klass = mono_class_from_mono_type (t);
5052 mono_class_init (klass);
5053 if (MONO_TYPE_IS_REFERENCE (t))
5054 EMIT_NEW_ICONST (cfg, ins, 1);
5055 else if (MONO_TYPE_IS_PRIMITIVE (t))
5056 EMIT_NEW_ICONST (cfg, ins, 0);
5057 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5058 EMIT_NEW_ICONST (cfg, ins, 1);
5059 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5060 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5062 g_assert (cfg->gshared);
5064 /* Have to use the original argument class here */
5065 MonoClass *arg_class = mono_class_from_mono_type (arg_type);
5066 int context_used = mini_class_check_context_used (cfg, arg_class);
5068 /* This returns 1 or 2 */
5069 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5070 int dreg = alloc_ireg (cfg);
5071 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5077 } else if (cmethod->klass == mono_defaults.monitor_class) {
5078 gboolean is_enter = FALSE;
5079 gboolean is_v4 = FALSE;
5081 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5085 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5090 * To make async stack traces work, icalls which can block should have a wrapper.
5091 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5093 MonoBasicBlock *end_bb;
5095 NEW_BBLOCK (cfg, end_bb);
5097 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5100 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5101 MONO_START_BB (cfg, end_bb);
5104 } else if (cmethod->klass == mono_defaults.thread_class) {
5105 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5106 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5107 MONO_ADD_INS (cfg->cbb, ins);
5109 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5110 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5111 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5113 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5115 if (fsig->params [0]->type == MONO_TYPE_I1)
5116 opcode = OP_LOADI1_MEMBASE;
5117 else if (fsig->params [0]->type == MONO_TYPE_U1)
5118 opcode = OP_LOADU1_MEMBASE;
5119 else if (fsig->params [0]->type == MONO_TYPE_I2)
5120 opcode = OP_LOADI2_MEMBASE;
5121 else if (fsig->params [0]->type == MONO_TYPE_U2)
5122 opcode = OP_LOADU2_MEMBASE;
5123 else if (fsig->params [0]->type == MONO_TYPE_I4)
5124 opcode = OP_LOADI4_MEMBASE;
5125 else if (fsig->params [0]->type == MONO_TYPE_U4)
5126 opcode = OP_LOADU4_MEMBASE;
5127 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5128 opcode = OP_LOADI8_MEMBASE;
5129 else if (fsig->params [0]->type == MONO_TYPE_R4)
5130 opcode = OP_LOADR4_MEMBASE;
5131 else if (fsig->params [0]->type == MONO_TYPE_R8)
5132 opcode = OP_LOADR8_MEMBASE;
5133 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5134 opcode = OP_LOAD_MEMBASE;
5137 MONO_INST_NEW (cfg, ins, opcode);
5138 ins->inst_basereg = args [0]->dreg;
5139 ins->inst_offset = 0;
5140 MONO_ADD_INS (cfg->cbb, ins);
5142 switch (fsig->params [0]->type) {
5149 ins->dreg = mono_alloc_ireg (cfg);
5150 ins->type = STACK_I4;
5154 ins->dreg = mono_alloc_lreg (cfg);
5155 ins->type = STACK_I8;
5159 ins->dreg = mono_alloc_ireg (cfg);
5160 #if SIZEOF_REGISTER == 8
5161 ins->type = STACK_I8;
5163 ins->type = STACK_I4;
5168 ins->dreg = mono_alloc_freg (cfg);
5169 ins->type = STACK_R8;
5172 g_assert (mini_type_is_reference (fsig->params [0]));
5173 ins->dreg = mono_alloc_ireg_ref (cfg);
5174 ins->type = STACK_OBJ;
5178 if (opcode == OP_LOADI8_MEMBASE)
5179 ins = mono_decompose_opcode (cfg, ins);
5181 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5185 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5187 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5189 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5190 opcode = OP_STOREI1_MEMBASE_REG;
5191 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5192 opcode = OP_STOREI2_MEMBASE_REG;
5193 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5194 opcode = OP_STOREI4_MEMBASE_REG;
5195 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5196 opcode = OP_STOREI8_MEMBASE_REG;
5197 else if (fsig->params [0]->type == MONO_TYPE_R4)
5198 opcode = OP_STORER4_MEMBASE_REG;
5199 else if (fsig->params [0]->type == MONO_TYPE_R8)
5200 opcode = OP_STORER8_MEMBASE_REG;
5201 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5202 opcode = OP_STORE_MEMBASE_REG;
5205 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5207 MONO_INST_NEW (cfg, ins, opcode);
5208 ins->sreg1 = args [1]->dreg;
5209 ins->inst_destbasereg = args [0]->dreg;
5210 ins->inst_offset = 0;
5211 MONO_ADD_INS (cfg->cbb, ins);
5213 if (opcode == OP_STOREI8_MEMBASE_REG)
5214 ins = mono_decompose_opcode (cfg, ins);
5219 } else if (cmethod->klass->image == mono_defaults.corlib &&
5220 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5221 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5224 #if SIZEOF_REGISTER == 8
5225 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5226 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5227 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5228 ins->dreg = mono_alloc_preg (cfg);
5229 ins->sreg1 = args [0]->dreg;
5230 ins->type = STACK_I8;
5231 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5232 MONO_ADD_INS (cfg->cbb, ins);
5236 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5238 /* 64 bit reads are already atomic */
5239 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5240 load_ins->dreg = mono_alloc_preg (cfg);
5241 load_ins->inst_basereg = args [0]->dreg;
5242 load_ins->inst_offset = 0;
5243 load_ins->type = STACK_I8;
5244 MONO_ADD_INS (cfg->cbb, load_ins);
5246 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5253 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5254 MonoInst *ins_iconst;
5257 if (fsig->params [0]->type == MONO_TYPE_I4) {
5258 opcode = OP_ATOMIC_ADD_I4;
5259 cfg->has_atomic_add_i4 = TRUE;
5261 #if SIZEOF_REGISTER == 8
5262 else if (fsig->params [0]->type == MONO_TYPE_I8)
5263 opcode = OP_ATOMIC_ADD_I8;
5266 if (!mono_arch_opcode_supported (opcode))
5268 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5269 ins_iconst->inst_c0 = 1;
5270 ins_iconst->dreg = mono_alloc_ireg (cfg);
5271 MONO_ADD_INS (cfg->cbb, ins_iconst);
5273 MONO_INST_NEW (cfg, ins, opcode);
5274 ins->dreg = mono_alloc_ireg (cfg);
5275 ins->inst_basereg = args [0]->dreg;
5276 ins->inst_offset = 0;
5277 ins->sreg2 = ins_iconst->dreg;
5278 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5279 MONO_ADD_INS (cfg->cbb, ins);
5281 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5282 MonoInst *ins_iconst;
5285 if (fsig->params [0]->type == MONO_TYPE_I4) {
5286 opcode = OP_ATOMIC_ADD_I4;
5287 cfg->has_atomic_add_i4 = TRUE;
5289 #if SIZEOF_REGISTER == 8
5290 else if (fsig->params [0]->type == MONO_TYPE_I8)
5291 opcode = OP_ATOMIC_ADD_I8;
5294 if (!mono_arch_opcode_supported (opcode))
5296 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5297 ins_iconst->inst_c0 = -1;
5298 ins_iconst->dreg = mono_alloc_ireg (cfg);
5299 MONO_ADD_INS (cfg->cbb, ins_iconst);
5301 MONO_INST_NEW (cfg, ins, opcode);
5302 ins->dreg = mono_alloc_ireg (cfg);
5303 ins->inst_basereg = args [0]->dreg;
5304 ins->inst_offset = 0;
5305 ins->sreg2 = ins_iconst->dreg;
5306 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5307 MONO_ADD_INS (cfg->cbb, ins);
5309 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5312 if (fsig->params [0]->type == MONO_TYPE_I4) {
5313 opcode = OP_ATOMIC_ADD_I4;
5314 cfg->has_atomic_add_i4 = TRUE;
5316 #if SIZEOF_REGISTER == 8
5317 else if (fsig->params [0]->type == MONO_TYPE_I8)
5318 opcode = OP_ATOMIC_ADD_I8;
5321 if (!mono_arch_opcode_supported (opcode))
5323 MONO_INST_NEW (cfg, ins, opcode);
5324 ins->dreg = mono_alloc_ireg (cfg);
5325 ins->inst_basereg = args [0]->dreg;
5326 ins->inst_offset = 0;
5327 ins->sreg2 = args [1]->dreg;
5328 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5329 MONO_ADD_INS (cfg->cbb, ins);
5332 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5333 MonoInst *f2i = NULL, *i2f;
5334 guint32 opcode, f2i_opcode, i2f_opcode;
5335 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5336 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5338 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5339 fsig->params [0]->type == MONO_TYPE_R4) {
5340 opcode = OP_ATOMIC_EXCHANGE_I4;
5341 f2i_opcode = OP_MOVE_F_TO_I4;
5342 i2f_opcode = OP_MOVE_I4_TO_F;
5343 cfg->has_atomic_exchange_i4 = TRUE;
5345 #if SIZEOF_REGISTER == 8
5347 fsig->params [0]->type == MONO_TYPE_I8 ||
5348 fsig->params [0]->type == MONO_TYPE_R8 ||
5349 fsig->params [0]->type == MONO_TYPE_I) {
5350 opcode = OP_ATOMIC_EXCHANGE_I8;
5351 f2i_opcode = OP_MOVE_F_TO_I8;
5352 i2f_opcode = OP_MOVE_I8_TO_F;
5355 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5356 opcode = OP_ATOMIC_EXCHANGE_I4;
5357 cfg->has_atomic_exchange_i4 = TRUE;
5363 if (!mono_arch_opcode_supported (opcode))
5367 /* TODO: Decompose these opcodes instead of bailing here. */
5368 if (COMPILE_SOFT_FLOAT (cfg))
5371 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5372 f2i->dreg = mono_alloc_ireg (cfg);
5373 f2i->sreg1 = args [1]->dreg;
5374 if (f2i_opcode == OP_MOVE_F_TO_I4)
5375 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5376 MONO_ADD_INS (cfg->cbb, f2i);
5379 MONO_INST_NEW (cfg, ins, opcode);
5380 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5381 ins->inst_basereg = args [0]->dreg;
5382 ins->inst_offset = 0;
5383 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5384 MONO_ADD_INS (cfg->cbb, ins);
5386 switch (fsig->params [0]->type) {
5388 ins->type = STACK_I4;
5391 ins->type = STACK_I8;
5394 #if SIZEOF_REGISTER == 8
5395 ins->type = STACK_I8;
5397 ins->type = STACK_I4;
5402 ins->type = STACK_R8;
5405 g_assert (mini_type_is_reference (fsig->params [0]));
5406 ins->type = STACK_OBJ;
5411 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5412 i2f->dreg = mono_alloc_freg (cfg);
5413 i2f->sreg1 = ins->dreg;
5414 i2f->type = STACK_R8;
5415 if (i2f_opcode == OP_MOVE_I4_TO_F)
5416 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5417 MONO_ADD_INS (cfg->cbb, i2f);
5422 if (cfg->gen_write_barriers && is_ref)
5423 mini_emit_write_barrier (cfg, args [0], args [1]);
5425 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5426 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5427 guint32 opcode, f2i_opcode, i2f_opcode;
5428 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5429 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5431 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5432 fsig->params [1]->type == MONO_TYPE_R4) {
5433 opcode = OP_ATOMIC_CAS_I4;
5434 f2i_opcode = OP_MOVE_F_TO_I4;
5435 i2f_opcode = OP_MOVE_I4_TO_F;
5436 cfg->has_atomic_cas_i4 = TRUE;
5438 #if SIZEOF_REGISTER == 8
5440 fsig->params [1]->type == MONO_TYPE_I8 ||
5441 fsig->params [1]->type == MONO_TYPE_R8 ||
5442 fsig->params [1]->type == MONO_TYPE_I) {
5443 opcode = OP_ATOMIC_CAS_I8;
5444 f2i_opcode = OP_MOVE_F_TO_I8;
5445 i2f_opcode = OP_MOVE_I8_TO_F;
5448 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5449 opcode = OP_ATOMIC_CAS_I4;
5450 cfg->has_atomic_cas_i4 = TRUE;
5456 if (!mono_arch_opcode_supported (opcode))
5460 /* TODO: Decompose these opcodes instead of bailing here. */
5461 if (COMPILE_SOFT_FLOAT (cfg))
5464 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5465 f2i_new->dreg = mono_alloc_ireg (cfg);
5466 f2i_new->sreg1 = args [1]->dreg;
5467 if (f2i_opcode == OP_MOVE_F_TO_I4)
5468 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5469 MONO_ADD_INS (cfg->cbb, f2i_new);
5471 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5472 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5473 f2i_cmp->sreg1 = args [2]->dreg;
5474 if (f2i_opcode == OP_MOVE_F_TO_I4)
5475 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5476 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5479 MONO_INST_NEW (cfg, ins, opcode);
5480 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5481 ins->sreg1 = args [0]->dreg;
5482 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5483 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5484 MONO_ADD_INS (cfg->cbb, ins);
5486 switch (fsig->params [1]->type) {
5488 ins->type = STACK_I4;
5491 ins->type = STACK_I8;
5494 #if SIZEOF_REGISTER == 8
5495 ins->type = STACK_I8;
5497 ins->type = STACK_I4;
5501 ins->type = cfg->r4_stack_type;
5504 ins->type = STACK_R8;
5507 g_assert (mini_type_is_reference (fsig->params [1]));
5508 ins->type = STACK_OBJ;
5513 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5514 i2f->dreg = mono_alloc_freg (cfg);
5515 i2f->sreg1 = ins->dreg;
5516 i2f->type = STACK_R8;
5517 if (i2f_opcode == OP_MOVE_I4_TO_F)
5518 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5519 MONO_ADD_INS (cfg->cbb, i2f);
5524 if (cfg->gen_write_barriers && is_ref)
5525 mini_emit_write_barrier (cfg, args [0], args [1]);
5527 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5528 fsig->params [1]->type == MONO_TYPE_I4) {
5529 MonoInst *cmp, *ceq;
5531 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5534 /* int32 r = CAS (location, value, comparand); */
5535 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5536 ins->dreg = alloc_ireg (cfg);
5537 ins->sreg1 = args [0]->dreg;
5538 ins->sreg2 = args [1]->dreg;
5539 ins->sreg3 = args [2]->dreg;
5540 ins->type = STACK_I4;
5541 MONO_ADD_INS (cfg->cbb, ins);
5543 /* bool result = r == comparand; */
5544 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5545 cmp->sreg1 = ins->dreg;
5546 cmp->sreg2 = args [2]->dreg;
5547 cmp->type = STACK_I4;
5548 MONO_ADD_INS (cfg->cbb, cmp);
5550 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5551 ceq->dreg = alloc_ireg (cfg);
5552 ceq->type = STACK_I4;
5553 MONO_ADD_INS (cfg->cbb, ceq);
5555 /* *success = result; */
5556 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5558 cfg->has_atomic_cas_i4 = TRUE;
5560 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5561 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5565 } else if (cmethod->klass->image == mono_defaults.corlib &&
5566 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5567 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5570 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5572 MonoType *t = fsig->params [0];
5574 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5576 g_assert (t->byref);
5577 /* t is a byref type, so the reference check is more complicated */
5578 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5579 if (t->type == MONO_TYPE_I1)
5580 opcode = OP_ATOMIC_LOAD_I1;
5581 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5582 opcode = OP_ATOMIC_LOAD_U1;
5583 else if (t->type == MONO_TYPE_I2)
5584 opcode = OP_ATOMIC_LOAD_I2;
5585 else if (t->type == MONO_TYPE_U2)
5586 opcode = OP_ATOMIC_LOAD_U2;
5587 else if (t->type == MONO_TYPE_I4)
5588 opcode = OP_ATOMIC_LOAD_I4;
5589 else if (t->type == MONO_TYPE_U4)
5590 opcode = OP_ATOMIC_LOAD_U4;
5591 else if (t->type == MONO_TYPE_R4)
5592 opcode = OP_ATOMIC_LOAD_R4;
5593 else if (t->type == MONO_TYPE_R8)
5594 opcode = OP_ATOMIC_LOAD_R8;
5595 #if SIZEOF_REGISTER == 8
5596 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5597 opcode = OP_ATOMIC_LOAD_I8;
5598 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5599 opcode = OP_ATOMIC_LOAD_U8;
5601 else if (t->type == MONO_TYPE_I)
5602 opcode = OP_ATOMIC_LOAD_I4;
5603 else if (is_ref || t->type == MONO_TYPE_U)
5604 opcode = OP_ATOMIC_LOAD_U4;
5608 if (!mono_arch_opcode_supported (opcode))
5611 MONO_INST_NEW (cfg, ins, opcode);
5612 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5613 ins->sreg1 = args [0]->dreg;
5614 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5615 MONO_ADD_INS (cfg->cbb, ins);
5618 case MONO_TYPE_BOOLEAN:
5625 ins->type = STACK_I4;
5629 ins->type = STACK_I8;
5633 #if SIZEOF_REGISTER == 8
5634 ins->type = STACK_I8;
5636 ins->type = STACK_I4;
5640 ins->type = cfg->r4_stack_type;
5643 ins->type = STACK_R8;
5647 ins->type = STACK_OBJ;
5653 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5655 MonoType *t = fsig->params [0];
5658 g_assert (t->byref);
5659 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5660 if (t->type == MONO_TYPE_I1)
5661 opcode = OP_ATOMIC_STORE_I1;
5662 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5663 opcode = OP_ATOMIC_STORE_U1;
5664 else if (t->type == MONO_TYPE_I2)
5665 opcode = OP_ATOMIC_STORE_I2;
5666 else if (t->type == MONO_TYPE_U2)
5667 opcode = OP_ATOMIC_STORE_U2;
5668 else if (t->type == MONO_TYPE_I4)
5669 opcode = OP_ATOMIC_STORE_I4;
5670 else if (t->type == MONO_TYPE_U4)
5671 opcode = OP_ATOMIC_STORE_U4;
5672 else if (t->type == MONO_TYPE_R4)
5673 opcode = OP_ATOMIC_STORE_R4;
5674 else if (t->type == MONO_TYPE_R8)
5675 opcode = OP_ATOMIC_STORE_R8;
5676 #if SIZEOF_REGISTER == 8
5677 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5678 opcode = OP_ATOMIC_STORE_I8;
5679 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5680 opcode = OP_ATOMIC_STORE_U8;
5682 else if (t->type == MONO_TYPE_I)
5683 opcode = OP_ATOMIC_STORE_I4;
5684 else if (is_ref || t->type == MONO_TYPE_U)
5685 opcode = OP_ATOMIC_STORE_U4;
5689 if (!mono_arch_opcode_supported (opcode))
5692 MONO_INST_NEW (cfg, ins, opcode);
5693 ins->dreg = args [0]->dreg;
5694 ins->sreg1 = args [1]->dreg;
5695 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5696 MONO_ADD_INS (cfg->cbb, ins);
5698 if (cfg->gen_write_barriers && is_ref)
5699 mini_emit_write_barrier (cfg, args [0], args [1]);
5705 } else if (cmethod->klass->image == mono_defaults.corlib &&
5706 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5707 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5708 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5709 if (mini_should_insert_breakpoint (cfg->method)) {
5710 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5712 MONO_INST_NEW (cfg, ins, OP_NOP);
5713 MONO_ADD_INS (cfg->cbb, ins);
5717 } else if (cmethod->klass->image == mono_defaults.corlib &&
5718 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5719 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5720 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5722 EMIT_NEW_ICONST (cfg, ins, 1);
5724 EMIT_NEW_ICONST (cfg, ins, 0);
5727 } else if (cmethod->klass->image == mono_defaults.corlib &&
5728 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5729 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5730 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5731 /* No stack walks are currently available, so implement this as an intrinsic */
5732 MonoInst *assembly_ins;
5734 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5735 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5738 } else if (cmethod->klass->image == mono_defaults.corlib &&
5739 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5740 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5741 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5742 /* No stack walks are currently available, so implement this as an intrinsic */
5743 MonoInst *method_ins;
5744 MonoMethod *declaring = cfg->method;
5746 /* This returns the declaring generic method */
5747 if (declaring->is_inflated)
5748 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5749 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5750 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5751 cfg->no_inline = TRUE;
5752 if (cfg->method != cfg->current_method)
5753 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5756 } else if (cmethod->klass == mono_defaults.math_class) {
5758 * There is general branchless code for Min/Max, but it does not work for
5760 * http://everything2.com/?node_id=1051618
5762 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5763 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5764 MONO_INST_NEW (cfg, ins, OP_PCEQ);
5765 ins->dreg = alloc_preg (cfg);
5766 ins->type = STACK_I4;
5767 MONO_ADD_INS (cfg->cbb, ins);
5769 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5770 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5771 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5772 !strcmp (cmethod->klass->name, "Selector")) ||
5773 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5774 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5775 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
5776 !strcmp (cmethod->klass->name, "Selector"))
5778 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
5779 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
5780 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
5783 MonoJumpInfoToken *ji;
5786 if (args [0]->opcode == OP_GOT_ENTRY) {
5787 pi = (MonoInst *)args [0]->inst_p1;
5788 g_assert (pi->opcode == OP_PATCH_INFO);
5789 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5790 ji = (MonoJumpInfoToken *)pi->inst_p0;
5792 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5793 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
5796 NULLIFY_INS (args [0]);
5798 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
5799 return_val_if_nok (&cfg->error, NULL);
5801 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5802 ins->dreg = mono_alloc_ireg (cfg);
5805 MONO_ADD_INS (cfg->cbb, ins);
5808 } else if (cmethod->klass->image == mono_defaults.corlib &&
5809 (strcmp (cmethod->klass->name_space, "System.Runtime.InteropServices") == 0) &&
5810 (strcmp (cmethod->klass->name, "Marshal") == 0)) {
5811 //Convert Marshal.PtrToStructure<T> of blittable T to direct loads
5812 if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
5813 cmethod->is_inflated &&
5814 fsig->param_count == 1 &&
5815 !mini_method_check_context_used (cfg, cmethod)) {
5817 MonoGenericContext *method_context = mono_method_get_context (cmethod);
5818 MonoType *arg0 = method_context->method_inst->type_argv [0];
5819 if (mono_type_is_native_blittable (arg0))
5820 return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
5824 #ifdef MONO_ARCH_SIMD_INTRINSICS
5825 if (cfg->opt & MONO_OPT_SIMD) {
5826 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5832 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5836 if (COMPILE_LLVM (cfg)) {
5837 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5842 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5846 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to redirect a call to METHOD to a more efficient
 * replacement, returning the emitted call instruction, or NULL when no
 * redirection applies.  The case visible here rewrites the internal
 * String.InternalAllocateStr icall into a direct call to the GC's managed
 * string allocator (skipped when compiling with MONO_OPT_SHARED).
 */
5849 inline static MonoInst*
5850 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5851 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
5853 if (method->klass == mono_defaults.string_class) {
5854 /* managed string allocation support */
5855 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
5856 MonoInst *iargs [2];
5857 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5858 MonoMethod *managed_alloc = NULL;
5860 g_assert (vtable); /* Should not fail since it is System.String */
5861 #ifndef MONO_CROSS_COMPILE
/* The managed allocator is target-specific, so it is not available when cross-compiling */
5862 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
5866 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5867 iargs [1] = args [0];
/* Call the managed allocator with (vtable, length) */
5868 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   When inlining, create an OP_LOCAL variable for each argument of the
 * callee signature SIG (including the implicit 'this') and emit stores of
 * the stack values SP into them, publishing the vars through cfg->args so
 * the inlined body can reference its arguments.
 */
5875 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5877 MonoInst *store, *temp;
5880 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' argument the static type comes from the stack entry */
5881 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5884 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5885 * would be different than the MonoInst's used to represent arguments, and
5886 * the ldelema implementation can't deal with that.
5887 * Solution: When ldelema is used on an inline argument, create a var for
5888 * it, emit ldelema on that var, and emit the saving code below in
5889 * inline_method () if needed.
5891 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5892 cfg->args [i] = temp;
5893 /* This uses cfg->args [i] which is set by the preceding line */
5894 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5895 store->cil_code = sp [0]->cil_code;
/* Compile-time switches enabling the env-var based inline filtering below */
5900 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5901 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5903 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only permit inlining of callees whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  Returns TRUE when CALLED_METHOD may be inlined.
 */
5905 check_inline_called_method_name_limit (MonoMethod *called_method)
5908 static const char *limit = NULL;
5910 if (limit == NULL) {
/* Cache the environment lookup on first use */
5911 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5913 if (limit_string != NULL)
5914 limit = limit_string;
5919 if (limit [0] != '\0') {
5920 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) characters matter */
5922 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5923 g_free (called_method_name);
5925 //return (strncmp_result <= 0);
5926 return (strncmp_result == 0);
5933 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only permit inlining inside callers whose full name starts
 * with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Returns TRUE when inlining into CALLER_METHOD is
 * allowed.
 */
5935 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5938 static const char *limit = NULL;
5940 if (limit == NULL) {
/* Cache the environment lookup on first use */
5941 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5942 if (limit_string != NULL) {
5943 limit = limit_string;
5949 if (limit [0] != '\0') {
5950 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: only the first strlen (limit) characters matter */
5952 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5953 g_free (caller_method_name);
5955 //return (strncmp_result <= 0);
5956 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which zero-initializes vreg DREG according to RTYPE:
 * NULL for pointer/reference types, 0 for integer types, 0.0 for floating
 * point types and VZERO for value types.
 */
5964 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static storage so the R4/R8 const instructions can point at a stable zero */
5966 static double r8_0 = 0.0;
5967 static float r4_0 = 0.0;
5971 rtype = mini_get_underlying_type (rtype);
5975 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5976 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5977 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5978 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5979 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5980 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* r4fp: R4 values stay in single precision, so use OP_R4CONST */
5981 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5982 ins->type = STACK_R4;
5983 ins->inst_p0 = (void*)&r4_0;
5985 MONO_ADD_INS (cfg->cbb, ins);
5986 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5987 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5988 ins->type = STACK_R8;
5989 ins->inst_p0 = (void*)&r8_0;
5991 MONO_ADD_INS (cfg->cbb, ins);
5992 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5993 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5994 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type params which are constrained to valuetypes also need VZERO */
5995 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
5996 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5998 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initialization opcodes for DREG so the IR remains valid
 * without performing a real initialization; types with no dummy opcode fall
 * back to emit_init_rvar ().
 */
6003 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6007 rtype = mini_get_underlying_type (rtype);
6011 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6012 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6013 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6014 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6015 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6016 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6017 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6018 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6019 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6020 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6021 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6022 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6023 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6024 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: do a real initialization instead */
6026 emit_init_rvar (cfg, dreg, rtype);
6030 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6032 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6034 MonoInst *var = cfg->locals [local];
6035 if (COMPILE_SOFT_FLOAT (cfg)) {
/* Soft float: initialize a fresh vreg, then store it into the local */
6037 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6038 emit_init_rvar (cfg, reg, type);
6039 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6042 emit_init_rvar (cfg, var->dreg, type);
6044 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public entry point: thin wrapper around the static inline_method () below */
6049 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6051 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
/*
 * inline_method:
 *
 *   Inline the body of CMETHOD at the current IR position by recursively
 * invoking mono_method_to_ir () into freshly created start/end bblocks.
 */
6057 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
6060 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6061 guchar *ip, guint real_offset, gboolean inline_always)
6064 MonoInst *ins, *rvar = NULL;
6065 MonoMethodHeader *cheader;
6066 MonoBasicBlock *ebblock, *sbblock;
6068 MonoMethod *prev_inlined_method;
6069 MonoInst **prev_locals, **prev_args;
6070 MonoType **prev_arg_types;
6071 guint prev_real_offset;
6072 GHashTable *prev_cbb_hash;
6073 MonoBasicBlock **prev_cil_offset_to_bb;
6074 MonoBasicBlock *prev_cbb;
6075 const unsigned char *prev_ip;
6076 unsigned char *prev_cil_start;
6077 guint32 prev_cil_offset_to_bb_len;
6078 MonoMethod *prev_current_method;
6079 MonoGenericContext *prev_generic_context;
6080 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6082 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based filtering of what gets inlined (debugging aid) */
6084 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6085 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6088 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6089 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6094 fsig = mono_method_signature (cmethod);
6096 if (cfg->verbose_level > 2)
6097 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6099 if (!cmethod->inline_info) {
6100 cfg->stat_inlineable_methods++;
6101 cmethod->inline_info = 1;
6104 /* allocate local variables */
6105 cheader = mono_method_get_header_checked (cmethod, &error);
6107 if (inline_always) {
/* A forced inline that fails must surface the error on the cfg */
6108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6109 mono_error_move (&cfg->error, &error);
6111 mono_error_cleanup (&error);
6116 /* Must verify before creating locals as it can cause the JIT to assert. */
6117 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6118 mono_metadata_free_mh (cheader);
6122 /* allocate space to store the return value */
6123 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6124 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6127 prev_locals = cfg->locals;
6128 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6129 for (i = 0; i < cheader->num_locals; ++i)
6130 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6132 /* allocate start and end blocks */
6133 /* This is needed so if the inline is aborted, we can clean up */
6134 NEW_BBLOCK (cfg, sbblock);
6135 sbblock->real_offset = real_offset;
6137 NEW_BBLOCK (cfg, ebblock);
6138 ebblock->block_num = cfg->num_bblocks++;
6139 ebblock->real_offset = real_offset;
/* Save the compilation state that mono_method_to_ir () will overwrite */
6141 prev_args = cfg->args;
6142 prev_arg_types = cfg->arg_types;
6143 prev_inlined_method = cfg->inlined_method;
6144 cfg->inlined_method = cmethod;
6145 cfg->ret_var_set = FALSE;
6146 cfg->inline_depth ++;
6147 prev_real_offset = cfg->real_offset;
6148 prev_cbb_hash = cfg->cbb_hash;
6149 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6150 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6151 prev_cil_start = cfg->cil_start;
6153 prev_cbb = cfg->cbb;
6154 prev_current_method = cfg->current_method;
6155 prev_generic_context = cfg->generic_context;
6156 prev_ret_var_set = cfg->ret_var_set;
6157 prev_disable_inline = cfg->disable_inline;
6159 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively build IR for the callee body */
6162 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6164 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state */
6166 cfg->inlined_method = prev_inlined_method;
6167 cfg->real_offset = prev_real_offset;
6168 cfg->cbb_hash = prev_cbb_hash;
6169 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6170 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6171 cfg->cil_start = prev_cil_start;
6173 cfg->locals = prev_locals;
6174 cfg->args = prev_args;
6175 cfg->arg_types = prev_arg_types;
6176 cfg->current_method = prev_current_method;
6177 cfg->generic_context = prev_generic_context;
6178 cfg->ret_var_set = prev_ret_var_set;
6179 cfg->disable_inline = prev_disable_inline;
6180 cfg->inline_depth --;
/* Accept: cheap bodies, forced inlines, and [MethodImpl (AggressiveInlining)] */
6182 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6183 if (cfg->verbose_level > 2)
6184 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6186 cfg->stat_inlined_methods++;
6188 /* always add some code to avoid block split failures */
6189 MONO_INST_NEW (cfg, ins, OP_NOP);
6190 MONO_ADD_INS (prev_cbb, ins);
6192 prev_cbb->next_bb = sbblock;
6193 link_bblock (cfg, prev_cbb, sbblock);
6196 * Get rid of the begin and end bblocks if possible to aid local
6199 if (prev_cbb->out_count == 1)
6200 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6202 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6203 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6205 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6206 MonoBasicBlock *prev = ebblock->in_bb [0];
6208 if (prev->next_bb == ebblock) {
6209 mono_merge_basic_blocks (cfg, prev, ebblock);
6211 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6212 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6213 cfg->cbb = prev_cbb;
6216 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6221 * It's possible that the rvar is set in some prev bblock, but not in others.
6227 for (i = 0; i < ebblock->in_count; ++i) {
6228 bb = ebblock->in_bb [i];
6230 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6233 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6243 * If the inlined method contains only a throw, then the ret var is not
6244 * set, so set it to a dummy value.
6247 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the inlined method's return value onto the evaluation stack */
6249 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6252 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: discard the generated bblocks and roll back */
6255 if (cfg->verbose_level > 2)
6256 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6257 cfg->exception_type = MONO_EXCEPTION_NONE;
6259 /* This gets rid of the newly added bblocks */
6260 cfg->cbb = prev_cbb;
6262 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6267 * Some of these comments may well be out-of-date.
6268 * Design decisions: we do a single pass over the IL code (and we do bblock
6269 * splitting/merging in the few cases when it's required: a back jump to an IL
6270 * address that was not already seen as bblock starting point).
6271 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6272 * Complex operations are decomposed in simpler ones right away. We need to let the
6273 * arch-specific code peek and poke inside this process somehow (except when the
6274 * optimizations can take advantage of the full semantic info of coarse opcodes).
6275 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6276 * MonoInst->opcode initially is the IL opcode or some simplification of that
6277 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6278 * opcode with value bigger than OP_LAST.
6279 * At this point the IR can be handed over to an interpreter, a dumb code generator
6280 * or to the optimizing code generator that will translate it to SSA form.
6282 * Profiling directed optimizations.
6283 * We may compile by default with few or no optimizations and instrument the code
6284 * or the user may indicate what methods to optimize the most either in a config file
6285 * or through repeated runs where the compiler applies offline the optimizations to
6286 * each method and then decides if it was worth it.
/* Verification helpers used while building IR from CIL; each expands to a jump
 * to the UNVERIFIED / TYPE_LOAD_ERROR handling in mono_method_to_ir () */
6289 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6290 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6291 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6292 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6293 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6294 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6295 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6296 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6298 /* offset from br.s -> br like opcodes */
6299 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL address IP does not start a basic block different
 * from BB, i.e. the instruction at IP still belongs to BB.
 */
6302 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6304 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6306 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the CIL stream from START to END, creating a basic block (via
 * GET_BBLOCK) at every branch target and at every instruction following a
 * branch/switch.  Basic blocks containing a throw are marked out-of-line so
 * they can be moved to cold code.
 */
6310 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6312 unsigned char *ip = start;
6313 unsigned char *target;
6316 MonoBasicBlock *bblock;
6317 const MonoOpcode *opcode;
6320 cli_addr = ip - start;
6321 i = mono_opcode_value ((const guint8 **)&ip, end);
6324 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding to find the next instruction / targets */
6325 switch (opcode->argument) {
6326 case MonoInlineNone:
6329 case MonoInlineString:
6330 case MonoInlineType:
6331 case MonoInlineField:
6332 case MonoInlineMethod:
6335 case MonoShortInlineR:
6342 case MonoShortInlineVar:
6343 case MonoShortInlineI:
6346 case MonoShortInlineBrTarget:
/* Short branch: 1-byte signed displacement relative to the next instruction */
6347 target = start + cli_addr + 2 + (signed char)ip [1];
6348 GET_BBLOCK (cfg, bblock, target);
6351 GET_BBLOCK (cfg, bblock, ip);
6353 case MonoInlineBrTarget:
/* Long branch: 4-byte signed displacement relative to the next instruction */
6354 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6355 GET_BBLOCK (cfg, bblock, target);
6358 GET_BBLOCK (cfg, bblock, ip);
6360 case MonoInlineSwitch: {
6361 guint32 n = read32 (ip + 1);
/* The switch targets are relative to the end of the n-entry jump table */
6364 cli_addr += 5 + 4 * n;
6365 target = start + cli_addr;
6366 GET_BBLOCK (cfg, bblock, target);
6368 for (j = 0; j < n; ++j) {
6369 target = start + cli_addr + (gint32)read32 (ip);
6370 GET_BBLOCK (cfg, bblock, target);
6380 g_assert_not_reached ();
6383 if (i == CEE_THROW) {
6384 unsigned char *bb_start = ip - 1;
6386 /* Find the start of the bblock containing the throw */
6388 while ((bb_start >= start) && !bblock) {
6389 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-only blocks are cold: allow them to be laid out out-of-line */
6393 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in method M to a MonoMethod, allowing methods on open
 * constructed types.  For wrapper methods the token is looked up in the
 * wrapper data and the result is inflated with CONTEXT when one is given.
 */
6403 static inline MonoMethod *
6404 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6410 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6411 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6413 method = mono_class_inflate_generic_method_checked (method, context, error);
6416 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared code
 * a method on an open constructed type is rejected and a bad-image error is
 * recorded on CFG.
 */
6422 static inline MonoMethod *
6423 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6426 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6428 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6429 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
/* Without a cfg there is nowhere to store the error, so clean it up */
6433 if (!method && !cfg)
6434 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a standalone method signature, reading it from the
 * wrapper data for wrapper methods, then inflate it with CONTEXT.
 */
6439 static inline MonoMethodSignature*
6440 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6442 MonoMethodSignature *fsig;
6445 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6446 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6448 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6449 return_val_if_nok (error, NULL);
6452 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and lazily cache) the SecurityManager.ThrowException helper
 * method used to raise security exceptions at runtime.
 */
6458 throw_exception (void)
6460 static MonoMethod *method = NULL;
6463 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6464 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* Emit a managed call which throws the pre-created exception object EX */
6471 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6473 MonoMethod *thrower = throw_exception ();
6476 EMIT_NEW_PCONST (cfg, args [0], ex);
6477 mono_emit_method_call (cfg, thrower, args, NULL);
6481 * Return the original method if a wrapper is specified. We can only access
6482 * the custom attributes from the original method.
6485 get_original_method (MonoMethod *method)
6487 if (method->wrapper_type == MONO_WRAPPER_NONE)
6490 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6491 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6494 /* in other cases we need to find the original method */
6495 return mono_marshal_method_from_wrapper (method);
/* Emit a runtime throw if CoreCLR security forbids CALLER from accessing FIELD */
6499 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6501 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6502 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6504 emit_throw_exception (cfg, ex);
/* Emit a runtime throw if CoreCLR security forbids CALLER from calling CALLEE */
6508 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6510 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6511 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6513 emit_throw_exception (cfg, ex);
6517 * Check that the IL instructions at ip are the array initialization
6518 * sequence and return the pointer to the data and the size.
6521 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Expected IL pattern following the newarr:
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray (...) */
6524 * newarr[System.Int32]
6526 * ldtoken field valuetype ...
6527 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6529 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6531 guint32 token = read32 (ip + 7);
6532 guint32 field_token = read32 (ip + 2);
/* Strip the token's table tag to get the row index */
6533 guint32 field_index = field_token & 0xffffff;
6535 const char *data_ptr;
6537 MonoMethod *cmethod;
6538 MonoClass *dummy_class;
6539 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6543 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6547 *out_field_token = field_token;
6549 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the called method really is RuntimeHelpers::InitializeArray from corlib */
6552 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6554 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6558 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6559 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* The array data must fit within the RVA field's declared size */
6576 if (size > mono_type_size (field->type, &dummy_align))
6579 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6580 if (!image_is_dynamic (method->klass->image)) {
6581 field_index = read32 (ip + 2) & 0xffffff;
6582 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6583 data_ptr = mono_image_rva_map (method->klass->image, rva);
6584 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6585 /* for aot code we do the lookup on load */
6586 if (aot && data_ptr)
6587 return (const char *)GUINT_TO_POINTER (rva);
6589 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data directly from the field */
6591 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgram exception on CFG describing the invalid IL at
 * IP in METHOD, including a disassembly of the offending instruction when
 * the method body can still be parsed.
 */
6599 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6602 char *method_fname = mono_method_full_name (method, TRUE);
6604 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6607 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6608 mono_error_cleanup (&error);
6609 } else if (header->code_size == 0)
6610 method_code = g_strdup ("method body is empty.");
6612 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6613 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6614 g_free (method_fname);
6615 g_free (method_code);
/* The header is freed later together with the rest of the cfg */
6616 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value on top of the stack into local N, retargeting
 * a just-emitted constant directly to the local's vreg when possible.
 */
6620 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6623 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6624 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6625 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6626 /* Optimize reg-reg moves away */
6628 * Can't optimize other opcodes, since sp[0] might point to
6629 * the last ins of a decomposed opcode.
6631 sp [0]->dreg = (cfg)->locals [n]->dreg;
6633 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6638 * ldloca inhibits many optimizations so try to get rid of it in common
6641 static inline unsigned char *
6642 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6652 local = read16 (ip + 2);
/* ldloca immediately followed by initobj in the same bblock:
 * initialize the local directly instead of going through its address */
6656 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6657 /* From the INITOBJ case */
6658 token = read32 (ip + 2);
6659 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6660 CHECK_TYPELOAD (klass);
6661 type = mini_get_underlying_type (&klass->byval_arg);
6662 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 * Emit IR for a virtual/interface call in llvm-only mode, where vtable and
 * IMT slots hold function descriptors (addr + extra arg) rather than raw
 * code addresses or trampolines.  Four strategies are emitted depending on
 * the call shape:
 *   1. plain virtual call (non-interface, non-generic, non-gsharedvt)
 *   2. simple interface call through an IMT slot
 *   3. generic-virtual / variant-interface / special-array-interface call
 *      through a lazily-extended IMT thunk with a slowpath fallback
 *   4. fallback icall-based resolution (gsharedvt and remaining cases)
 * SP[0] is the receiver; CONTEXT_USED drives rgctx lookups for CMETHOD.
 * NOTE(review): interior lines (braces, else branches) are missing from
 * this chunk; comments describe only the visible statements.
 */
6670 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6672 MonoInst *icall_args [16];
6673 MonoInst *call_target, *ins, *vtable_ins;
6674 int arg_reg, this_reg, vtable_reg;
6675 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6676 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6677 gboolean variant_iface = FALSE;
6680 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
6683 * In llvm-only mode, vtables contain function descriptors instead of
6684 * method addresses/trampolines.
/* The receiver must be non-null before we load its vtable */
6686 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods use IMT slots; class methods use vtable indexes — branch structure not fully visible here */
6689 slot = mono_method_get_imt_slot (cmethod);
6691 slot = mono_method_get_vtable_index (cmethod);
6693 this_reg = sp [0]->dreg;
6695 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6696 variant_iface = TRUE;
/* Case 1: a normal (non-interface, non-generic, non-gsharedvt) virtual call */
6698 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6700 * The simplest case, a normal virtual call.
6702 int slot_reg = alloc_preg (cfg);
6703 int addr_reg = alloc_preg (cfg);
6704 int arg_reg = alloc_preg (cfg);
6705 MonoBasicBlock *non_null_bb;
6707 vtable_reg = alloc_preg (cfg);
6708 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6709 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6711 /* Load the vtable slot, which contains a function descriptor. */
6712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6714 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-zero); the branch is marked likely */
6716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6717 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
6718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: ask the runtime to initialize the slot and hand back its value */
6721 // FIXME: Make the wrapper use the preserveall cconv
6722 // FIXME: Use one icall per slot for small slot numbers ?
6723 icall_args [0] = vtable_ins;
6724 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6725 /* Make the icall return the vtable slot value to save some code space */
6726 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
6727 ins->dreg = slot_reg;
6728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
6731 MONO_START_BB (cfg, non_null_bb);
6732 /* Load the address + arg from the vtable slot */
6733 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
6736 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple (invariant, non-generic) interface call through an IMT slot */
6739 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
6741 * A simple interface call
6743 * We make a call through an imt slot to obtain the function descriptor we need to call.
6744 * The imt slot contains a function descriptor for a runtime function + arg.
6746 int slot_reg = alloc_preg (cfg);
6747 int addr_reg = alloc_preg (cfg);
6748 int arg_reg = alloc_preg (cfg);
6749 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6751 vtable_reg = alloc_preg (cfg);
6752 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets from the vtable pointer */
6753 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6756 * The slot is already initialized when the vtable is created so there is no need
6760 /* Load the imt slot, which contains a function descriptor. */
6761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6763 /* Load the address + arg of the imt thunk from the imt slot */
6764 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6765 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6767 * IMT thunks in llvm-only mode are C functions which take an info argument
6768 * plus the imt method and return the ftndesc to call.
6770 icall_args [0] = thunk_arg_ins;
6771 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6772 cmethod, MONO_RGCTX_INFO_METHOD);
6773 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6775 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant iface / special array iface, resolved via a lazily-grown IMT thunk */
6778 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
6780 * This is similar to the interface case, the vtable slot points to an imt thunk which is
6781 * dynamically extended as more instantiations are discovered.
6782 * This handles generic virtual methods both on classes and interfaces.
6784 int slot_reg = alloc_preg (cfg);
6785 int addr_reg = alloc_preg (cfg);
6786 int arg_reg = alloc_preg (cfg);
6787 int ftndesc_reg = alloc_preg (cfg);
6788 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6789 MonoBasicBlock *slowpath_bb, *end_bb;
6791 NEW_BBLOCK (cfg, slowpath_bb);
6792 NEW_BBLOCK (cfg, end_bb);
6794 vtable_reg = alloc_preg (cfg);
6795 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface slot (negative IMT offset) vs. class vtable slot — the selecting branch is not visible here */
6797 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6799 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6801 /* Load the slot, which contains a function descriptor. */
6802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6804 /* These slots are not initialized, so fall back to the slow path until they are initialized */
6805 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
6806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6807 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6810 /* Same as with iface calls */
6811 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6812 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6813 icall_args [0] = thunk_arg_ins;
6814 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6815 cmethod, MONO_RGCTX_INFO_METHOD);
6816 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6817 ftndesc_ins->dreg = ftndesc_reg;
6819 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
6820 * they don't know about yet. Fall back to the slowpath in that case.
6822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
6823 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
6825 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: resolve through a runtime icall (iface vs. class variants; selecting branch not visible) */
6828 MONO_START_BB (cfg, slowpath_bb);
6829 icall_args [0] = vtable_ins;
6830 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6831 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6832 cmethod, MONO_RGCTX_INFO_METHOD);
6834 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
6836 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
6837 ftndesc_ins->dreg = ftndesc_reg;
6838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6841 MONO_START_BB (cfg, end_bb);
6842 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: remaining (gsharedvt) cases, resolved entirely through icalls */
6846 * Non-optimized cases
6848 icall_args [0] = sp [0];
6849 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6851 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
6852 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter: the resolver writes the extra call argument through this address */
6854 arg_reg = alloc_preg (cfg);
6855 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
6856 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
6858 g_assert (is_gsharedvt);
6860 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
6862 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
6865 * Pass the extra argument even if the callee doesn't receive it, most
6866 * calling conventions allow this.
6868 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 * Return whether KLASS is System.Exception or — presumably, given the
 * parent walk below — one of its subclasses.  The loop header and return
 * statements are not visible in this chunk; TODO confirm against the
 * full source.
 */
6872 is_exception_class (MonoClass *klass)
6875 if (klass == mono_defaults.exception_class)
/* Walk up the inheritance chain */
6877 klass = klass->parent;
6883 * is_jit_optimizer_disabled:
6885 * Determine whenever M's assembly has a DebuggableAttribute with the
6886 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per-assembly (jit_optimizer_disabled +
 * jit_optimizer_disabled_inited, ordered with memory barriers so
 * concurrent readers never see the flag before the value).
 */
6889 is_jit_optimizer_disabled (MonoMethod *m)
6892 MonoAssembly *ass = m->klass->image->assembly;
6893 MonoCustomAttrInfo* attrs;
6896 gboolean val = FALSE;
/* Fast path: another thread already computed and published the answer */
6899 if (ass->jit_optimizer_disabled_inited)
6900 return ass->jit_optimizer_disabled;
6902 klass = mono_class_try_get_debuggable_attribute_class ();
/* Corlib has no DebuggableAttribute class: publish FALSE and bail (early-return path not visible here) */
6906 ass->jit_optimizer_disabled = FALSE;
/* Publish the value before the inited flag so readers never see a stale value */
6907 mono_memory_barrier ();
6908 ass->jit_optimizer_disabled_inited = TRUE;
6912 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
6913 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan the assembly's custom attributes for a [Debuggable(bool, bool)] */
6915 for (i = 0; i < attrs->num_attrs; ++i) {
6916 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6918 MonoMethodSignature *sig;
6920 if (!attr->ctor || attr->ctor->klass != klass)
6922 /* Decode the attribute. See reflection.c */
6923 p = (const char*)attr->data;
/* Custom-attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3) */
6924 g_assert (read16 (p) == 0x0001);
6927 // FIXME: Support named parameters
6928 sig = mono_method_signature (attr->ctor);
/* Only the (bool isJITTrackingEnabled, bool isJITOptimizerDisabled) ctor is handled */
6929 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6931 /* Two boolean arguments */
6935 mono_custom_attrs_free (attrs);
/* Publish the computed value, again value-before-flag */
6938 ass->jit_optimizer_disabled = val;
6939 mono_memory_barrier ();
6940 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 * Decide whether a call from METHOD to CMETHOD with signature FSIG and IL
 * opcode CALL_OPCODE can be compiled as a real tail call.  Starts from the
 * architecture's verdict and then vetoes any case where the callee could
 * observe the caller's (about to be destroyed) stack frame, or where the
 * runtime needs the caller frame to stay live (LMF, wrappers, pinvoke).
 */
6946 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6948 gboolean supported_tail_call;
/* Architecture-specific baseline (calling convention compatibility) */
6951 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6953 for (i = 0; i < fsig->param_count; ++i) {
6954 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6955 /* These can point to the current method's stack */
6956 supported_tail_call = FALSE;
6958 if (fsig->hasthis && cmethod->klass->valuetype)
6959 /* this might point to the current method's stack */
6960 supported_tail_call = FALSE;
/* P/Invokes, LMF-saving methods and (most) wrappers need the caller frame intact */
6961 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6962 supported_tail_call = FALSE;
6963 if (cfg->method->save_lmf)
6964 supported_tail_call = FALSE;
6965 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6966 supported_tail_call = FALSE;
/* Only plain CEE_CALL is eligible (not calli/callvirt) */
6967 if (call_opcode != CEE_CALL)
6968 supported_tail_call = FALSE;
6970 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs */
6972 if (supported_tail_call) {
6973 if (!mono_debug_count ())
6974 supported_tail_call = FALSE;
6978 return supported_tail_call;
6984 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 * Emit IR for the constructor invocation of a NEWOBJ.  SP holds the ctor
 * arguments (sp [0] is the freshly allocated object), IP is the current IL
 * position (used for inlining bookkeeping), and *INLINE_COSTS is updated
 * when the ctor body is inlined.  Depending on generic sharing, a
 * vtable/mrgctx argument may have to be passed explicitly, and gsharedvt
 * or context-used calls go through indirect calli paths.
 * NOTE(review): else/closing-brace lines are missing from this chunk;
 * comments describe only the visible statements.
 */
6987 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6988 MonoInst **sp, guint8 *ip, int *inline_costs)
6990 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an explicit rgctx/vtable argument */
6992 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6993 mono_method_is_generic_sharable (cmethod, TRUE)) {
6994 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* Ensure the vtable exists (result intentionally unused; only the side effect matters) */
6995 mono_class_vtable (cfg->domain, cmethod->klass);
6996 CHECK_TYPELOAD (cmethod->klass);
6998 vtable_arg = emit_get_rgctx_method (cfg, context_used,
6999 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Non-method-inst sharing: look the vtable up through the rgctx instead */
7002 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7003 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Unshared path: the vtable is known at compile time, embed it as a constant */
7005 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7007 CHECK_TYPELOAD (cmethod->klass);
7008 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7013 /* Avoid virtual calls to ctors if possible */
7014 if (mono_class_is_marshalbyref (cmethod->klass))
7015 callvirt_this_arg = sp [0];
/* Try an intrinsic first; intrinsic ctors must return void */
7017 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7018 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7019 CHECK_CFG_EXCEPTION;
/* Next, try inlining the ctor body (not for exception-class ctors, which are cold) */
7020 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7021 mono_method_check_inlining (cfg, cmethod) &&
7022 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7025 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 = size of the CALL opcode this inline replaces */
7026 cfg->real_offset += 5;
7028 *inline_costs += costs - 5;
/* Inlining failed: fall back to an ordinary call */
7030 INLINE_FAILURE ("inline failure");
7031 // FIXME-VT: Clean this up
7032 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7033 GSHAREDVT_FAILURE(*ip);
7034 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call through the gsharedvt out trampoline */
7036 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7039 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7041 if (cfg->llvm_only) {
7042 // FIXME: Avoid initializing vtable_arg
7043 emit_llvmonly_calli (cfg, fsig, sp, addr);
7045 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7047 } else if (context_used &&
7048 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7049 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7050 MonoInst *cmethod_addr;
7052 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7054 if (cfg->llvm_only) {
7055 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7056 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7057 emit_llvmonly_calli (cfg, fsig, sp, addr);
7059 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7060 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7062 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a direct call to the ctor (counts as an inline failure for stats) */
7065 INLINE_FAILURE ("ctor call");
7066 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7067 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 * Emit IR that moves VAL into the current method's return location.
 * Valuetype returns (stind == CEE_STOBJ) are stored either into the
 * return variable or through the hidden vret address; on soft-float
 * targets an R4 return is first converted via the mono_fload_r4_arg
 * icall.  All other returns go straight to mono_arch_emit_setret ().
 * NOTE(review): else/closing-brace lines are missing from this chunk.
 */
7074 emit_setret (MonoCompile *cfg, MonoInst *val)
7076 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: must be stored, not moved through a register */
7079 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7082 if (!cfg->vret_addr) {
7083 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden return-buffer address: store through it */
7085 EMIT_NEW_RETLOADA (cfg, ret_addr);
7087 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7088 ins->klass = mono_class_from_mono_type (ret_type);
7091 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value through an icall before the arch setret */
7092 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7093 MonoInst *iargs [1];
7097 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7098 mono_arch_emit_setret (cfg, cfg->method, conv);
7100 mono_arch_emit_setret (cfg, cfg->method, val);
/* Plain scalar/reference return */
7103 mono_arch_emit_setret (cfg, cfg->method, val);
7109 * mono_method_to_ir:
7111 * Translate the .net IL into linear IR.
7113 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7114 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7115 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7116 * @inline_args: if not NULL, contains the arguments to the inline call
7117 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7118 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7120 * This method is used to turn ECMA IL into Mono's internal Linear IR
7121 * reprensetation. It is used both for entire methods, as well as
7122 * inlining existing methods. In the former case, the @start_bblock,
7123 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7124 * inline_offset is set to zero.
7126 * Returns: the inline cost, or -1 if there was an error processing this method.
7129 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7130 MonoInst *return_var, MonoInst **inline_args,
7131 guint inline_offset, gboolean is_virtual_call)
7134 MonoInst *ins, **sp, **stack_start;
7135 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7136 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7137 MonoMethod *cmethod, *method_definition;
7138 MonoInst **arg_array;
7139 MonoMethodHeader *header;
7141 guint32 token, ins_flag;
7143 MonoClass *constrained_class = NULL;
7144 unsigned char *ip, *end, *target, *err_pos;
7145 MonoMethodSignature *sig;
7146 MonoGenericContext *generic_context = NULL;
7147 MonoGenericContainer *generic_container = NULL;
7148 MonoType **param_types;
7149 int i, n, start_new_bblock, dreg;
7150 int num_calls = 0, inline_costs = 0;
7151 int breakpoint_id = 0;
7153 GSList *class_inits = NULL;
7154 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7156 gboolean init_locals, seq_points, skip_dead_blocks;
7157 gboolean sym_seq_points = FALSE;
7158 MonoDebugMethodInfo *minfo;
7159 MonoBitSet *seq_point_locs = NULL;
7160 MonoBitSet *seq_point_set_locs = NULL;
7162 cfg->disable_inline = is_jit_optimizer_disabled (method);
7164 /* serialization and xdomain stuff may need access to private fields and methods */
7165 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7166 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7167 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7168 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7169 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7170 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7172 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7173 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7174 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7175 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7176 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7178 image = method->klass->image;
7179 header = mono_method_get_header_checked (method, &cfg->error);
7181 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7182 goto exception_exit;
7184 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7187 generic_container = mono_method_get_generic_container (method);
7188 sig = mono_method_signature (method);
7189 num_args = sig->hasthis + sig->param_count;
7190 ip = (unsigned char*)header->code;
7191 cfg->cil_start = ip;
7192 end = ip + header->code_size;
7193 cfg->stat_cil_code_size += header->code_size;
7195 seq_points = cfg->gen_seq_points && cfg->method == method;
7197 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7198 /* We could hit a seq point before attaching to the JIT (#8338) */
7202 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7203 minfo = mono_debug_lookup_method (method);
7205 MonoSymSeqPoint *sps;
7206 int i, n_il_offsets;
7208 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7209 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7210 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7211 sym_seq_points = TRUE;
7212 for (i = 0; i < n_il_offsets; ++i) {
7213 if (sps [i].il_offset < header->code_size)
7214 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7218 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7220 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7222 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7223 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7225 mono_debug_free_method_async_debug_info (asyncMethod);
7227 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7228 /* Methods without line number info like auto-generated property accessors */
7229 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7230 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7231 sym_seq_points = TRUE;
7236 * Methods without init_locals set could cause asserts in various passes
7237 * (#497220). To work around this, we emit dummy initialization opcodes
7238 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7239 * on some platforms.
7241 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7242 init_locals = header->init_locals;
7246 method_definition = method;
7247 while (method_definition->is_inflated) {
7248 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7249 method_definition = imethod->declaring;
7252 /* SkipVerification is not allowed if core-clr is enabled */
7253 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7255 dont_verify_stloc = TRUE;
7258 if (sig->is_inflated)
7259 generic_context = mono_method_get_context (method);
7260 else if (generic_container)
7261 generic_context = &generic_container->context;
7262 cfg->generic_context = generic_context;
7265 g_assert (!sig->has_type_parameters);
7267 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7268 g_assert (method->is_inflated);
7269 g_assert (mono_method_get_context (method)->method_inst);
7271 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7272 g_assert (sig->generic_param_count);
7274 if (cfg->method == method) {
7275 cfg->real_offset = 0;
7277 cfg->real_offset = inline_offset;
7280 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7281 cfg->cil_offset_to_bb_len = header->code_size;
7283 cfg->current_method = method;
7285 if (cfg->verbose_level > 2)
7286 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7288 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7290 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7291 for (n = 0; n < sig->param_count; ++n)
7292 param_types [n + sig->hasthis] = sig->params [n];
7293 cfg->arg_types = param_types;
7295 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7296 if (cfg->method == method) {
7298 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7301 NEW_BBLOCK (cfg, start_bblock);
7302 cfg->bb_entry = start_bblock;
7303 start_bblock->cil_code = NULL;
7304 start_bblock->cil_length = 0;
7307 NEW_BBLOCK (cfg, end_bblock);
7308 cfg->bb_exit = end_bblock;
7309 end_bblock->cil_code = NULL;
7310 end_bblock->cil_length = 0;
7311 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7312 g_assert (cfg->num_bblocks == 2);
7314 arg_array = cfg->args;
7316 if (header->num_clauses) {
7317 cfg->spvars = g_hash_table_new (NULL, NULL);
7318 cfg->exvars = g_hash_table_new (NULL, NULL);
7320 /* handle exception clauses */
7321 for (i = 0; i < header->num_clauses; ++i) {
7322 MonoBasicBlock *try_bb;
7323 MonoExceptionClause *clause = &header->clauses [i];
7324 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7326 try_bb->real_offset = clause->try_offset;
7327 try_bb->try_start = TRUE;
7328 try_bb->region = ((i + 1) << 8) | clause->flags;
7329 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7330 tblock->real_offset = clause->handler_offset;
7331 tblock->flags |= BB_EXCEPTION_HANDLER;
7333 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
7334 mono_create_exvar_for_offset (cfg, clause->handler_offset);
7336 * Linking the try block with the EH block hinders inlining as we won't be able to
7337 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7339 if (COMPILE_LLVM (cfg))
7340 link_bblock (cfg, try_bb, tblock);
7342 if (*(ip + clause->handler_offset) == CEE_POP)
7343 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7345 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7346 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7347 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7348 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7349 MONO_ADD_INS (tblock, ins);
7351 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7352 /* finally clauses already have a seq point */
7353 /* seq points for filter clauses are emitted below */
7354 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7355 MONO_ADD_INS (tblock, ins);
7358 /* todo: is a fault block unsafe to optimize? */
7359 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7360 tblock->flags |= BB_EXCEPTION_UNSAFE;
7363 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7365 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7367 /* catch and filter blocks get the exception object on the stack */
7368 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7369 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7371 /* mostly like handle_stack_args (), but just sets the input args */
7372 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7373 tblock->in_scount = 1;
7374 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7375 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7379 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7380 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7381 if (!cfg->compile_llvm) {
7382 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7383 ins->dreg = tblock->in_stack [0]->dreg;
7384 MONO_ADD_INS (tblock, ins);
7387 MonoInst *dummy_use;
7390 * Add a dummy use for the exvar so its liveness info will be
7393 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7396 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7397 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7398 MONO_ADD_INS (tblock, ins);
7401 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7402 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7403 tblock->flags |= BB_EXCEPTION_HANDLER;
7404 tblock->real_offset = clause->data.filter_offset;
7405 tblock->in_scount = 1;
7406 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7407 /* The filter block shares the exvar with the handler block */
7408 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7409 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7410 MONO_ADD_INS (tblock, ins);
7414 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7415 clause->data.catch_class &&
7417 mono_class_check_context_used (clause->data.catch_class)) {
7419 * In shared generic code with catch
7420 * clauses containing type variables
7421 * the exception handling code has to
7422 * be able to get to the rgctx.
7423 * Therefore we have to make sure that
7424 * the vtable/mrgctx argument (for
7425 * static or generic methods) or the
7426 * "this" argument (for non-static
7427 * methods) are live.
7429 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7430 mini_method_get_context (method)->method_inst ||
7431 method->klass->valuetype) {
7432 mono_get_vtable_var (cfg);
7434 MonoInst *dummy_use;
7436 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7441 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7442 cfg->cbb = start_bblock;
7443 cfg->args = arg_array;
7444 mono_save_args (cfg, sig, inline_args);
7447 /* FIRST CODE BLOCK */
7448 NEW_BBLOCK (cfg, tblock);
7449 tblock->cil_code = ip;
7453 ADD_BBLOCK (cfg, tblock);
7455 if (cfg->method == method) {
7456 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7457 if (breakpoint_id) {
7458 MONO_INST_NEW (cfg, ins, OP_BREAK);
7459 MONO_ADD_INS (cfg->cbb, ins);
7463 /* we use a separate basic block for the initialization code */
7464 NEW_BBLOCK (cfg, init_localsbb);
7465 if (cfg->method == method)
7466 cfg->bb_init = init_localsbb;
7467 init_localsbb->real_offset = cfg->real_offset;
7468 start_bblock->next_bb = init_localsbb;
7469 init_localsbb->next_bb = cfg->cbb;
7470 link_bblock (cfg, start_bblock, init_localsbb);
7471 link_bblock (cfg, init_localsbb, cfg->cbb);
7473 cfg->cbb = init_localsbb;
7475 if (cfg->gsharedvt && cfg->method == method) {
7476 MonoGSharedVtMethodInfo *info;
7477 MonoInst *var, *locals_var;
7480 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7481 info->method = cfg->method;
7482 info->count_entries = 16;
7483 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7484 cfg->gsharedvt_info = info;
7486 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7487 /* prevent it from being register allocated */
7488 //var->flags |= MONO_INST_VOLATILE;
7489 cfg->gsharedvt_info_var = var;
7491 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7492 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7494 /* Allocate locals */
7495 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7496 /* prevent it from being register allocated */
7497 //locals_var->flags |= MONO_INST_VOLATILE;
7498 cfg->gsharedvt_locals_var = locals_var;
7500 dreg = alloc_ireg (cfg);
7501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7503 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7504 ins->dreg = locals_var->dreg;
7506 MONO_ADD_INS (cfg->cbb, ins);
7507 cfg->gsharedvt_locals_var_ins = ins;
7509 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7512 ins->flags |= MONO_INST_INIT;
7516 if (mono_security_core_clr_enabled ()) {
7517 /* check if this is native code, e.g. an icall or a p/invoke */
7518 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7519 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7521 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7522 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7524 /* if this ia a native call then it can only be JITted from platform code */
7525 if ((icall || pinvk) && method->klass && method->klass->image) {
7526 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7527 MonoException *ex = icall ? mono_get_exception_security () :
7528 mono_get_exception_method_access ();
7529 emit_throw_exception (cfg, ex);
7536 CHECK_CFG_EXCEPTION;
7538 if (header->code_size == 0)
7541 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7546 if (cfg->method == method)
7547 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7549 for (n = 0; n < header->num_locals; ++n) {
7550 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7555 /* We force the vtable variable here for all shared methods
7556 for the possibility that they might show up in a stack
7557 trace where their exact instantiation is needed. */
7558 if (cfg->gshared && method == cfg->method) {
7559 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7560 mini_method_get_context (method)->method_inst ||
7561 method->klass->valuetype) {
7562 mono_get_vtable_var (cfg);
7564 /* FIXME: Is there a better way to do this?
7565 We need the variable live for the duration
7566 of the whole method. */
7567 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7571 /* add a check for this != NULL to inlined methods */
7572 if (is_virtual_call) {
7575 NEW_ARGLOAD (cfg, arg_ins, 0);
7576 MONO_ADD_INS (cfg->cbb, arg_ins);
7577 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7580 skip_dead_blocks = !dont_verify;
7581 if (skip_dead_blocks) {
7582 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7587 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7588 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7591 start_new_bblock = 0;
7593 if (cfg->method == method)
7594 cfg->real_offset = ip - header->code;
7596 cfg->real_offset = inline_offset;
7601 if (start_new_bblock) {
7602 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7603 if (start_new_bblock == 2) {
7604 g_assert (ip == tblock->cil_code);
7606 GET_BBLOCK (cfg, tblock, ip);
7608 cfg->cbb->next_bb = tblock;
7610 start_new_bblock = 0;
7611 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7612 if (cfg->verbose_level > 3)
7613 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7614 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7618 g_slist_free (class_inits);
7621 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7622 link_bblock (cfg, cfg->cbb, tblock);
7623 if (sp != stack_start) {
7624 handle_stack_args (cfg, stack_start, sp - stack_start);
7626 CHECK_UNVERIFIABLE (cfg);
7628 cfg->cbb->next_bb = tblock;
7630 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7631 if (cfg->verbose_level > 3)
7632 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7633 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7636 g_slist_free (class_inits);
7641 if (skip_dead_blocks) {
7642 int ip_offset = ip - header->code;
7644 if (ip_offset == bb->end)
7648 int op_size = mono_opcode_size (ip, end);
7649 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7651 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7653 if (ip_offset + op_size == bb->end) {
7654 MONO_INST_NEW (cfg, ins, OP_NOP);
7655 MONO_ADD_INS (cfg->cbb, ins);
7656 start_new_bblock = 1;
7664 * Sequence points are points where the debugger can place a breakpoint.
7665 * Currently, we generate these automatically at points where the IL
7668 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7670 * Make methods interruptable at the beginning, and at the targets of
7671 * backward branches.
7672 * Also, do this at the start of every bblock in methods with clauses too,
7673 * to be able to handle instructions with inprecise control flow like
7675 * Backward branches are handled at the end of method-to-ir ().
7677 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7678 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7680 /* Avoid sequence points on empty IL like .volatile */
7681 // FIXME: Enable this
7682 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7683 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7684 if ((sp != stack_start) && !sym_seq_point)
7685 ins->flags |= MONO_INST_NONEMPTY_STACK;
7686 MONO_ADD_INS (cfg->cbb, ins);
7689 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7692 cfg->cbb->real_offset = cfg->real_offset;
7694 if ((cfg->method == method) && cfg->coverage_info) {
7695 guint32 cil_offset = ip - header->code;
7696 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
7697 cfg->coverage_info->data [cil_offset].cil_code = ip;
7699 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
7700 MonoInst *one_ins, *load_ins;
7702 EMIT_NEW_PCONST (cfg, load_ins, counter);
7703 EMIT_NEW_ICONST (cfg, one_ins, 1);
7704 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
7705 ins->dreg = mono_alloc_ireg (cfg);
7706 ins->inst_basereg = load_ins->dreg;
7707 ins->inst_offset = 0;
7708 ins->sreg2 = one_ins->dreg;
7709 ins->type = STACK_I4;
7710 MONO_ADD_INS (cfg->cbb, ins);
7712 EMIT_NEW_PCONST (cfg, ins, counter);
7713 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7717 if (cfg->verbose_level > 3)
7718 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7722 if (seq_points && !sym_seq_points && sp != stack_start) {
7724 * The C# compiler uses these nops to notify the JIT that it should
7725 * insert seq points.
7727 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7728 MONO_ADD_INS (cfg->cbb, ins);
7730 if (cfg->keep_cil_nops)
7731 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7733 MONO_INST_NEW (cfg, ins, OP_NOP);
7735 MONO_ADD_INS (cfg->cbb, ins);
7738 if (mini_should_insert_breakpoint (cfg->method)) {
7739 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7741 MONO_INST_NEW (cfg, ins, OP_NOP);
7744 MONO_ADD_INS (cfg->cbb, ins);
7750 CHECK_STACK_OVF (1);
7751 n = (*ip)-CEE_LDARG_0;
7753 EMIT_NEW_ARGLOAD (cfg, ins, n);
7761 CHECK_STACK_OVF (1);
7762 n = (*ip)-CEE_LDLOC_0;
7764 EMIT_NEW_LOCLOAD (cfg, ins, n);
7773 n = (*ip)-CEE_STLOC_0;
7776 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7778 emit_stloc_ir (cfg, sp, header, n);
7785 CHECK_STACK_OVF (1);
7788 EMIT_NEW_ARGLOAD (cfg, ins, n);
7794 CHECK_STACK_OVF (1);
7797 NEW_ARGLOADA (cfg, ins, n);
7798 MONO_ADD_INS (cfg->cbb, ins);
7808 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7810 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7815 CHECK_STACK_OVF (1);
7818 if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
7819 /* Avoid loading a struct just to load one of its fields */
7820 EMIT_NEW_LOCLOADA (cfg, ins, n);
7822 EMIT_NEW_LOCLOAD (cfg, ins, n);
7827 case CEE_LDLOCA_S: {
7828 unsigned char *tmp_ip;
7830 CHECK_STACK_OVF (1);
7831 CHECK_LOCAL (ip [1]);
7833 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7839 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7848 CHECK_LOCAL (ip [1]);
7849 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7851 emit_stloc_ir (cfg, sp, header, ip [1]);
7856 CHECK_STACK_OVF (1);
7857 EMIT_NEW_PCONST (cfg, ins, NULL);
7858 ins->type = STACK_OBJ;
7863 CHECK_STACK_OVF (1);
7864 EMIT_NEW_ICONST (cfg, ins, -1);
7877 CHECK_STACK_OVF (1);
7878 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7884 CHECK_STACK_OVF (1);
7886 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7892 CHECK_STACK_OVF (1);
7893 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7899 CHECK_STACK_OVF (1);
7900 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7901 ins->type = STACK_I8;
7902 ins->dreg = alloc_dreg (cfg, STACK_I8);
7904 ins->inst_l = (gint64)read64 (ip);
7905 MONO_ADD_INS (cfg->cbb, ins);
7911 gboolean use_aotconst = FALSE;
7913 #ifdef TARGET_POWERPC
7914 /* FIXME: Clean this up */
7915 if (cfg->compile_aot)
7916 use_aotconst = TRUE;
7919 /* FIXME: we should really allocate this only late in the compilation process */
7920 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
7922 CHECK_STACK_OVF (1);
7928 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7930 dreg = alloc_freg (cfg);
7931 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7932 ins->type = cfg->r4_stack_type;
7934 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7935 ins->type = cfg->r4_stack_type;
7936 ins->dreg = alloc_dreg (cfg, STACK_R8);
7938 MONO_ADD_INS (cfg->cbb, ins);
7948 gboolean use_aotconst = FALSE;
7950 #ifdef TARGET_POWERPC
7951 /* FIXME: Clean this up */
7952 if (cfg->compile_aot)
7953 use_aotconst = TRUE;
7956 /* FIXME: we should really allocate this only late in the compilation process */
7957 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
7959 CHECK_STACK_OVF (1);
7965 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7967 dreg = alloc_freg (cfg);
7968 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7969 ins->type = STACK_R8;
7971 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7972 ins->type = STACK_R8;
7973 ins->dreg = alloc_dreg (cfg, STACK_R8);
7975 MONO_ADD_INS (cfg->cbb, ins);
7984 MonoInst *temp, *store;
7986 CHECK_STACK_OVF (1);
7990 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7991 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7993 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7996 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8009 if (sp [0]->type == STACK_R8)
8010 /* we need to pop the value from the x86 FP stack */
8011 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8016 MonoMethodSignature *fsig;
8019 INLINE_FAILURE ("jmp");
8020 GSHAREDVT_FAILURE (*ip);
8023 if (stack_start != sp)
8025 token = read32 (ip + 1);
8026 /* FIXME: check the signature matches */
8027 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8030 if (cfg->gshared && mono_method_check_context_used (cmethod))
8031 GENERIC_SHARING_FAILURE (CEE_JMP);
8033 mini_profiler_emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE, NULL, NULL);
8035 fsig = mono_method_signature (cmethod);
8036 n = fsig->param_count + fsig->hasthis;
8037 if (cfg->llvm_only) {
8040 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8041 for (i = 0; i < n; ++i)
8042 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8043 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8045 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8046 * have to emit a normal return since llvm expects it.
8049 emit_setret (cfg, ins);
8050 MONO_INST_NEW (cfg, ins, OP_BR);
8051 ins->inst_target_bb = end_bblock;
8052 MONO_ADD_INS (cfg->cbb, ins);
8053 link_bblock (cfg, cfg->cbb, end_bblock);
8056 } else if (cfg->backend->have_op_tail_call) {
8057 /* Handle tail calls similarly to calls */
8060 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8061 call->method = cmethod;
8062 call->tail_call = TRUE;
8063 call->signature = mono_method_signature (cmethod);
8064 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8065 call->inst.inst_p0 = cmethod;
8066 for (i = 0; i < n; ++i)
8067 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8069 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8070 call->vret_var = cfg->vret_addr;
8072 mono_arch_emit_call (cfg, call);
8073 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8074 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8076 for (i = 0; i < num_args; ++i)
8077 /* Prevent arguments from being optimized away */
8078 arg_array [i]->flags |= MONO_INST_VOLATILE;
8080 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8081 ins = (MonoInst*)call;
8082 ins->inst_p0 = cmethod;
8083 MONO_ADD_INS (cfg->cbb, ins);
8087 start_new_bblock = 1;
8092 MonoMethodSignature *fsig;
8095 token = read32 (ip + 1);
8099 //GSHAREDVT_FAILURE (*ip);
8104 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8107 if (method->dynamic && fsig->pinvoke) {
8111 * This is a call through a function pointer using a pinvoke
8112 * signature. Have to create a wrapper and call that instead.
8113 * FIXME: This is very slow, need to create a wrapper at JIT time
8114 * instead based on the signature.
8116 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8117 EMIT_NEW_PCONST (cfg, args [1], fsig);
8119 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8122 n = fsig->param_count + fsig->hasthis;
8126 //g_assert (!virtual_ || fsig->hasthis);
8130 inline_costs += 10 * num_calls++;
8133 * Making generic calls out of gsharedvt methods.
8134 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8135 * patching gshared method addresses into a gsharedvt method.
8137 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8139 * We pass the address to the gsharedvt trampoline in the rgctx reg
8141 MonoInst *callee = addr;
8143 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8145 GSHAREDVT_FAILURE (*ip);
8149 GSHAREDVT_FAILURE (*ip);
8151 addr = emit_get_rgctx_sig (cfg, context_used,
8152 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8153 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8157 /* Prevent inlining of methods with indirect calls */
8158 INLINE_FAILURE ("indirect call");
8160 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8161 MonoJumpInfoType info_type;
8165 * Instead of emitting an indirect call, emit a direct call
8166 * with the contents of the aotconst as the patch info.
8168 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8169 info_type = (MonoJumpInfoType)addr->inst_c1;
8170 info_data = addr->inst_p0;
8172 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8173 info_data = addr->inst_right->inst_left;
8176 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8177 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8180 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8181 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8186 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8190 /* End of call, INS should contain the result of the call, if any */
8192 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8194 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8197 CHECK_CFG_EXCEPTION;
8201 constrained_class = NULL;
8205 case CEE_CALLVIRT: {
8206 MonoInst *addr = NULL;
8207 MonoMethodSignature *fsig = NULL;
8209 int virtual_ = *ip == CEE_CALLVIRT;
8210 gboolean pass_imt_from_rgctx = FALSE;
8211 MonoInst *imt_arg = NULL;
8212 MonoInst *keep_this_alive = NULL;
8213 gboolean pass_vtable = FALSE;
8214 gboolean pass_mrgctx = FALSE;
8215 MonoInst *vtable_arg = NULL;
8216 gboolean check_this = FALSE;
8217 gboolean supported_tail_call = FALSE;
8218 gboolean tail_call = FALSE;
8219 gboolean need_seq_point = FALSE;
8220 guint32 call_opcode = *ip;
8221 gboolean emit_widen = TRUE;
8222 gboolean push_res = TRUE;
8223 gboolean skip_ret = FALSE;
8224 gboolean delegate_invoke = FALSE;
8225 gboolean direct_icall = FALSE;
8226 gboolean constrained_partial_call = FALSE;
8227 MonoMethod *cil_method;
8230 token = read32 (ip + 1);
8234 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8237 cil_method = cmethod;
8239 if (constrained_class) {
8240 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8241 if (!mini_is_gsharedvt_klass (constrained_class)) {
8242 g_assert (!cmethod->klass->valuetype);
8243 if (!mini_type_is_reference (&constrained_class->byval_arg))
8244 constrained_partial_call = TRUE;
8248 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8249 if (cfg->verbose_level > 2)
8250 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8251 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8252 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8254 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8258 if (cfg->verbose_level > 2)
8259 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8261 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8263 * This is needed since get_method_constrained can't find
8264 * the method in klass representing a type var.
8265 * The type var is guaranteed to be a reference type in this
8268 if (!mini_is_gsharedvt_klass (constrained_class))
8269 g_assert (!cmethod->klass->valuetype);
8271 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8276 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8277 /* Use the corresponding method from the base type to avoid boxing */
8278 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8279 g_assert (base_type);
8280 constrained_class = mono_class_from_mono_type (base_type);
8281 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8286 if (!dont_verify && !cfg->skip_visibility) {
8287 MonoMethod *target_method = cil_method;
8288 if (method->is_inflated) {
8289 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8292 if (!mono_method_can_access_method (method_definition, target_method) &&
8293 !mono_method_can_access_method (method, cil_method))
8294 emit_method_access_failure (cfg, method, cil_method);
8297 if (mono_security_core_clr_enabled ())
8298 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8300 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8301 /* MS.NET seems to silently convert this to a callvirt */
8306 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8307 * converts to a callvirt.
8309 * tests/bug-515884.il is an example of this behavior
8311 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8312 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8313 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8317 if (!cmethod->klass->inited)
8318 if (!mono_class_init (cmethod->klass))
8319 TYPE_LOAD_ERROR (cmethod->klass);
8321 fsig = mono_method_signature (cmethod);
8324 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8325 mini_class_is_system_array (cmethod->klass)) {
8326 array_rank = cmethod->klass->rank;
8327 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8328 direct_icall = TRUE;
8329 } else if (fsig->pinvoke) {
8330 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8331 fsig = mono_method_signature (wrapper);
8332 } else if (constrained_class) {
8334 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8338 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8339 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8341 /* See code below */
8342 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8343 MonoBasicBlock *tbb;
8345 GET_BBLOCK (cfg, tbb, ip + 5);
8346 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8348 * We want to extend the try block to cover the call, but we can't do it if the
8349 * call is made directly since its followed by an exception check.
8351 direct_icall = FALSE;
8355 mono_save_token_info (cfg, image, token, cil_method);
8357 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8358 need_seq_point = TRUE;
8360 /* Don't support calls made using type arguments for now */
8362 if (cfg->gsharedvt) {
8363 if (mini_is_gsharedvt_signature (fsig))
8364 GSHAREDVT_FAILURE (*ip);
8368 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8369 g_assert_not_reached ();
8371 n = fsig->param_count + fsig->hasthis;
8373 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8377 g_assert (!mono_method_check_context_used (cmethod));
8381 //g_assert (!virtual_ || fsig->hasthis);
8385 if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
8386 cfg->cbb->out_of_line = TRUE;
8389 * We have the `constrained.' prefix opcode.
8391 if (constrained_class) {
8392 if (mini_is_gsharedvt_klass (constrained_class)) {
8393 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8394 /* The 'Own method' case below */
8395 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8396 /* 'The type parameter is instantiated as a reference type' case below. */
8398 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8399 CHECK_CFG_EXCEPTION;
8405 if (constrained_partial_call) {
8406 gboolean need_box = TRUE;
8409 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8410 * called method is not known at compile time either. The called method could end up being
8411 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8412 * to box the receiver.
8413 * A simple solution would be to box always and make a normal virtual call, but that would
8414 * be bad performance wise.
8416 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8418 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
8423 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8424 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8425 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8426 ins->klass = constrained_class;
8427 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8428 CHECK_CFG_EXCEPTION;
8429 } else if (need_box) {
8431 MonoBasicBlock *is_ref_bb, *end_bb;
8432 MonoInst *nonbox_call;
8435 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8437 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8438 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8440 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8442 NEW_BBLOCK (cfg, is_ref_bb);
8443 NEW_BBLOCK (cfg, end_bb);
8445 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8450 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8455 MONO_START_BB (cfg, is_ref_bb);
8456 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8457 ins->klass = constrained_class;
8458 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8459 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8463 MONO_START_BB (cfg, end_bb);
8466 nonbox_call->dreg = ins->dreg;
8469 g_assert (mono_class_is_interface (cmethod->klass));
8470 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8471 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8474 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8476 * The type parameter is instantiated as a valuetype,
8477 * but that type doesn't override the method we're
8478 * calling, so we need to box `this'.
8480 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8481 ins->klass = constrained_class;
8482 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8483 CHECK_CFG_EXCEPTION;
8484 } else if (!constrained_class->valuetype) {
8485 int dreg = alloc_ireg_ref (cfg);
8488 * The type parameter is instantiated as a reference
8489 * type. We have a managed pointer on the stack, so
8490 * we need to dereference it here.
8492 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8493 ins->type = STACK_OBJ;
8496 if (cmethod->klass->valuetype) {
8499 /* Interface method */
8502 mono_class_setup_vtable (constrained_class);
8503 CHECK_TYPELOAD (constrained_class);
8504 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8506 TYPE_LOAD_ERROR (constrained_class);
8507 slot = mono_method_get_vtable_slot (cmethod);
8509 TYPE_LOAD_ERROR (cmethod->klass);
8510 cmethod = constrained_class->vtable [ioffset + slot];
8512 if (cmethod->klass == mono_defaults.enum_class) {
8513 /* Enum implements some interfaces, so treat this as the first case */
8514 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8515 ins->klass = constrained_class;
8516 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8517 CHECK_CFG_EXCEPTION;
8522 constrained_class = NULL;
8525 if (check_call_signature (cfg, fsig, sp))
8528 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8529 delegate_invoke = TRUE;
8531 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8532 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8533 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8541 * If the callee is a shared method, then its static cctor
8542 * might not get called after the call was patched.
8544 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8545 emit_class_init (cfg, cmethod->klass);
8546 CHECK_TYPELOAD (cmethod->klass);
8549 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8552 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8554 context_used = mini_method_check_context_used (cfg, cmethod);
8556 if (context_used && mono_class_is_interface (cmethod->klass)) {
8557 /* Generic method interface
8558 calls are resolved via a
8559 helper function and don't
8561 if (!cmethod_context || !cmethod_context->method_inst)
8562 pass_imt_from_rgctx = TRUE;
8566 * If a shared method calls another
8567 * shared method then the caller must
8568 * have a generic sharing context
8569 * because the magic trampoline
8570 * requires it. FIXME: We shouldn't
8571 * have to force the vtable/mrgctx
8572 * variable here. Instead there
8573 * should be a flag in the cfg to
8574 * request a generic sharing context.
8577 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8578 mono_get_vtable_var (cfg);
8583 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8585 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8587 CHECK_TYPELOAD (cmethod->klass);
8588 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8593 g_assert (!vtable_arg);
8595 if (!cfg->compile_aot) {
8597 * emit_get_rgctx_method () calls mono_class_vtable () so check
8598 * for type load errors before.
8600 mono_class_setup_vtable (cmethod->klass);
8601 CHECK_TYPELOAD (cmethod->klass);
8604 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8606 /* !marshalbyref is needed to properly handle generic methods + remoting */
8607 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8608 MONO_METHOD_IS_FINAL (cmethod)) &&
8609 !mono_class_is_marshalbyref (cmethod->klass)) {
8616 if (pass_imt_from_rgctx) {
8617 g_assert (!pass_vtable);
8619 imt_arg = emit_get_rgctx_method (cfg, context_used,
8620 cmethod, MONO_RGCTX_INFO_METHOD);
8624 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8626 /* Calling virtual generic methods */
8627 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8628 !(MONO_METHOD_IS_FINAL (cmethod) &&
8629 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8630 fsig->generic_param_count &&
8631 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8633 MonoInst *this_temp, *this_arg_temp, *store;
8634 MonoInst *iargs [4];
8636 g_assert (fsig->is_inflated);
8638 /* Prevent inlining of methods that contain indirect calls */
8639 INLINE_FAILURE ("virtual generic call");
8641 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8642 GSHAREDVT_FAILURE (*ip);
8644 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8645 g_assert (!imt_arg);
8647 g_assert (cmethod->is_inflated);
8648 imt_arg = emit_get_rgctx_method (cfg, context_used,
8649 cmethod, MONO_RGCTX_INFO_METHOD);
8650 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8652 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8653 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8654 MONO_ADD_INS (cfg->cbb, store);
8656 /* FIXME: This should be a managed pointer */
8657 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8659 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8660 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8661 cmethod, MONO_RGCTX_INFO_METHOD);
8662 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8663 addr = mono_emit_jit_icall (cfg,
8664 mono_helper_compile_generic_method, iargs);
8666 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8668 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8675 * Implement a workaround for the inherent races involved in locking:
8681 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8682 * try block, the Exit () won't be executed, see:
8683 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8684 * To work around this, we extend such try blocks to include the last x bytes
8685 * of the Monitor.Enter () call.
8687 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8688 MonoBasicBlock *tbb;
8690 GET_BBLOCK (cfg, tbb, ip + 5);
8692 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8693 * from Monitor.Enter like ArgumentNullException.
8695 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8696 /* Mark this bblock as needing to be extended */
8697 tbb->extend_try_block = TRUE;
8701 /* Conversion to a JIT intrinsic */
8702 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8703 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8704 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8712 if ((cfg->opt & MONO_OPT_INLINE) &&
8713 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8714 mono_method_check_inlining (cfg, cmethod)) {
8716 gboolean always = FALSE;
8718 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8719 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8720 /* Prevent inlining of methods that call wrappers */
8721 INLINE_FAILURE ("wrapper call");
8722 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8726 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8728 cfg->real_offset += 5;
8730 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8731 /* *sp is already set by inline_method */
8736 inline_costs += costs;
8742 /* Tail recursion elimination */
8743 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8744 gboolean has_vtargs = FALSE;
8747 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8748 INLINE_FAILURE ("tail call");
8750 /* keep it simple */
8751 for (i = fsig->param_count - 1; i >= 0; i--) {
8752 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8757 if (need_seq_point) {
8758 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8759 need_seq_point = FALSE;
8761 for (i = 0; i < n; ++i)
8762 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8763 MONO_INST_NEW (cfg, ins, OP_BR);
8764 MONO_ADD_INS (cfg->cbb, ins);
8765 tblock = start_bblock->out_bb [0];
8766 link_bblock (cfg, cfg->cbb, tblock);
8767 ins->inst_target_bb = tblock;
8768 start_new_bblock = 1;
8770 /* skip the CEE_RET, too */
8771 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
8778 inline_costs += 10 * num_calls++;
8781 * Synchronized wrappers.
8782 * Its hard to determine where to replace a method with its synchronized
8783 * wrapper without causing an infinite recursion. The current solution is
8784 * to add the synchronized wrapper in the trampolines, and to
8785 * change the called method to a dummy wrapper, and resolve that wrapper
8786 * to the real method in mono_jit_compile_method ().
8788 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8789 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8790 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8791 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8795 * Making generic calls out of gsharedvt methods.
8796 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8797 * patching gshared method addresses into a gsharedvt method.
8799 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
8800 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
8801 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
8802 MonoRgctxInfoType info_type;
8805 //if (mono_class_is_interface (cmethod->klass))
8806 //GSHAREDVT_FAILURE (*ip);
8807 // disable for possible remoting calls
8808 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8809 GSHAREDVT_FAILURE (*ip);
8810 if (fsig->generic_param_count) {
8811 /* virtual generic call */
8812 g_assert (!imt_arg);
8813 /* Same as the virtual generic case above */
8814 imt_arg = emit_get_rgctx_method (cfg, context_used,
8815 cmethod, MONO_RGCTX_INFO_METHOD);
8816 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8818 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
8819 /* This can happen when we call a fully instantiated iface method */
8820 imt_arg = emit_get_rgctx_method (cfg, context_used,
8821 cmethod, MONO_RGCTX_INFO_METHOD);
8826 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8827 keep_this_alive = sp [0];
8829 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8830 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8832 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8833 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8835 if (cfg->llvm_only) {
8836 // FIXME: Avoid initializing vtable_arg
8837 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8839 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8844 /* Generic sharing */
8847 * Use this if the callee is gsharedvt sharable too, since
8848 * at runtime we might find an instantiation so the call cannot
8849 * be patched (the 'no_patch' code path in mini-trampolines.c).
8851 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8852 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8853 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8854 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
8855 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8856 INLINE_FAILURE ("gshared");
8858 g_assert (cfg->gshared && cmethod);
8862 * We are compiling a call to a
8863 * generic method from shared code,
8864 * which means that we have to look up
8865 * the method in the rgctx and do an
8869 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8871 if (cfg->llvm_only) {
8872 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
8873 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
8875 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8876 // FIXME: Avoid initializing imt_arg/vtable_arg
8877 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
8879 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8880 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8885 /* Direct calls to icalls */
8887 MonoMethod *wrapper;
8890 /* Inline the wrapper */
8891 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8893 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
8894 g_assert (costs > 0);
8895 cfg->real_offset += 5;
8897 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8898 /* *sp is already set by inline_method */
8903 inline_costs += costs;
8912 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8913 MonoInst *val = sp [fsig->param_count];
8915 if (val->type == STACK_OBJ) {
8916 MonoInst *iargs [2];
8921 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8924 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8925 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8926 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
8927 mini_emit_write_barrier (cfg, addr, val);
8928 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
8929 GSHAREDVT_FAILURE (*ip);
8930 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8931 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8933 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8934 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8935 if (!cmethod->klass->element_class->valuetype && !readonly)
8936 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8937 CHECK_TYPELOAD (cmethod->klass);
8940 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8943 g_assert_not_reached ();
8950 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
8954 /* Tail prefix / tail call optimization */
8956 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8957 /* FIXME: runtime generic context pointer for jumps? */
8958 /* FIXME: handle this for generic sharing eventually */
8959 if ((ins_flag & MONO_INST_TAILCALL) &&
8960 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8961 supported_tail_call = TRUE;
8963 if (supported_tail_call) {
8966 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8967 INLINE_FAILURE ("tail call");
8969 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8971 if (cfg->backend->have_op_tail_call) {
8972 /* Handle tail calls similarly to normal calls */
8975 mini_profiler_emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE, NULL, NULL);
8977 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8978 call->tail_call = TRUE;
8979 call->method = cmethod;
8980 call->signature = mono_method_signature (cmethod);
8983 * We implement tail calls by storing the actual arguments into the
8984 * argument variables, then emitting a CEE_JMP.
8986 for (i = 0; i < n; ++i) {
8987 /* Prevent argument from being register allocated */
8988 arg_array [i]->flags |= MONO_INST_VOLATILE;
8989 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8991 ins = (MonoInst*)call;
8992 ins->inst_p0 = cmethod;
8993 ins->inst_p1 = arg_array [0];
8994 MONO_ADD_INS (cfg->cbb, ins);
8995 link_bblock (cfg, cfg->cbb, end_bblock);
8996 start_new_bblock = 1;
8998 // FIXME: Eliminate unreachable epilogs
9001 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9002 * only reachable from this call.
9004 GET_BBLOCK (cfg, tblock, ip + 5);
9005 if (tblock == cfg->cbb || tblock->in_count == 0)
9014 * Virtual calls in llvm-only mode.
9016 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9017 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9022 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9023 INLINE_FAILURE ("call");
9024 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9025 imt_arg, vtable_arg);
9027 if (tail_call && !cfg->llvm_only) {
9028 link_bblock (cfg, cfg->cbb, end_bblock);
9029 start_new_bblock = 1;
9031 // FIXME: Eliminate unreachable epilogs
9034 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9035 * only reachable from this call.
9037 GET_BBLOCK (cfg, tblock, ip + 5);
9038 if (tblock == cfg->cbb || tblock->in_count == 0)
9045 /* End of call, INS should contain the result of the call, if any */
9047 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9050 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9055 if (keep_this_alive) {
9056 MonoInst *dummy_use;
9058 /* See mono_emit_method_call_full () */
9059 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9062 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9064 * Clang can convert these calls to tail calls which screw up the stack
9065 * walk. This happens even when the -fno-optimize-sibling-calls
9066 * option is passed to clang.
9067 * Work around this by emitting a dummy call.
9069 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9072 CHECK_CFG_EXCEPTION;
9076 g_assert (*ip == CEE_RET);
9080 constrained_class = NULL;
9082 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9086 mini_profiler_emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE, sp - 1, sig->ret);
9088 if (cfg->method != method) {
9089 /* return from inlined method */
9091 * If in_count == 0, that means the ret is unreachable due to
9092 * being preceded by a throw. In that case, inline_method () will
9093 * handle setting the return value
9094 * (test case: test_0_inline_throw ()).
9096 if (return_var && cfg->cbb->in_count) {
9097 MonoType *ret_type = mono_method_signature (method)->ret;
9103 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9106 //g_assert (returnvar != -1);
9107 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9108 cfg->ret_var_set = TRUE;
9111 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9115 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9117 if (seq_points && !sym_seq_points) {
9119 * Place a seq point here too even though the IL stack is not
9120 * empty, so a step over on
9123 * will work correctly.
9125 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9126 MONO_ADD_INS (cfg->cbb, ins);
9129 g_assert (!return_var);
9133 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9136 emit_setret (cfg, *sp);
9139 if (sp != stack_start)
9141 MONO_INST_NEW (cfg, ins, OP_BR);
9143 ins->inst_target_bb = end_bblock;
9144 MONO_ADD_INS (cfg->cbb, ins);
9145 link_bblock (cfg, cfg->cbb, end_bblock);
9146 start_new_bblock = 1;
9150 MONO_INST_NEW (cfg, ins, OP_BR);
9152 target = ip + 1 + (signed char)(*ip);
9154 GET_BBLOCK (cfg, tblock, target);
9155 link_bblock (cfg, cfg->cbb, tblock);
9156 ins->inst_target_bb = tblock;
9157 if (sp != stack_start) {
9158 handle_stack_args (cfg, stack_start, sp - stack_start);
9160 CHECK_UNVERIFIABLE (cfg);
9162 MONO_ADD_INS (cfg->cbb, ins);
9163 start_new_bblock = 1;
9164 inline_costs += BRANCH_COST;
9178 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9180 target = ip + 1 + *(signed char*)ip;
9186 inline_costs += BRANCH_COST;
9190 MONO_INST_NEW (cfg, ins, OP_BR);
9193 target = ip + 4 + (gint32)read32(ip);
9195 GET_BBLOCK (cfg, tblock, target);
9196 link_bblock (cfg, cfg->cbb, tblock);
9197 ins->inst_target_bb = tblock;
9198 if (sp != stack_start) {
9199 handle_stack_args (cfg, stack_start, sp - stack_start);
9201 CHECK_UNVERIFIABLE (cfg);
9204 MONO_ADD_INS (cfg->cbb, ins);
9206 start_new_bblock = 1;
9207 inline_costs += BRANCH_COST;
9214 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9215 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9216 guint32 opsize = is_short ? 1 : 4;
9218 CHECK_OPSIZE (opsize);
9220 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9223 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9228 GET_BBLOCK (cfg, tblock, target);
9229 link_bblock (cfg, cfg->cbb, tblock);
9230 GET_BBLOCK (cfg, tblock, ip);
9231 link_bblock (cfg, cfg->cbb, tblock);
9233 if (sp != stack_start) {
9234 handle_stack_args (cfg, stack_start, sp - stack_start);
9235 CHECK_UNVERIFIABLE (cfg);
9238 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9239 cmp->sreg1 = sp [0]->dreg;
9240 type_from_op (cfg, cmp, sp [0], NULL);
9243 #if SIZEOF_REGISTER == 4
9244 if (cmp->opcode == OP_LCOMPARE_IMM) {
9245 /* Convert it to OP_LCOMPARE */
9246 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9247 ins->type = STACK_I8;
9248 ins->dreg = alloc_dreg (cfg, STACK_I8);
9250 MONO_ADD_INS (cfg->cbb, ins);
9251 cmp->opcode = OP_LCOMPARE;
9252 cmp->sreg2 = ins->dreg;
9255 MONO_ADD_INS (cfg->cbb, cmp);
9257 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9258 type_from_op (cfg, ins, sp [0], NULL);
9259 MONO_ADD_INS (cfg->cbb, ins);
9260 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9261 GET_BBLOCK (cfg, tblock, target);
9262 ins->inst_true_bb = tblock;
9263 GET_BBLOCK (cfg, tblock, ip);
9264 ins->inst_false_bb = tblock;
9265 start_new_bblock = 2;
9268 inline_costs += BRANCH_COST;
9283 MONO_INST_NEW (cfg, ins, *ip);
9285 target = ip + 4 + (gint32)read32(ip);
9291 inline_costs += BRANCH_COST;
9295 MonoBasicBlock **targets;
9296 MonoBasicBlock *default_bblock;
9297 MonoJumpInfoBBTable *table;
9298 int offset_reg = alloc_preg (cfg);
9299 int target_reg = alloc_preg (cfg);
9300 int table_reg = alloc_preg (cfg);
9301 int sum_reg = alloc_preg (cfg);
9302 gboolean use_op_switch;
9306 n = read32 (ip + 1);
9309 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9313 CHECK_OPSIZE (n * sizeof (guint32));
9314 target = ip + n * sizeof (guint32);
9316 GET_BBLOCK (cfg, default_bblock, target);
9317 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9319 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9320 for (i = 0; i < n; ++i) {
9321 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9322 targets [i] = tblock;
9323 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9327 if (sp != stack_start) {
9329 * Link the current bb with the targets as well, so handle_stack_args
9330 * will set their in_stack correctly.
9332 link_bblock (cfg, cfg->cbb, default_bblock);
9333 for (i = 0; i < n; ++i)
9334 link_bblock (cfg, cfg->cbb, targets [i]);
9336 handle_stack_args (cfg, stack_start, sp - stack_start);
9338 CHECK_UNVERIFIABLE (cfg);
9340 /* Undo the links */
9341 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9342 for (i = 0; i < n; ++i)
9343 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9349 for (i = 0; i < n; ++i)
9350 link_bblock (cfg, cfg->cbb, targets [i]);
9352 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9353 table->table = targets;
9354 table->table_size = n;
9356 use_op_switch = FALSE;
9358 /* ARM implements SWITCH statements differently */
9359 /* FIXME: Make it use the generic implementation */
9360 if (!cfg->compile_aot)
9361 use_op_switch = TRUE;
9364 if (COMPILE_LLVM (cfg))
9365 use_op_switch = TRUE;
9367 cfg->cbb->has_jump_table = 1;
9369 if (use_op_switch) {
9370 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9371 ins->sreg1 = src1->dreg;
9372 ins->inst_p0 = table;
9373 ins->inst_many_bb = targets;
9374 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9375 MONO_ADD_INS (cfg->cbb, ins);
9377 if (sizeof (gpointer) == 8)
9378 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9380 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9382 #if SIZEOF_REGISTER == 8
9383 /* The upper word might not be zero, and we add it to a 64 bit address later */
9384 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9387 if (cfg->compile_aot) {
9388 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9390 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9391 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9392 ins->inst_p0 = table;
9393 ins->dreg = table_reg;
9394 MONO_ADD_INS (cfg->cbb, ins);
9397 /* FIXME: Use load_memindex */
9398 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9400 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9402 start_new_bblock = 1;
9403 inline_costs += (BRANCH_COST * 2);
9420 ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
9436 if (ins_flag & MONO_INST_VOLATILE) {
9437 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9438 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9441 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9442 ins->flags |= ins_flag;
9445 MONO_ADD_INS (cfg->cbb, ins);
9447 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9448 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9457 MONO_INST_NEW (cfg, ins, (*ip));
9459 ins->sreg1 = sp [0]->dreg;
9460 ins->sreg2 = sp [1]->dreg;
9461 type_from_op (cfg, ins, sp [0], sp [1]);
9463 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9465 /* Use the immediate opcodes if possible */
9466 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9467 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9468 if (imm_opcode != -1) {
9469 ins->opcode = imm_opcode;
9470 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9473 NULLIFY_INS (sp [1]);
9477 MONO_ADD_INS ((cfg)->cbb, (ins));
9479 *sp++ = mono_decompose_opcode (cfg, ins);
9496 MONO_INST_NEW (cfg, ins, (*ip));
9498 ins->sreg1 = sp [0]->dreg;
9499 ins->sreg2 = sp [1]->dreg;
9500 type_from_op (cfg, ins, sp [0], sp [1]);
9502 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9503 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9505 /* FIXME: Pass opcode to is_inst_imm */
9507 /* Use the immediate opcodes if possible */
9508 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9509 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9510 if (imm_opcode != -1) {
9511 ins->opcode = imm_opcode;
9512 if (sp [1]->opcode == OP_I8CONST) {
9513 #if SIZEOF_REGISTER == 8
9514 ins->inst_imm = sp [1]->inst_l;
9516 ins->inst_ls_word = sp [1]->inst_ls_word;
9517 ins->inst_ms_word = sp [1]->inst_ms_word;
9521 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9524 /* Might be followed by an instruction added by add_widen_op */
9525 if (sp [1]->next == NULL)
9526 NULLIFY_INS (sp [1]);
9529 MONO_ADD_INS ((cfg)->cbb, (ins));
9531 *sp++ = mono_decompose_opcode (cfg, ins);
9544 case CEE_CONV_OVF_I8:
9545 case CEE_CONV_OVF_U8:
9549 /* Special case this earlier so we have long constants in the IR */
9550 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9551 int data = sp [-1]->inst_c0;
9552 sp [-1]->opcode = OP_I8CONST;
9553 sp [-1]->type = STACK_I8;
9554 #if SIZEOF_REGISTER == 8
9555 if ((*ip) == CEE_CONV_U8)
9556 sp [-1]->inst_c0 = (guint32)data;
9558 sp [-1]->inst_c0 = data;
9560 sp [-1]->inst_ls_word = data;
9561 if ((*ip) == CEE_CONV_U8)
9562 sp [-1]->inst_ms_word = 0;
9564 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9566 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9573 case CEE_CONV_OVF_I4:
9574 case CEE_CONV_OVF_I1:
9575 case CEE_CONV_OVF_I2:
9576 case CEE_CONV_OVF_I:
9577 case CEE_CONV_OVF_U:
9580 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9581 ADD_UNOP (CEE_CONV_OVF_I8);
9588 case CEE_CONV_OVF_U1:
9589 case CEE_CONV_OVF_U2:
9590 case CEE_CONV_OVF_U4:
9593 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9594 ADD_UNOP (CEE_CONV_OVF_U8);
9601 case CEE_CONV_OVF_I1_UN:
9602 case CEE_CONV_OVF_I2_UN:
9603 case CEE_CONV_OVF_I4_UN:
9604 case CEE_CONV_OVF_I8_UN:
9605 case CEE_CONV_OVF_U1_UN:
9606 case CEE_CONV_OVF_U2_UN:
9607 case CEE_CONV_OVF_U4_UN:
9608 case CEE_CONV_OVF_U8_UN:
9609 case CEE_CONV_OVF_I_UN:
9610 case CEE_CONV_OVF_U_UN:
9617 CHECK_CFG_EXCEPTION;
9621 case CEE_ADD_OVF_UN:
9623 case CEE_MUL_OVF_UN:
9625 case CEE_SUB_OVF_UN:
9631 GSHAREDVT_FAILURE (*ip);
9634 token = read32 (ip + 1);
9635 klass = mini_get_class (method, token, generic_context);
9636 CHECK_TYPELOAD (klass);
9638 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9649 token = read32 (ip + 1);
9650 klass = mini_get_class (method, token, generic_context);
9651 CHECK_TYPELOAD (klass);
9653 /* Optimize the common ldobj+stloc combination */
9663 loc_index = ip [5] - CEE_STLOC_0;
9670 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9671 CHECK_LOCAL (loc_index);
9673 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9674 ins->dreg = cfg->locals [loc_index]->dreg;
9675 ins->flags |= ins_flag;
9678 if (ins_flag & MONO_INST_VOLATILE) {
9679 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9680 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9686 /* Optimize the ldobj+stobj combination */
9687 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token)) {
9692 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
9699 ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
9708 CHECK_STACK_OVF (1);
9710 n = read32 (ip + 1);
9712 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9713 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9714 ins->type = STACK_OBJ;
9717 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9718 MonoInst *iargs [1];
9719 char *str = (char *)mono_method_get_wrapper_data (method, n);
9721 if (cfg->compile_aot)
9722 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9724 EMIT_NEW_PCONST (cfg, iargs [0], str);
9725 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9727 if (cfg->opt & MONO_OPT_SHARED) {
9728 MonoInst *iargs [3];
9730 if (cfg->compile_aot) {
9731 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9733 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9734 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9735 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9736 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
9737 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9740 if (cfg->cbb->out_of_line) {
9741 MonoInst *iargs [2];
9743 if (image == mono_defaults.corlib) {
9745 * Avoid relocations in AOT and save some space by using a
9746 * version of helper_ldstr specialized to mscorlib.
9748 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9749 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9751 /* Avoid creating the string object */
9752 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9753 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9754 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9758 if (cfg->compile_aot) {
9759 NEW_LDSTRCONST (cfg, ins, image, n);
9761 MONO_ADD_INS (cfg->cbb, ins);
9764 NEW_PCONST (cfg, ins, NULL);
9765 ins->type = STACK_OBJ;
9766 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9770 OUT_OF_MEMORY_FAILURE;
9773 MONO_ADD_INS (cfg->cbb, ins);
9782 MonoInst *iargs [2];
9783 MonoMethodSignature *fsig;
9786 MonoInst *vtable_arg = NULL;
9789 token = read32 (ip + 1);
9790 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9793 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9796 mono_save_token_info (cfg, image, token, cmethod);
9798 if (!mono_class_init (cmethod->klass))
9799 TYPE_LOAD_ERROR (cmethod->klass);
9801 context_used = mini_method_check_context_used (cfg, cmethod);
9803 if (!dont_verify && !cfg->skip_visibility) {
9804 MonoMethod *cil_method = cmethod;
9805 MonoMethod *target_method = cil_method;
9807 if (method->is_inflated) {
9808 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9812 if (!mono_method_can_access_method (method_definition, target_method) &&
9813 !mono_method_can_access_method (method, cil_method))
9814 emit_method_access_failure (cfg, method, cil_method);
9817 if (mono_security_core_clr_enabled ())
9818 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
9820 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9821 emit_class_init (cfg, cmethod->klass);
9822 CHECK_TYPELOAD (cmethod->klass);
9826 if (cfg->gsharedvt) {
9827 if (mini_is_gsharedvt_variable_signature (sig))
9828 GSHAREDVT_FAILURE (*ip);
9832 n = fsig->param_count;
9836 * Generate smaller code for the common newobj <exception> instruction in
9837 * argument checking code.
9839 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9840 is_exception_class (cmethod->klass) && n <= 2 &&
9841 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9842 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9843 MonoInst *iargs [3];
9847 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9850 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9854 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9859 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9862 g_assert_not_reached ();
9870 /* move the args to allow room for 'this' in the first position */
9876 /* check_call_signature () requires sp[0] to be set */
9877 this_ins.type = STACK_OBJ;
9879 if (check_call_signature (cfg, fsig, sp))
9884 if (mini_class_is_system_array (cmethod->klass)) {
9885 *sp = emit_get_rgctx_method (cfg, context_used,
9886 cmethod, MONO_RGCTX_INFO_METHOD);
9888 /* Avoid varargs in the common case */
9889 if (fsig->param_count == 1)
9890 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9891 else if (fsig->param_count == 2)
9892 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9893 else if (fsig->param_count == 3)
9894 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9895 else if (fsig->param_count == 4)
9896 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9898 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9899 } else if (cmethod->string_ctor) {
9900 g_assert (!context_used);
9901 g_assert (!vtable_arg);
9902 /* we simply pass a null pointer */
9903 EMIT_NEW_PCONST (cfg, *sp, NULL);
9904 /* now call the string ctor */
9905 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9907 if (cmethod->klass->valuetype) {
9908 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9909 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9910 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9915 * The code generated by mini_emit_virtual_call () expects
9916 * iargs [0] to be a boxed instance, but luckily the vcall
9917 * will be transformed into a normal call there.
9919 } else if (context_used) {
9920 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9923 MonoVTable *vtable = NULL;
9925 if (!cfg->compile_aot)
9926 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9927 CHECK_TYPELOAD (cmethod->klass);
9930 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9931 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9932 * As a workaround, we call class cctors before allocating objects.
9934 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9935 emit_class_init (cfg, cmethod->klass);
9936 if (cfg->verbose_level > 2)
9937 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9938 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9941 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9944 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9947 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9949 /* Now call the actual ctor */
9950 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
9951 CHECK_CFG_EXCEPTION;
9954 if (alloc == NULL) {
9956 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9957 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9965 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
9966 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9974 token = read32 (ip + 1);
9975 klass = mini_get_class (method, token, generic_context);
9976 CHECK_TYPELOAD (klass);
9977 if (sp [0]->type != STACK_OBJ)
9980 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
9981 ins->dreg = alloc_preg (cfg);
9982 ins->sreg1 = (*sp)->dreg;
9984 ins->type = STACK_OBJ;
9985 MONO_ADD_INS (cfg->cbb, ins);
9987 CHECK_CFG_EXCEPTION;
9991 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
9994 case CEE_UNBOX_ANY: {
9995 MonoInst *res, *addr;
10000 token = read32 (ip + 1);
10001 klass = mini_get_class (method, token, generic_context);
10002 CHECK_TYPELOAD (klass);
10004 mono_save_token_info (cfg, image, token, klass);
10006 context_used = mini_class_check_context_used (cfg, klass);
10008 if (mini_is_gsharedvt_klass (klass)) {
10009 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10011 } else if (generic_class_is_reference_type (cfg, klass)) {
10012 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10013 EMIT_NEW_PCONST (cfg, res, NULL);
10014 res->type = STACK_OBJ;
10016 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10017 res->dreg = alloc_preg (cfg);
10018 res->sreg1 = (*sp)->dreg;
10019 res->klass = klass;
10020 res->type = STACK_OBJ;
10021 MONO_ADD_INS (cfg->cbb, res);
10022 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10024 } else if (mono_class_is_nullable (klass)) {
10025 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10027 addr = handle_unbox (cfg, klass, sp, context_used);
10029 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10040 MonoClass *enum_class;
10041 MonoMethod *has_flag;
10047 token = read32 (ip + 1);
10048 klass = mini_get_class (method, token, generic_context);
10049 CHECK_TYPELOAD (klass);
10051 mono_save_token_info (cfg, image, token, klass);
10053 context_used = mini_class_check_context_used (cfg, klass);
10055 if (generic_class_is_reference_type (cfg, klass)) {
10061 if (klass == mono_defaults.void_class)
10063 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10065 /* frequent check in generic code: box (struct), brtrue */
10070 * <push int/long ptr>
10073 * constrained. MyFlags
10074 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10076 * If we find this sequence and the operand types on box and constrained
10077 * are equal, we can emit a specialized instruction sequence instead of
10078 * the very slow HasFlag () call.
10080 if ((cfg->opt & MONO_OPT_INTRINS) &&
10081 /* Cheap checks first. */
10082 ip + 5 + 6 + 5 < end &&
10083 ip [5] == CEE_PREFIX1 &&
10084 ip [6] == CEE_CONSTRAINED_ &&
10085 ip [11] == CEE_CALLVIRT &&
10086 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10087 mono_class_is_enum (klass) &&
10088 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10089 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10090 has_flag->klass == mono_defaults.enum_class &&
10091 !strcmp (has_flag->name, "HasFlag") &&
10092 has_flag->signature->hasthis &&
10093 has_flag->signature->param_count == 1) {
10094 CHECK_TYPELOAD (enum_class);
10096 if (enum_class == klass) {
10097 MonoInst *enum_this, *enum_flag;
10102 enum_this = sp [0];
10103 enum_flag = sp [1];
10105 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10110 // FIXME: LLVM can't handle the inconsistent bb linking
10111 if (!mono_class_is_nullable (klass) &&
10112 !mini_is_gsharedvt_klass (klass) &&
10113 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10114 (ip [5] == CEE_BRTRUE ||
10115 ip [5] == CEE_BRTRUE_S ||
10116 ip [5] == CEE_BRFALSE ||
10117 ip [5] == CEE_BRFALSE_S)) {
10118 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10120 MonoBasicBlock *true_bb, *false_bb;
10124 if (cfg->verbose_level > 3) {
10125 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10126 printf ("<box+brtrue opt>\n");
10131 case CEE_BRFALSE_S:
10134 target = ip + 1 + (signed char)(*ip);
10141 target = ip + 4 + (gint)(read32 (ip));
10145 g_assert_not_reached ();
10149 * We need to link both bblocks, since it is needed for handling stack
10150 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10151 * Branching to only one of them would lead to inconsistencies, so
10152 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10154 GET_BBLOCK (cfg, true_bb, target);
10155 GET_BBLOCK (cfg, false_bb, ip);
10157 mono_link_bblock (cfg, cfg->cbb, true_bb);
10158 mono_link_bblock (cfg, cfg->cbb, false_bb);
10160 if (sp != stack_start) {
10161 handle_stack_args (cfg, stack_start, sp - stack_start);
10163 CHECK_UNVERIFIABLE (cfg);
10166 if (COMPILE_LLVM (cfg)) {
10167 dreg = alloc_ireg (cfg);
10168 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10171 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10173 /* The JIT can't eliminate the iconst+compare */
10174 MONO_INST_NEW (cfg, ins, OP_BR);
10175 ins->inst_target_bb = is_true ? true_bb : false_bb;
10176 MONO_ADD_INS (cfg->cbb, ins);
10179 start_new_bblock = 1;
10183 *sp++ = handle_box (cfg, val, klass, context_used);
10185 CHECK_CFG_EXCEPTION;
10194 token = read32 (ip + 1);
10195 klass = mini_get_class (method, token, generic_context);
10196 CHECK_TYPELOAD (klass);
10198 mono_save_token_info (cfg, image, token, klass);
10200 context_used = mini_class_check_context_used (cfg, klass);
10202 if (mono_class_is_nullable (klass)) {
10205 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10206 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10210 ins = handle_unbox (cfg, klass, sp, context_used);
10223 MonoClassField *field;
10224 #ifndef DISABLE_REMOTING
10228 gboolean is_instance;
10230 gpointer addr = NULL;
10231 gboolean is_special_static;
10233 MonoInst *store_val = NULL;
10234 MonoInst *thread_ins;
10237 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10239 if (op == CEE_STFLD) {
10242 store_val = sp [1];
10247 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10249 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10252 if (op == CEE_STSFLD) {
10255 store_val = sp [0];
10260 token = read32 (ip + 1);
10261 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10262 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10263 klass = field->parent;
10266 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10269 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10270 FIELD_ACCESS_FAILURE (method, field);
10271 mono_class_init (klass);
10273 /* if the class is Critical then transparent code cannot access its fields */
10274 if (!is_instance && mono_security_core_clr_enabled ())
10275 ensure_method_is_allowed_to_access_field (cfg, method, field);
10277 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10278 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10279 if (mono_security_core_clr_enabled ())
10280 ensure_method_is_allowed_to_access_field (cfg, method, field);
10283 ftype = mono_field_get_type (field);
10286 * LDFLD etc. is usable on static fields as well, so convert those cases to
10289 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10301 g_assert_not_reached ();
10303 is_instance = FALSE;
10306 context_used = mini_class_check_context_used (cfg, klass);
10308 /* INSTANCE CASE */
10310 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10311 if (op == CEE_STFLD) {
10312 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10314 #ifndef DISABLE_REMOTING
10315 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10316 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10317 MonoInst *iargs [5];
10319 GSHAREDVT_FAILURE (op);
10321 iargs [0] = sp [0];
10322 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10323 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10324 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10326 iargs [4] = sp [1];
10328 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10329 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10330 iargs, ip, cfg->real_offset, TRUE);
10331 CHECK_CFG_EXCEPTION;
10332 g_assert (costs > 0);
10334 cfg->real_offset += 5;
10336 inline_costs += costs;
10338 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10343 MonoInst *store, *wbarrier_ptr_ins = NULL;
10345 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10347 if (ins_flag & MONO_INST_VOLATILE) {
10348 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10349 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10352 if (mini_is_gsharedvt_klass (klass)) {
10353 MonoInst *offset_ins;
10355 context_used = mini_class_check_context_used (cfg, klass);
10357 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10358 /* The value is offset by 1 */
10359 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10360 dreg = alloc_ireg_mp (cfg);
10361 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10362 wbarrier_ptr_ins = ins;
10363 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
10364 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10366 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10368 if (sp [0]->opcode != OP_LDADDR)
10369 store->flags |= MONO_INST_FAULT;
10371 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10372 if (mini_is_gsharedvt_klass (klass)) {
10373 g_assert (wbarrier_ptr_ins);
10374 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10376 /* insert call to write barrier */
10380 dreg = alloc_ireg_mp (cfg);
10381 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10382 mini_emit_write_barrier (cfg, ptr, sp [1]);
10386 store->flags |= ins_flag;
10393 #ifndef DISABLE_REMOTING
10394 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10395 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10396 MonoInst *iargs [4];
10398 GSHAREDVT_FAILURE (op);
10400 iargs [0] = sp [0];
10401 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10402 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10403 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10404 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10405 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10406 iargs, ip, cfg->real_offset, TRUE);
10407 CHECK_CFG_EXCEPTION;
10408 g_assert (costs > 0);
10410 cfg->real_offset += 5;
10414 inline_costs += costs;
10416 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10422 if (sp [0]->type == STACK_VTYPE) {
10425 /* Have to compute the address of the variable */
10427 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10429 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10431 g_assert (var->klass == klass);
10433 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10437 if (op == CEE_LDFLDA) {
10438 if (sp [0]->type == STACK_OBJ) {
10439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10440 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10443 dreg = alloc_ireg_mp (cfg);
10445 if (mini_is_gsharedvt_klass (klass)) {
10446 MonoInst *offset_ins;
10448 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10449 /* The value is offset by 1 */
10450 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10451 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10453 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10455 ins->klass = mono_class_from_mono_type (field->type);
10456 ins->type = STACK_MP;
10461 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10463 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10464 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10473 MonoInst *field_add_inst = sp [0];
10474 if (mini_is_gsharedvt_klass (klass)) {
10475 MonoInst *offset_ins;
10477 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10478 /* The value is offset by 1 */
10479 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10480 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
10484 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
10486 if (sp [0]->opcode != OP_LDADDR)
10487 load->flags |= MONO_INST_FAULT;
10499 context_used = mini_class_check_context_used (cfg, klass);
10501 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10502 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10506 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10507 * to be called here.
10509 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10510 mono_class_vtable (cfg->domain, klass);
10511 CHECK_TYPELOAD (klass);
10513 mono_domain_lock (cfg->domain);
10514 if (cfg->domain->special_static_fields)
10515 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10516 mono_domain_unlock (cfg->domain);
10518 is_special_static = mono_class_field_is_special_static (field);
10520 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10521 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10525 /* Generate IR to compute the field address */
10526 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10528 * Fast access to TLS data
10529 * Inline version of get_thread_static_data () in
10533 int idx, static_data_reg, array_reg, dreg;
10535 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10536 GSHAREDVT_FAILURE (op);
10538 static_data_reg = alloc_ireg (cfg);
10539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10541 if (cfg->compile_aot) {
10542 int offset_reg, offset2_reg, idx_reg;
10544 /* For TLS variables, this will return the TLS offset */
10545 EMIT_NEW_SFLDACONST (cfg, ins, field);
10546 offset_reg = ins->dreg;
10547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10548 idx_reg = alloc_ireg (cfg);
10549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10551 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10552 array_reg = alloc_ireg (cfg);
10553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10554 offset2_reg = alloc_ireg (cfg);
10555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10557 dreg = alloc_ireg (cfg);
10558 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10560 offset = (gsize)addr & 0x7fffffff;
10561 idx = offset & 0x3f;
10563 array_reg = alloc_ireg (cfg);
10564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10565 dreg = alloc_ireg (cfg);
10566 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10568 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10569 (cfg->compile_aot && is_special_static) ||
10570 (context_used && is_special_static)) {
10571 MonoInst *iargs [2];
10573 g_assert (field->parent);
10574 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10575 if (context_used) {
10576 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10577 field, MONO_RGCTX_INFO_CLASS_FIELD);
10579 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10581 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10582 } else if (context_used) {
10583 MonoInst *static_data;
10586 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10587 method->klass->name_space, method->klass->name, method->name,
10588 depth, field->offset);
10591 if (mono_class_needs_cctor_run (klass, method))
10592 emit_class_init (cfg, klass);
10595 * The pointer we're computing here is
10597 * super_info.static_data + field->offset
10599 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10600 klass, MONO_RGCTX_INFO_STATIC_DATA);
10602 if (mini_is_gsharedvt_klass (klass)) {
10603 MonoInst *offset_ins;
10605 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10606 /* The value is offset by 1 */
10607 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10608 dreg = alloc_ireg_mp (cfg);
10609 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10610 } else if (field->offset == 0) {
10613 int addr_reg = mono_alloc_preg (cfg);
10614 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10616 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10617 MonoInst *iargs [2];
10619 g_assert (field->parent);
10620 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10621 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10622 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10624 MonoVTable *vtable = NULL;
10626 if (!cfg->compile_aot)
10627 vtable = mono_class_vtable (cfg->domain, klass);
10628 CHECK_TYPELOAD (klass);
10631 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10632 if (!(g_slist_find (class_inits, klass))) {
10633 emit_class_init (cfg, klass);
10634 if (cfg->verbose_level > 2)
10635 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10636 class_inits = g_slist_prepend (class_inits, klass);
10639 if (cfg->run_cctors) {
10640 /* This makes so that inline cannot trigger */
10641 /* .cctors: too many apps depend on them */
10642 /* running with a specific order... */
10644 if (! vtable->initialized)
10645 INLINE_FAILURE ("class init");
10646 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10647 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10648 goto exception_exit;
10652 if (cfg->compile_aot)
10653 EMIT_NEW_SFLDACONST (cfg, ins, field);
10656 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10658 EMIT_NEW_PCONST (cfg, ins, addr);
10661 MonoInst *iargs [1];
10662 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10663 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10667 /* Generate IR to do the actual load/store operation */
10669 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10670 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10671 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10674 if (op == CEE_LDSFLDA) {
10675 ins->klass = mono_class_from_mono_type (ftype);
10676 ins->type = STACK_PTR;
10678 } else if (op == CEE_STSFLD) {
10681 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10682 store->flags |= ins_flag;
10684 gboolean is_const = FALSE;
10685 MonoVTable *vtable = NULL;
10686 gpointer addr = NULL;
10688 if (!context_used) {
10689 vtable = mono_class_vtable (cfg->domain, klass);
10690 CHECK_TYPELOAD (klass);
10692 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10693 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10694 int ro_type = ftype->type;
10696 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10697 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10698 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10701 GSHAREDVT_FAILURE (op);
10703 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10706 case MONO_TYPE_BOOLEAN:
10708 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10712 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10715 case MONO_TYPE_CHAR:
10717 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10721 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10726 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10730 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10735 case MONO_TYPE_PTR:
10736 case MONO_TYPE_FNPTR:
10737 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10738 type_to_eval_stack_type ((cfg), field->type, *sp);
10741 case MONO_TYPE_STRING:
10742 case MONO_TYPE_OBJECT:
10743 case MONO_TYPE_CLASS:
10744 case MONO_TYPE_SZARRAY:
10745 case MONO_TYPE_ARRAY:
10746 if (!mono_gc_is_moving ()) {
10747 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10748 type_to_eval_stack_type ((cfg), field->type, *sp);
10756 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10761 case MONO_TYPE_VALUETYPE:
10771 CHECK_STACK_OVF (1);
10773 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10774 load->flags |= ins_flag;
10780 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10781 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10782 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10793 token = read32 (ip + 1);
10794 klass = mini_get_class (method, token, generic_context);
10795 CHECK_TYPELOAD (klass);
10797 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10798 mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
10809 const char *data_ptr;
10811 guint32 field_token;
10817 token = read32 (ip + 1);
10819 klass = mini_get_class (method, token, generic_context);
10820 CHECK_TYPELOAD (klass);
10821 if (klass->byval_arg.type == MONO_TYPE_VOID)
10824 context_used = mini_class_check_context_used (cfg, klass);
10826 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10827 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10828 ins->sreg1 = sp [0]->dreg;
10829 ins->type = STACK_I4;
10830 ins->dreg = alloc_ireg (cfg);
10831 MONO_ADD_INS (cfg->cbb, ins);
10832 *sp = mono_decompose_opcode (cfg, ins);
10835 if (context_used) {
10836 MonoInst *args [3];
10837 MonoClass *array_class = mono_array_class_get (klass, 1);
10838 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10840 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10843 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
10844 array_class, MONO_RGCTX_INFO_VTABLE);
10849 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10851 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
10853 if (cfg->opt & MONO_OPT_SHARED) {
10854 /* Decompose now to avoid problems with references to the domainvar */
10855 MonoInst *iargs [3];
10857 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10858 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10859 iargs [2] = sp [0];
10861 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
10863 /* Decompose later since it is needed by abcrem */
10864 MonoClass *array_type = mono_array_class_get (klass, 1);
10865 mono_class_vtable (cfg->domain, array_type);
10866 CHECK_TYPELOAD (array_type);
10868 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10869 ins->dreg = alloc_ireg_ref (cfg);
10870 ins->sreg1 = sp [0]->dreg;
10871 ins->inst_newa_class = klass;
10872 ins->type = STACK_OBJ;
10873 ins->klass = array_type;
10874 MONO_ADD_INS (cfg->cbb, ins);
10875 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10876 cfg->cbb->has_array_access = TRUE;
10878 /* Needed so mono_emit_load_get_addr () gets called */
10879 mono_get_got_var (cfg);
10889 * we inline/optimize the initialization sequence if possible.
10890 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10891 * for small sizes open code the memcpy
10892 * ensure the rva field is big enough
10894 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10895 MonoMethod *memcpy_method = mini_get_memcpy_method ();
10896 MonoInst *iargs [3];
10897 int add_reg = alloc_ireg_mp (cfg);
10899 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10900 if (cfg->compile_aot) {
10901 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10903 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10905 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10906 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10915 if (sp [0]->type != STACK_OBJ)
10918 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10919 ins->dreg = alloc_preg (cfg);
10920 ins->sreg1 = sp [0]->dreg;
10921 ins->type = STACK_I4;
10922 /* This flag will be inherited by the decomposition */
10923 ins->flags |= MONO_INST_FAULT;
10924 MONO_ADD_INS (cfg->cbb, ins);
10925 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10926 cfg->cbb->has_array_access = TRUE;
10934 if (sp [0]->type != STACK_OBJ)
10937 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10939 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10940 CHECK_TYPELOAD (klass);
10941 /* we need to make sure that this array is exactly the type it needs
10942 * to be for correctness. the wrappers are lax with their usage
10943 * so we need to ignore them here
10945 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10946 MonoClass *array_class = mono_array_class_get (klass, 1);
10947 mini_emit_check_array_type (cfg, sp [0], array_class);
10948 CHECK_TYPELOAD (array_class);
10952 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10957 case CEE_LDELEM_I1:
10958 case CEE_LDELEM_U1:
10959 case CEE_LDELEM_I2:
10960 case CEE_LDELEM_U2:
10961 case CEE_LDELEM_I4:
10962 case CEE_LDELEM_U4:
10963 case CEE_LDELEM_I8:
10965 case CEE_LDELEM_R4:
10966 case CEE_LDELEM_R8:
10967 case CEE_LDELEM_REF: {
10973 if (*ip == CEE_LDELEM) {
10975 token = read32 (ip + 1);
10976 klass = mini_get_class (method, token, generic_context);
10977 CHECK_TYPELOAD (klass);
10978 mono_class_init (klass);
10981 klass = array_access_to_klass (*ip);
10983 if (sp [0]->type != STACK_OBJ)
10986 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10988 if (mini_is_gsharedvt_variable_klass (klass)) {
10989 // FIXME-VT: OP_ICONST optimization
10990 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10992 ins->opcode = OP_LOADV_MEMBASE;
10993 } else if (sp [1]->opcode == OP_ICONST) {
10994 int array_reg = sp [0]->dreg;
10995 int index_reg = sp [1]->dreg;
10996 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10998 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
10999 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11001 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11002 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11004 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11005 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11008 if (*ip == CEE_LDELEM)
11015 case CEE_STELEM_I1:
11016 case CEE_STELEM_I2:
11017 case CEE_STELEM_I4:
11018 case CEE_STELEM_I8:
11019 case CEE_STELEM_R4:
11020 case CEE_STELEM_R8:
11021 case CEE_STELEM_REF:
11026 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11028 if (*ip == CEE_STELEM) {
11030 token = read32 (ip + 1);
11031 klass = mini_get_class (method, token, generic_context);
11032 CHECK_TYPELOAD (klass);
11033 mono_class_init (klass);
11036 klass = array_access_to_klass (*ip);
11038 if (sp [0]->type != STACK_OBJ)
11041 emit_array_store (cfg, klass, sp, TRUE);
11043 if (*ip == CEE_STELEM)
11050 case CEE_CKFINITE: {
11054 if (cfg->llvm_only) {
11055 MonoInst *iargs [1];
11057 iargs [0] = sp [0];
11058 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11060 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11061 ins->sreg1 = sp [0]->dreg;
11062 ins->dreg = alloc_freg (cfg);
11063 ins->type = STACK_R8;
11064 MONO_ADD_INS (cfg->cbb, ins);
11066 *sp++ = mono_decompose_opcode (cfg, ins);
11072 case CEE_REFANYVAL: {
11073 MonoInst *src_var, *src;
11075 int klass_reg = alloc_preg (cfg);
11076 int dreg = alloc_preg (cfg);
11078 GSHAREDVT_FAILURE (*ip);
11081 MONO_INST_NEW (cfg, ins, *ip);
11084 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11085 CHECK_TYPELOAD (klass);
11087 context_used = mini_class_check_context_used (cfg, klass);
11090 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11092 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11093 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11096 if (context_used) {
11097 MonoInst *klass_ins;
11099 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11100 klass, MONO_RGCTX_INFO_KLASS);
11103 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11104 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11106 mini_emit_class_check (cfg, klass_reg, klass);
11108 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11109 ins->type = STACK_MP;
11110 ins->klass = klass;
11115 case CEE_MKREFANY: {
11116 MonoInst *loc, *addr;
11118 GSHAREDVT_FAILURE (*ip);
11121 MONO_INST_NEW (cfg, ins, *ip);
11124 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11125 CHECK_TYPELOAD (klass);
11127 context_used = mini_class_check_context_used (cfg, klass);
11129 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11130 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11132 if (context_used) {
11133 MonoInst *const_ins;
11134 int type_reg = alloc_preg (cfg);
11136 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11141 int const_reg = alloc_preg (cfg);
11142 int type_reg = alloc_preg (cfg);
11144 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11149 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11151 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11152 ins->type = STACK_VTYPE;
11153 ins->klass = mono_defaults.typed_reference_class;
11158 case CEE_LDTOKEN: {
11160 MonoClass *handle_class;
11162 CHECK_STACK_OVF (1);
11165 n = read32 (ip + 1);
11167 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11168 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11169 handle = mono_method_get_wrapper_data (method, n);
11170 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11171 if (handle_class == mono_defaults.typehandle_class)
11172 handle = &((MonoClass*)handle)->byval_arg;
11175 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11180 mono_class_init (handle_class);
11181 if (cfg->gshared) {
11182 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11183 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11184 /* This case handles ldtoken
11185 of an open type, like for
11188 } else if (handle_class == mono_defaults.typehandle_class) {
11189 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11190 } else if (handle_class == mono_defaults.fieldhandle_class)
11191 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11192 else if (handle_class == mono_defaults.methodhandle_class)
11193 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11195 g_assert_not_reached ();
11198 if ((cfg->opt & MONO_OPT_SHARED) &&
11199 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11200 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11201 MonoInst *addr, *vtvar, *iargs [3];
11202 int method_context_used;
11204 method_context_used = mini_method_check_context_used (cfg, method);
11206 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11208 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11209 EMIT_NEW_ICONST (cfg, iargs [1], n);
11210 if (method_context_used) {
11211 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11212 method, MONO_RGCTX_INFO_METHOD);
11213 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11215 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11216 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11218 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11222 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11224 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11225 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11226 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11227 (cmethod->klass == mono_defaults.systemtype_class) &&
11228 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11229 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11231 mono_class_init (tclass);
11232 if (context_used) {
11233 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11234 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11235 } else if (cfg->compile_aot) {
11236 if (method->wrapper_type) {
11237 error_init (&error); //got to do it since there are multiple conditionals below
11238 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11239 /* Special case for static synchronized wrappers */
11240 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11242 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11243 /* FIXME: n is not a normal token */
11245 EMIT_NEW_PCONST (cfg, ins, NULL);
11248 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11251 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11253 EMIT_NEW_PCONST (cfg, ins, rt);
11255 ins->type = STACK_OBJ;
11256 ins->klass = cmethod->klass;
11259 MonoInst *addr, *vtvar;
11261 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11263 if (context_used) {
11264 if (handle_class == mono_defaults.typehandle_class) {
11265 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11266 mono_class_from_mono_type ((MonoType *)handle),
11267 MONO_RGCTX_INFO_TYPE);
11268 } else if (handle_class == mono_defaults.methodhandle_class) {
11269 ins = emit_get_rgctx_method (cfg, context_used,
11270 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11271 } else if (handle_class == mono_defaults.fieldhandle_class) {
11272 ins = emit_get_rgctx_field (cfg, context_used,
11273 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11275 g_assert_not_reached ();
11277 } else if (cfg->compile_aot) {
11278 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11280 EMIT_NEW_PCONST (cfg, ins, handle);
11282 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11283 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11284 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11294 if (sp [-1]->type != STACK_OBJ)
11297 MONO_INST_NEW (cfg, ins, OP_THROW);
11299 ins->sreg1 = sp [0]->dreg;
11301 cfg->cbb->out_of_line = TRUE;
11302 MONO_ADD_INS (cfg->cbb, ins);
11303 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11304 MONO_ADD_INS (cfg->cbb, ins);
11307 link_bblock (cfg, cfg->cbb, end_bblock);
11308 start_new_bblock = 1;
11309 /* This can complicate code generation for llvm since the return value might not be defined */
11310 if (COMPILE_LLVM (cfg))
11311 INLINE_FAILURE ("throw");
11313 case CEE_ENDFINALLY:
11314 if (!ip_in_finally_clause (cfg, ip - header->code))
11316 /* mono_save_seq_point_info () depends on this */
11317 if (sp != stack_start)
11318 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11319 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11320 MONO_ADD_INS (cfg->cbb, ins);
11322 start_new_bblock = 1;
11325 * Control will leave the method so empty the stack, otherwise
11326 * the next basic block will start with a nonempty stack.
11328 while (sp != stack_start) {
11333 case CEE_LEAVE_S: {
11336 if (*ip == CEE_LEAVE) {
11338 target = ip + 5 + (gint32)read32(ip + 1);
11341 target = ip + 2 + (signed char)(ip [1]);
11344 /* empty the stack */
11345 while (sp != stack_start) {
11350 * If this leave statement is in a catch block, check for a
11351 * pending exception, and rethrow it if necessary.
11352 * We avoid doing this in runtime invoke wrappers, since those are called
11353 * by native code which expects the wrapper to catch all exceptions.
11355 for (i = 0; i < header->num_clauses; ++i) {
11356 MonoExceptionClause *clause = &header->clauses [i];
11359 * Use <= in the final comparison to handle clauses with multiple
11360 * leave statements, like in bug #78024.
11361 * The ordering of the exception clauses guarantees that we find the
11362 * innermost clause.
11364 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11366 MonoBasicBlock *dont_throw;
11371 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11374 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11376 NEW_BBLOCK (cfg, dont_throw);
11379 * Currently, we always rethrow the abort exception, despite the
11380 * fact that this is not correct. See thread6.cs for an example.
11381 * But propagating the abort exception is more important than
11382 * getting the semantics right.
11384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11386 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11388 MONO_START_BB (cfg, dont_throw);
11393 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11396 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11399 for (tmp = handlers; tmp; tmp = tmp->next) {
11400 MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
11401 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
11402 MonoBasicBlock *dont_throw;
11404 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11406 link_bblock (cfg, cfg->cbb, tblock);
11408 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
11410 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11411 ins->inst_target_bb = tblock;
11412 ins->inst_eh_block = clause;
11413 MONO_ADD_INS (cfg->cbb, ins);
11414 cfg->cbb->has_call_handler = 1;
11416 /* Throw exception if exvar is set */
11417 /* FIXME Do we need this for calls from catch/filter ? */
11418 NEW_BBLOCK (cfg, dont_throw);
11419 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
11420 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11421 mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
11422 cfg->cbb->clause_hole = clause;
11424 MONO_START_BB (cfg, dont_throw);
11425 cfg->cbb->clause_hole = clause;
11427 if (COMPILE_LLVM (cfg)) {
11428 MonoBasicBlock *target_bb;
11431 * Link the finally bblock with the target, since it will
11432 * conceptually branch there.
11434 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11435 GET_BBLOCK (cfg, target_bb, target);
11436 link_bblock (cfg, tblock, target_bb);
11439 g_list_free (handlers);
11442 MONO_INST_NEW (cfg, ins, OP_BR);
11443 MONO_ADD_INS (cfg->cbb, ins);
11444 GET_BBLOCK (cfg, tblock, target);
11445 link_bblock (cfg, cfg->cbb, tblock);
11446 ins->inst_target_bb = tblock;
11448 start_new_bblock = 1;
11450 if (*ip == CEE_LEAVE)
11459 * Mono specific opcodes
11461 case MONO_CUSTOM_PREFIX: {
11463 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11467 case CEE_MONO_ICALL: {
11469 MonoJitICallInfo *info;
11471 token = read32 (ip + 2);
11472 func = mono_method_get_wrapper_data (method, token);
11473 info = mono_find_jit_icall_by_addr (func);
11475 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11478 CHECK_STACK (info->sig->param_count);
11479 sp -= info->sig->param_count;
11481 if (cfg->compile_aot && !strcmp (info->name, "mono_threads_attach_coop")) {
11485 * This is called on unattached threads, so it cannot go through the trampoline
11486 * infrastructure. Use an indirect call through a got slot initialized at load time
11489 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
11490 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
11492 ins = mono_emit_jit_icall (cfg, info->func, sp);
11495 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11499 inline_costs += 10 * num_calls++;
11503 case CEE_MONO_LDPTR_CARD_TABLE:
11504 case CEE_MONO_LDPTR_NURSERY_START:
11505 case CEE_MONO_LDPTR_NURSERY_BITS:
11506 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11507 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
11508 CHECK_STACK_OVF (1);
11511 case CEE_MONO_LDPTR_CARD_TABLE:
11512 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11514 case CEE_MONO_LDPTR_NURSERY_START:
11515 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11517 case CEE_MONO_LDPTR_NURSERY_BITS:
11518 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11520 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11521 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11523 case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
11524 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
11527 g_assert_not_reached ();
11533 inline_costs += 10 * num_calls++;
11536 case CEE_MONO_LDPTR: {
11539 CHECK_STACK_OVF (1);
11541 token = read32 (ip + 2);
11543 ptr = mono_method_get_wrapper_data (method, token);
11544 EMIT_NEW_PCONST (cfg, ins, ptr);
11547 inline_costs += 10 * num_calls++;
11548 /* Can't embed random pointers into AOT code */
11552 case CEE_MONO_JIT_ICALL_ADDR: {
11553 MonoJitICallInfo *callinfo;
11556 CHECK_STACK_OVF (1);
11558 token = read32 (ip + 2);
11560 ptr = mono_method_get_wrapper_data (method, token);
11561 callinfo = mono_find_jit_icall_by_addr (ptr);
11562 g_assert (callinfo);
11563 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11566 inline_costs += 10 * num_calls++;
11569 case CEE_MONO_ICALL_ADDR: {
11570 MonoMethod *cmethod;
11573 CHECK_STACK_OVF (1);
11575 token = read32 (ip + 2);
11577 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11579 if (cfg->compile_aot) {
11580 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11582 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11583 * before the call, it's not needed when using direct pinvoke.
11584 * This is not an optimization, but its used to avoid looking up pinvokes
11585 * on platforms which don't support dlopen ().
11587 EMIT_NEW_PCONST (cfg, ins, NULL);
11589 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11592 ptr = mono_lookup_internal_call (cmethod);
11594 EMIT_NEW_PCONST (cfg, ins, ptr);
11600 case CEE_MONO_VTADDR: {
11601 MonoInst *src_var, *src;
11607 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11608 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11613 case CEE_MONO_NEWOBJ: {
11614 MonoInst *iargs [2];
11616 CHECK_STACK_OVF (1);
11618 token = read32 (ip + 2);
11619 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11620 mono_class_init (klass);
11621 NEW_DOMAINCONST (cfg, iargs [0]);
11622 MONO_ADD_INS (cfg->cbb, iargs [0]);
11623 NEW_CLASSCONST (cfg, iargs [1], klass);
11624 MONO_ADD_INS (cfg->cbb, iargs [1]);
11625 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11627 inline_costs += 10 * num_calls++;
11630 case CEE_MONO_OBJADDR:
11633 MONO_INST_NEW (cfg, ins, OP_MOVE);
11634 ins->dreg = alloc_ireg_mp (cfg);
11635 ins->sreg1 = sp [0]->dreg;
11636 ins->type = STACK_MP;
11637 MONO_ADD_INS (cfg->cbb, ins);
11641 case CEE_MONO_LDNATIVEOBJ:
11643 * Similar to LDOBJ, but instead load the unmanaged
11644 * representation of the vtype to the stack.
11649 token = read32 (ip + 2);
11650 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11651 g_assert (klass->valuetype);
11652 mono_class_init (klass);
11655 MonoInst *src, *dest, *temp;
11658 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11659 temp->backend.is_pinvoke = 1;
11660 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11661 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
11663 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11664 dest->type = STACK_VTYPE;
11665 dest->klass = klass;
11671 case CEE_MONO_RETOBJ: {
11673 * Same as RET, but return the native representation of a vtype
11676 g_assert (cfg->ret);
11677 g_assert (mono_method_signature (method)->pinvoke);
11682 token = read32 (ip + 2);
11683 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11685 if (!cfg->vret_addr) {
11686 g_assert (cfg->ret_var_is_local);
11688 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11690 EMIT_NEW_RETLOADA (cfg, ins);
11692 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
11694 if (sp != stack_start)
11697 MONO_INST_NEW (cfg, ins, OP_BR);
11698 ins->inst_target_bb = end_bblock;
11699 MONO_ADD_INS (cfg->cbb, ins);
11700 link_bblock (cfg, cfg->cbb, end_bblock);
11701 start_new_bblock = 1;
11705 case CEE_MONO_SAVE_LMF:
11706 case CEE_MONO_RESTORE_LMF:
11709 case CEE_MONO_CLASSCONST:
11710 CHECK_STACK_OVF (1);
11712 token = read32 (ip + 2);
11713 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11716 inline_costs += 10 * num_calls++;
11718 case CEE_MONO_NOT_TAKEN:
11719 cfg->cbb->out_of_line = TRUE;
11722 case CEE_MONO_TLS: {
11725 CHECK_STACK_OVF (1);
11727 key = (MonoTlsKey)read32 (ip + 2);
11728 g_assert (key < TLS_KEY_NUM);
11730 ins = mono_create_tls_get (cfg, key);
11732 ins->type = STACK_PTR;
11737 case CEE_MONO_DYN_CALL: {
11738 MonoCallInst *call;
11740 /* It would be easier to call a trampoline, but that would put an
11741 * extra frame on the stack, confusing exception handling. So
11742 * implement it inline using an opcode for now.
11745 if (!cfg->dyn_call_var) {
11746 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11747 /* prevent it from being register allocated */
11748 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11751 /* Has to use a call inst since the local regalloc expects it */
11752 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11753 ins = (MonoInst*)call;
11755 ins->sreg1 = sp [0]->dreg;
11756 ins->sreg2 = sp [1]->dreg;
11757 MONO_ADD_INS (cfg->cbb, ins);
11759 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
11762 inline_costs += 10 * num_calls++;
11766 case CEE_MONO_MEMORY_BARRIER: {
11768 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
11772 case CEE_MONO_ATOMIC_STORE_I4: {
11773 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
11779 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
11780 ins->dreg = sp [0]->dreg;
11781 ins->sreg1 = sp [1]->dreg;
11782 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
11783 MONO_ADD_INS (cfg->cbb, ins);
11788 case CEE_MONO_JIT_ATTACH: {
11789 MonoInst *args [16], *domain_ins;
11790 MonoInst *ad_ins, *jit_tls_ins;
11791 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11793 g_assert (!mono_threads_is_blocking_transition_enabled ());
11795 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11797 EMIT_NEW_PCONST (cfg, ins, NULL);
11798 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11800 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11801 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
11803 if (ad_ins && jit_tls_ins) {
11804 NEW_BBLOCK (cfg, next_bb);
11805 NEW_BBLOCK (cfg, call_bb);
11807 if (cfg->compile_aot) {
11808 /* AOT code is only used in the root domain */
11809 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11811 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11813 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11820 MONO_START_BB (cfg, call_bb);
11823 /* AOT code is only used in the root domain */
11824 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
11825 if (cfg->compile_aot) {
11829 * This is called on unattached threads, so it cannot go through the trampoline
11830 * infrastructure. Use an indirect call through a got slot initialized at load time
11833 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
11834 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
11836 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11838 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11841 MONO_START_BB (cfg, next_bb);
11846 case CEE_MONO_JIT_DETACH: {
11847 MonoInst *args [16];
11849 /* Restore the original domain */
11850 dreg = alloc_ireg (cfg);
11851 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11852 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11856 case CEE_MONO_CALLI_EXTRA_ARG: {
11858 MonoMethodSignature *fsig;
11862 * This is the same as CEE_CALLI, but passes an additional argument
11863 * to the called method in llvmonly mode.
11864 * This is only used by delegate invoke wrappers to call the
11865 * actual delegate method.
11867 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
11870 token = read32 (ip + 2);
11878 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
11881 if (cfg->llvm_only)
11882 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
11884 n = fsig->param_count + fsig->hasthis + 1;
11891 if (cfg->llvm_only) {
11893 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
11894 * cconv. This is set by mono_init_delegate ().
11896 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
11897 MonoInst *callee = addr;
11898 MonoInst *call, *localloc_ins;
11899 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
11900 int low_bit_reg = alloc_preg (cfg);
11902 NEW_BBLOCK (cfg, is_gsharedvt_bb);
11903 NEW_BBLOCK (cfg, end_bb);
11905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
11906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
11907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
11909 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
11910 addr = emit_get_rgctx_sig (cfg, context_used,
11911 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
11913 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
11915 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
11916 ins->dreg = alloc_preg (cfg);
11917 ins->inst_imm = 2 * SIZEOF_VOID_P;
11918 MONO_ADD_INS (cfg->cbb, ins);
11919 localloc_ins = ins;
11920 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
11922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
11924 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
11925 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
11927 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
11928 MONO_START_BB (cfg, is_gsharedvt_bb);
11929 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
11930 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
11931 ins->dreg = call->dreg;
11933 MONO_START_BB (cfg, end_bb);
11935 /* Caller uses a normal calling conv */
11937 MonoInst *callee = addr;
11938 MonoInst *call, *localloc_ins;
11939 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
11940 int low_bit_reg = alloc_preg (cfg);
11942 NEW_BBLOCK (cfg, is_gsharedvt_bb);
11943 NEW_BBLOCK (cfg, end_bb);
11945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
11946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
11947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
11949 /* Normal case: callee uses a normal cconv, no conversion is needed */
11950 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
11951 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
11952 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
11953 MONO_START_BB (cfg, is_gsharedvt_bb);
11954 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
11955 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
11956 MONO_ADD_INS (cfg->cbb, addr);
11958 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
11960 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
11961 ins->dreg = alloc_preg (cfg);
11962 ins->inst_imm = 2 * SIZEOF_VOID_P;
11963 MONO_ADD_INS (cfg->cbb, ins);
11964 localloc_ins = ins;
11965 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11966 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
11967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
11969 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
11970 ins->dreg = call->dreg;
11971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
11973 MONO_START_BB (cfg, end_bb);
11976 /* Same as CEE_CALLI */
11977 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
11979 * We pass the address to the gsharedvt trampoline in the rgctx reg
11981 MonoInst *callee = addr;
11983 addr = emit_get_rgctx_sig (cfg, context_used,
11984 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
11985 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
11987 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
11991 if (!MONO_TYPE_IS_VOID (fsig->ret))
11992 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
11994 CHECK_CFG_EXCEPTION;
11998 constrained_class = NULL;
12001 case CEE_MONO_LDDOMAIN:
12002 CHECK_STACK_OVF (1);
12003 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12007 case CEE_MONO_GET_LAST_ERROR:
12009 CHECK_STACK_OVF (1);
12011 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12012 ins->dreg = alloc_dreg (cfg, STACK_I4);
12013 ins->type = STACK_I4;
12014 MONO_ADD_INS (cfg->cbb, ins);
12019 case CEE_MONO_GET_RGCTX_ARG:
12021 CHECK_STACK_OVF (1);
12023 mono_create_rgctx_var (cfg);
12025 MONO_INST_NEW (cfg, ins, OP_MOVE);
12026 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12027 ins->sreg1 = cfg->rgctx_var->dreg;
12028 ins->type = STACK_PTR;
12029 MONO_ADD_INS (cfg->cbb, ins);
12035 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12041 case CEE_PREFIX1: {
12044 case CEE_ARGLIST: {
12045 /* somewhat similar to LDTOKEN */
12046 MonoInst *addr, *vtvar;
12047 CHECK_STACK_OVF (1);
12048 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12050 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12051 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12053 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12054 ins->type = STACK_VTYPE;
12055 ins->klass = mono_defaults.argumenthandle_class;
12065 MonoInst *cmp, *arg1, *arg2;
12073 * The following transforms:
12074 * CEE_CEQ into OP_CEQ
12075 * CEE_CGT into OP_CGT
12076 * CEE_CGT_UN into OP_CGT_UN
12077 * CEE_CLT into OP_CLT
12078 * CEE_CLT_UN into OP_CLT_UN
12080 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12082 MONO_INST_NEW (cfg, ins, cmp->opcode);
12083 cmp->sreg1 = arg1->dreg;
12084 cmp->sreg2 = arg2->dreg;
12085 type_from_op (cfg, cmp, arg1, arg2);
12087 add_widen_op (cfg, cmp, &arg1, &arg2);
12088 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12089 cmp->opcode = OP_LCOMPARE;
12090 else if (arg1->type == STACK_R4)
12091 cmp->opcode = OP_RCOMPARE;
12092 else if (arg1->type == STACK_R8)
12093 cmp->opcode = OP_FCOMPARE;
12095 cmp->opcode = OP_ICOMPARE;
12096 MONO_ADD_INS (cfg->cbb, cmp);
12097 ins->type = STACK_I4;
12098 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12099 type_from_op (cfg, ins, arg1, arg2);
12101 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12103 * The backends expect the fceq opcodes to do the
12106 ins->sreg1 = cmp->sreg1;
12107 ins->sreg2 = cmp->sreg2;
12110 MONO_ADD_INS (cfg->cbb, ins);
12116 MonoInst *argconst;
12117 MonoMethod *cil_method;
12119 CHECK_STACK_OVF (1);
12121 n = read32 (ip + 2);
12122 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12125 mono_class_init (cmethod->klass);
12127 mono_save_token_info (cfg, image, n, cmethod);
12129 context_used = mini_method_check_context_used (cfg, cmethod);
12131 cil_method = cmethod;
12132 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12133 emit_method_access_failure (cfg, method, cil_method);
12135 if (mono_security_core_clr_enabled ())
12136 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12139 * Optimize the common case of ldftn+delegate creation
12141 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12142 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12143 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12144 MonoInst *target_ins, *handle_ins;
12145 MonoMethod *invoke;
12146 int invoke_context_used;
12148 invoke = mono_get_delegate_invoke (ctor_method->klass);
12149 if (!invoke || !mono_method_signature (invoke))
12152 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12154 target_ins = sp [-1];
12156 if (mono_security_core_clr_enabled ())
12157 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12159 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12160 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12161 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12163 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12167 /* FIXME: SGEN support */
12168 if (invoke_context_used == 0 || cfg->llvm_only) {
12170 if (cfg->verbose_level > 3)
12171 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12172 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12175 CHECK_CFG_EXCEPTION;
12185 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12186 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12190 inline_costs += 10 * num_calls++;
12193 case CEE_LDVIRTFTN: {
12194 MonoInst *args [2];
12198 n = read32 (ip + 2);
12199 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12202 mono_class_init (cmethod->klass);
12204 context_used = mini_method_check_context_used (cfg, cmethod);
12206 if (mono_security_core_clr_enabled ())
12207 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12210 * Optimize the common case of ldvirtftn+delegate creation
12212 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12213 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12214 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12215 MonoInst *target_ins, *handle_ins;
12216 MonoMethod *invoke;
12217 int invoke_context_used;
12218 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12220 invoke = mono_get_delegate_invoke (ctor_method->klass);
12221 if (!invoke || !mono_method_signature (invoke))
12224 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12226 target_ins = sp [-1];
12228 if (mono_security_core_clr_enabled ())
12229 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12231 /* FIXME: SGEN support */
12232 if (invoke_context_used == 0 || cfg->llvm_only) {
12234 if (cfg->verbose_level > 3)
12235 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12236 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12239 CHECK_CFG_EXCEPTION;
12252 args [1] = emit_get_rgctx_method (cfg, context_used,
12253 cmethod, MONO_RGCTX_INFO_METHOD);
12256 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12258 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12261 inline_costs += 10 * num_calls++;
12265 CHECK_STACK_OVF (1);
12267 n = read16 (ip + 2);
12269 EMIT_NEW_ARGLOAD (cfg, ins, n);
12274 CHECK_STACK_OVF (1);
12276 n = read16 (ip + 2);
12278 NEW_ARGLOADA (cfg, ins, n);
12279 MONO_ADD_INS (cfg->cbb, ins);
12287 n = read16 (ip + 2);
12289 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12291 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12295 CHECK_STACK_OVF (1);
12297 n = read16 (ip + 2);
12299 if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
12300 /* Avoid loading a struct just to load one of its fields */
12301 EMIT_NEW_LOCLOADA (cfg, ins, n);
12303 EMIT_NEW_LOCLOAD (cfg, ins, n);
12309 unsigned char *tmp_ip;
12310 CHECK_STACK_OVF (1);
12312 n = read16 (ip + 2);
12315 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12321 EMIT_NEW_LOCLOADA (cfg, ins, n);
12330 n = read16 (ip + 2);
12332 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12334 emit_stloc_ir (cfg, sp, header, n);
12338 case CEE_LOCALLOC: {
12340 MonoBasicBlock *non_zero_bb, *end_bb;
12341 int alloc_ptr = alloc_preg (cfg);
12343 if (sp != stack_start)
12345 if (cfg->method != method)
12347 * Inlining this into a loop in a parent could lead to
12348 * stack overflows which is different behavior than the
12349 * non-inlined case, thus disable inlining in this case.
12351 INLINE_FAILURE("localloc");
12353 NEW_BBLOCK (cfg, non_zero_bb);
12354 NEW_BBLOCK (cfg, end_bb);
12356 /* if size != zero */
12357 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12358 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12360 //size is zero, so result is NULL
12361 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12362 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12364 MONO_START_BB (cfg, non_zero_bb);
12365 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12366 ins->dreg = alloc_ptr;
12367 ins->sreg1 = sp [0]->dreg;
12368 ins->type = STACK_PTR;
12369 MONO_ADD_INS (cfg->cbb, ins);
12371 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12373 ins->flags |= MONO_INST_INIT;
12375 MONO_START_BB (cfg, end_bb);
12376 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12377 ins->type = STACK_PTR;
12383 case CEE_ENDFILTER: {
12384 MonoExceptionClause *clause, *nearest;
12389 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12391 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12392 ins->sreg1 = (*sp)->dreg;
12393 MONO_ADD_INS (cfg->cbb, ins);
12394 start_new_bblock = 1;
12398 for (cc = 0; cc < header->num_clauses; ++cc) {
12399 clause = &header->clauses [cc];
12400 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12401 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12402 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12405 g_assert (nearest);
12406 if ((ip - header->code) != nearest->handler_offset)
12411 case CEE_UNALIGNED_:
12412 ins_flag |= MONO_INST_UNALIGNED;
12413 /* FIXME: record alignment? we can assume 1 for now */
12417 case CEE_VOLATILE_:
12418 ins_flag |= MONO_INST_VOLATILE;
12422 ins_flag |= MONO_INST_TAILCALL;
12423 cfg->flags |= MONO_CFG_HAS_TAIL;
12424 /* Can't inline tail calls at this time */
12425 inline_costs += 100000;
12432 token = read32 (ip + 2);
12433 klass = mini_get_class (method, token, generic_context);
12434 CHECK_TYPELOAD (klass);
12435 if (generic_class_is_reference_type (cfg, klass))
12436 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12438 mini_emit_initobj (cfg, *sp, NULL, klass);
12442 case CEE_CONSTRAINED_:
12444 token = read32 (ip + 2);
12445 constrained_class = mini_get_class (method, token, generic_context);
12446 CHECK_TYPELOAD (constrained_class);
12452 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12460 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12468 ins_flag |= MONO_INST_NOTYPECHECK;
12470 ins_flag |= MONO_INST_NORANGECHECK;
12471 /* we ignore the no-nullcheck for now since we
12472 * really do it explicitly only when doing callvirt->call
12476 case CEE_RETHROW: {
12478 int handler_offset = -1;
12480 for (i = 0; i < header->num_clauses; ++i) {
12481 MonoExceptionClause *clause = &header->clauses [i];
12482 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12483 handler_offset = clause->handler_offset;
12488 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12490 if (handler_offset == -1)
12493 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12494 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12495 ins->sreg1 = load->dreg;
12496 MONO_ADD_INS (cfg->cbb, ins);
12498 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12499 MONO_ADD_INS (cfg->cbb, ins);
12502 link_bblock (cfg, cfg->cbb, end_bblock);
12503 start_new_bblock = 1;
12511 CHECK_STACK_OVF (1);
12513 token = read32 (ip + 2);
12514 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12515 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12518 val = mono_type_size (type, &ialign);
12520 MonoClass *klass = mini_get_class (method, token, generic_context);
12521 CHECK_TYPELOAD (klass);
12523 val = mono_type_size (&klass->byval_arg, &ialign);
12525 if (mini_is_gsharedvt_klass (klass))
12526 GSHAREDVT_FAILURE (*ip);
12528 EMIT_NEW_ICONST (cfg, ins, val);
12533 case CEE_REFANYTYPE: {
12534 MonoInst *src_var, *src;
12536 GSHAREDVT_FAILURE (*ip);
12542 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12544 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12545 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12546 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12551 case CEE_READONLY_:
12564 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12574 g_warning ("opcode 0x%02x not handled", *ip);
12578 if (start_new_bblock != 1)
12581 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12582 if (cfg->cbb->next_bb) {
12583 /* This could already be set because of inlining, #693905 */
12584 MonoBasicBlock *bb = cfg->cbb;
12586 while (bb->next_bb)
12588 bb->next_bb = end_bblock;
12590 cfg->cbb->next_bb = end_bblock;
12593 if (cfg->method == method && cfg->domainvar) {
12595 MonoInst *get_domain;
12597 cfg->cbb = init_localsbb;
12599 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12600 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12601 MONO_ADD_INS (cfg->cbb, store);
12604 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12605 if (cfg->compile_aot)
12606 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12607 mono_get_got_var (cfg);
12610 if (cfg->method == method && cfg->got_var)
12611 mono_emit_load_got_addr (cfg);
12613 if (init_localsbb) {
12614 cfg->cbb = init_localsbb;
12616 for (i = 0; i < header->num_locals; ++i) {
12617 emit_init_local (cfg, i, header->locals [i], init_locals);
12621 if (cfg->init_ref_vars && cfg->method == method) {
12622 /* Emit initialization for ref vars */
12623 // FIXME: Avoid duplication initialization for IL locals.
12624 for (i = 0; i < cfg->num_varinfo; ++i) {
12625 MonoInst *ins = cfg->varinfo [i];
12627 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12628 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12632 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12633 cfg->cbb = init_localsbb;
12634 emit_push_lmf (cfg);
12637 cfg->cbb = init_localsbb;
12638 mini_profiler_emit_instrumentation_call (cfg, mono_profiler_raise_method_enter, TRUE, NULL, NULL);
12641 MonoBasicBlock *bb;
12644 * Make seq points at backward branch targets interruptable.
12646 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12647 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12648 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12651 /* Add a sequence point for method entry/exit events */
12652 if (seq_points && cfg->gen_sdb_seq_points) {
12653 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12654 MONO_ADD_INS (init_localsbb, ins);
12655 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12656 MONO_ADD_INS (cfg->bb_exit, ins);
12660 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12661 * the code they refer to was dead (#11880).
12663 if (sym_seq_points) {
12664 for (i = 0; i < header->code_size; ++i) {
12665 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12668 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12669 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12676 if (cfg->method == method) {
12677 MonoBasicBlock *bb;
12678 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12679 if (bb == cfg->bb_init)
12682 bb->region = mono_find_block_region (cfg, bb->real_offset);
12684 mono_create_spvar_for_region (cfg, bb->region);
12685 if (cfg->verbose_level > 2)
12686 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12689 MonoBasicBlock *bb;
12690 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12691 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12692 bb->real_offset = inline_offset;
12696 if (inline_costs < 0) {
12699 /* Method is too large */
12700 mname = mono_method_full_name (method, TRUE);
12701 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12705 if ((cfg->verbose_level > 2) && (cfg->method == method))
12706 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12711 g_assert (!mono_error_ok (&cfg->error));
12715 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12719 set_exception_type_from_invalid_il (cfg, method, ip);
12723 g_slist_free (class_inits);
12724 mono_basic_block_free (original_bb);
12725 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12726 if (cfg->exception_type)
12729 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-register-to-membase opcode to the corresponding
 * store-immediate-to-membase opcode, so a store of a known-constant value
 * can be emitted without first loading the constant into a register.
 * Asserts (g_assert_not_reached) for store opcodes without an immediate form.
 * NOTE(review): this fragment is line-sampled — the return type, the
 * `switch` header and the closing braces fall on elided lines.
 */
12733 store_membase_reg_to_store_membase_imm (int opcode)
12736 case OP_STORE_MEMBASE_REG:
12737 return OP_STORE_MEMBASE_IMM;
12738 case OP_STOREI1_MEMBASE_REG:
12739 return OP_STOREI1_MEMBASE_IMM;
12740 case OP_STOREI2_MEMBASE_REG:
12741 return OP_STOREI2_MEMBASE_IMM;
12742 case OP_STOREI4_MEMBASE_REG:
12743 return OP_STOREI4_MEMBASE_IMM;
12744 case OP_STOREI8_MEMBASE_REG:
12745 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for the remaining store opcodes. */
12747 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE, i.e. the opcode which
 * takes an inline constant instead of a second source register.
 * NOTE(review): the `case OP_*:` labels matching most of the `return`
 * statements below fall on elided lines of this fragment; only the mapped
 * results are visible.  Presumably unmatched opcodes yield a sentinel
 * (callers elsewhere compare against -1) — confirm against the full file.
 */
12754 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU operations */
12758 return OP_IADD_IMM;
12760 return OP_ISUB_IMM;
12762 return OP_IDIV_IMM;
12764 return OP_IDIV_UN_IMM;
12766 return OP_IREM_IMM;
12768 return OP_IREM_UN_IMM;
12770 return OP_IMUL_IMM;
12772 return OP_IAND_IMM;
12776 return OP_IXOR_IMM;
12778 return OP_ISHL_IMM;
12780 return OP_ISHR_IMM;
12782 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU operations */
12785 return OP_LADD_IMM;
12787 return OP_LSUB_IMM;
12789 return OP_LAND_IMM;
12793 return OP_LXOR_IMM;
12795 return OP_LSHL_IMM;
12797 return OP_LSHR_IMM;
12799 return OP_LSHR_UN_IMM;
/* Long remainder has an immediate form only on 64-bit registers. */
12800 #if SIZEOF_REGISTER == 8
12802 return OP_LREM_IMM;
/* Comparisons */
12806 return OP_COMPARE_IMM;
12808 return OP_ICOMPARE_IMM;
12810 return OP_LCOMPARE_IMM;
/* Stores: register source -> immediate source */
12812 case OP_STORE_MEMBASE_REG:
12813 return OP_STORE_MEMBASE_IMM;
12814 case OP_STOREI1_MEMBASE_REG:
12815 return OP_STOREI1_MEMBASE_IMM;
12816 case OP_STOREI2_MEMBASE_REG:
12817 return OP_STOREI2_MEMBASE_IMM;
12818 case OP_STOREI4_MEMBASE_REG:
12819 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes */
12821 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12823 return OP_X86_PUSH_IMM;
12824 case OP_X86_COMPARE_MEMBASE_REG:
12825 return OP_X86_COMPARE_MEMBASE_IMM;
12827 #if defined(TARGET_AMD64)
12828 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12829 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a register becomes a direct call when the
 * target is a constant. */
12831 case OP_VOIDCALL_REG:
12832 return OP_VOIDCALL;
12840 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the IR load-from-
 * [basereg + offset] opcode of the matching size/signedness.
 * Asserts for opcodes outside the LDIND family.
 * NOTE(review): most `case CEE_LDIND_*:` labels fall on elided lines of
 * this fragment; only CEE_LDIND_REF is visible.
 */
12847 ldind_to_load_membase (int opcode)
12851 return OP_LOADI1_MEMBASE;
12853 return OP_LOADU1_MEMBASE;
12855 return OP_LOADI2_MEMBASE;
12857 return OP_LOADU2_MEMBASE;
12859 return OP_LOADI4_MEMBASE;
12861 return OP_LOADU4_MEMBASE;
12863 return OP_LOAD_MEMBASE;
/* Object references load as a full native-word load. */
12864 case CEE_LDIND_REF:
12865 return OP_LOAD_MEMBASE;
12867 return OP_LOADI8_MEMBASE;
12869 return OP_LOADR4_MEMBASE;
12871 return OP_LOADR8_MEMBASE;
12873 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the IR
 * store-register-to-[basereg + offset] opcode of the matching size.
 * Asserts for opcodes outside the STIND family.
 * NOTE(review): most `case CEE_STIND_*:` labels fall on elided lines of
 * this fragment; only CEE_STIND_REF is visible.
 */
12880 stind_to_store_membase (int opcode)
12884 return OP_STOREI1_MEMBASE_REG;
12886 return OP_STOREI2_MEMBASE_REG;
12888 return OP_STOREI4_MEMBASE_REG;
/* Object references store as a full native-word store. */
12890 case CEE_STIND_REF:
12891 return OP_STORE_MEMBASE_REG;
12893 return OP_STOREI8_MEMBASE_REG;
12895 return OP_STORER4_MEMBASE_REG;
12897 return OP_STORER8_MEMBASE_REG;
12899 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to its OP_LOAD*_MEM counterpart —
 * presumably the absolute-address form, used when base + offset folds to a
 * constant (confirm at the call sites).  Only x86/amd64 provide these
 * opcodes; the 8-byte variant additionally requires 64-bit registers.
 */
12906 mono_load_membase_to_load_mem (int opcode)
12908 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12909 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12911 case OP_LOAD_MEMBASE:
12912 return OP_LOAD_MEM;
12913 case OP_LOADU1_MEMBASE:
12914 return OP_LOADU1_MEM;
12915 case OP_LOADU2_MEMBASE:
12916 return OP_LOADU2_MEM;
12917 case OP_LOADI4_MEMBASE:
12918 return OP_LOADI4_MEM;
12919 case OP_LOADU4_MEMBASE:
12920 return OP_LOADU4_MEM;
12921 #if SIZEOF_REGISTER == 8
12922 case OP_LOADI8_MEMBASE:
12923 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given STORE_OPCODE (the store used to spill the destination variable)
 * and an ALU OPCODE, return an x86/amd64 read-modify-write *_MEMBASE
 * opcode so the ALU operation can be applied directly to the variable's
 * memory slot, avoiding a separate load + store.  Used by
 * mono_spill_global_vars (), which treats a -1 result as "no fused form".
 * NOTE(review): line-sampled fragment — the `case OP_*:` labels matching
 * the visible `return`s and the final fallback return are elided.
 */
12932 op_to_op_dest_membase (int store_opcode, int opcode)
12934 #if defined(TARGET_X86)
/* On x86 only 4-byte/native-word stores can be fused. */
12935 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12940 return OP_X86_ADD_MEMBASE_REG;
12942 return OP_X86_SUB_MEMBASE_REG;
12944 return OP_X86_AND_MEMBASE_REG;
12946 return OP_X86_OR_MEMBASE_REG;
12948 return OP_X86_XOR_MEMBASE_REG;
12951 return OP_X86_ADD_MEMBASE_IMM;
12954 return OP_X86_SUB_MEMBASE_IMM;
12957 return OP_X86_AND_MEMBASE_IMM;
12960 return OP_X86_OR_MEMBASE_IMM;
12963 return OP_X86_XOR_MEMBASE_IMM;
12969 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 8-byte stores. */
12970 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit operations reuse the OP_X86_* membase opcodes... */
12975 return OP_X86_ADD_MEMBASE_REG;
12977 return OP_X86_SUB_MEMBASE_REG;
12979 return OP_X86_AND_MEMBASE_REG;
12981 return OP_X86_OR_MEMBASE_REG;
12983 return OP_X86_XOR_MEMBASE_REG;
12985 return OP_X86_ADD_MEMBASE_IMM;
12987 return OP_X86_SUB_MEMBASE_IMM;
12989 return OP_X86_AND_MEMBASE_IMM;
12991 return OP_X86_OR_MEMBASE_IMM;
12993 return OP_X86_XOR_MEMBASE_IMM;
/* ...while 64-bit operations need the OP_AMD64_* variants. */
12995 return OP_AMD64_ADD_MEMBASE_REG;
12997 return OP_AMD64_SUB_MEMBASE_REG;
12999 return OP_AMD64_AND_MEMBASE_REG;
13001 return OP_AMD64_OR_MEMBASE_REG;
13003 return OP_AMD64_XOR_MEMBASE_REG;
13006 return OP_AMD64_ADD_MEMBASE_IMM;
13009 return OP_AMD64_SUB_MEMBASE_IMM;
13012 return OP_AMD64_AND_MEMBASE_IMM;
13015 return OP_AMD64_OR_MEMBASE_IMM;
13018 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which stores the result of OPCODE directly into a
 * memory slot: on x86/amd64, a byte-sized store of a comparison result can
 * use the SETcc-to-membase forms.  Callers (mono_spill_global_vars ())
 * treat a -1 result as "no fused form available".
 * NOTE(review): the `case` labels selecting SETEQ vs SETNE and the
 * fallback return are on elided lines of this fragment.
 */
13028 op_to_op_store_membase (int store_opcode, int opcode)
13030 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13033 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13034 return OP_X86_SETEQ_MEMBASE;
13036 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13037 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given LOAD_OPCODE (the load which would fill sreg1 from a spilled
 * variable) and the consuming OPCODE, return an opcode which reads its
 * first operand directly from [basereg + offset], folding away the
 * explicit load (e.g. compare-with-memory, push-from-memory).
 * Callers treat -1 as "no fused form".  On amd64, cfg->backend->ilp32
 * (x32 ABI) decides whether OP_LOAD_MEMBASE is a 4- or 8-byte access.
 * NOTE(review): line-sampled fragment — several `case`/`#endif` lines and
 * the fallback return are elided.
 */
13045 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13048 /* FIXME: This has sign extension issues */
13050 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13051 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only native-word / 4-byte loads can be folded on x86. */
13054 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13059 return OP_X86_PUSH_MEMBASE;
13060 case OP_COMPARE_IMM:
13061 case OP_ICOMPARE_IMM:
13062 return OP_X86_COMPARE_MEMBASE_IMM;
13065 return OP_X86_COMPARE_MEMBASE_REG;
13069 #ifdef TARGET_AMD64
13070 /* FIXME: This has sign extension issues */
13072 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13073 return OP_X86_COMPARE_MEMBASE8_IMM;
13078 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13079 return OP_X86_PUSH_MEMBASE;
/* The compare-immediate fusion below is disabled (inside a comment in the
 * original): it only works for 32-bit immediates. */
13081 /* FIXME: This only works for 32 bit immediates
13082 case OP_COMPARE_IMM:
13083 case OP_LCOMPARE_IMM:
13084 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13085 return OP_AMD64_COMPARE_MEMBASE_IMM;
13087 case OP_ICOMPARE_IMM:
13088 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13089 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick the 4- or 8-byte form by load width/ABI. */
13093 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13094 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13095 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13096 return OP_AMD64_COMPARE_MEMBASE_REG;
13099 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13100 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase (), but folds the load feeding the SECOND
 * source register of OPCODE into a reg-op-memory form (e.g. add reg,
 * [basereg + offset]).  Callers treat -1 as "no fused form".  On amd64 the
 * branch taken depends on whether the load is a 4-byte access (including
 * OP_LOAD_MEMBASE under the x32/ilp32 ABI) or an 8-byte access.
 * NOTE(review): line-sampled fragment — the x86 `#if`, the `case OP_*:`
 * labels and the fallback return are elided.
 */
13109 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13112 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13118 return OP_X86_COMPARE_REG_MEMBASE;
13120 return OP_X86_ADD_REG_MEMBASE;
13122 return OP_X86_SUB_REG_MEMBASE;
13124 return OP_X86_AND_REG_MEMBASE;
13126 return OP_X86_OR_REG_MEMBASE;
13128 return OP_X86_XOR_REG_MEMBASE;
13132 #ifdef TARGET_AMD64
/* 4-byte accesses: the OP_X86_* membase forms still apply, except the
 * compare which needs the AMD64 32-bit variant. */
13133 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13136 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13138 return OP_X86_ADD_REG_MEMBASE;
13140 return OP_X86_SUB_REG_MEMBASE;
13142 return OP_X86_AND_REG_MEMBASE;
13144 return OP_X86_OR_REG_MEMBASE;
13146 return OP_X86_XOR_REG_MEMBASE;
/* 8-byte accesses need the OP_AMD64_* forms throughout. */
13148 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13152 return OP_AMD64_COMPARE_REG_MEMBASE;
13154 return OP_AMD64_ADD_REG_MEMBASE;
13156 return OP_AMD64_SUB_REG_MEMBASE;
13158 return OP_AMD64_AND_REG_MEMBASE;
13160 return OP_AMD64_OR_REG_MEMBASE;
13162 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filters out opcodes which this
 * target emulates in software, so that converting them to an immediate
 * form would not help.  Judging from the visible #if guards these are:
 * long shifts on 32-bit targets without native long-shift support,
 * and mul/div/rem where MONO_ARCH_EMULATE_* is defined.
 * NOTE(review): the `case OP_*:` labels and early returns under each #if
 * fall on elided lines of this fragment — confirm against the full file.
 */
13171 mono_op_to_op_imm_noemul (int opcode)
13174 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13180 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13187 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else delegates to the plain mapping. */
13192 return mono_op_to_op_imm (opcode);
/*
 * Pass structure (three phases over cfg):
 *   1. scan every instruction of every bblock, recording in vreg_to_bb[]
 *      which bblock each vreg is used in; a vreg seen in two different
 *      bblocks is made "global" by allocating a MonoInst variable for it;
 *   2. demote variables used in only one bblock back to plain local vregs
 *      (subject to many exclusions);
 *   3. compress cfg->varinfo / cfg->vars, dropping dead entries.
 * NOTE: this fragment is line-sampled; some statements (loop close braces,
 * a few case labels, the vreg extraction per regindex) are elided.
 */
13197  * mono_handle_global_vregs:
13199  * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13203 mono_handle_global_vregs (MonoCompile *cfg)
13205 gint32 *vreg_to_bb;
13206 MonoBasicBlock *bb;
/* NOTE(review): element type is gint32 but the size uses sizeof (gint32*),
 * and the `+ 1` adds one byte, not one element, due to precedence.
 * Over-allocates on LP64 (harmless), but looks unintended — confirm. */
13209 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13211 #ifdef MONO_ARCH_SIMD_INTRINSICS
13212 if (cfg->uses_simd_intrinsics)
13213 mono_simd_simplify_indirection (cfg);
13216 /* Find local vregs used in more than one bb */
13217 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13218 MonoInst *ins = bb->code;
13219 int block_num = bb->block_num;
13221 if (cfg->verbose_level > 2)
13222 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13225 for (; ins; ins = ins->next) {
13226 const char *spec = INS_INFO (ins->opcode);
13227 int regtype = 0, regindex;
13230 if (G_UNLIKELY (cfg->verbose_level > 2))
13231 mono_print_ins (ins);
/* By this point all CIL-level opcodes must have been lowered to IR. */
13233 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine dreg, sreg1, sreg2 and sreg3 of the instruction in turn;
 * a ' ' in the spec string means the slot is unused. */
13235 for (regindex = 0; regindex < 4; regindex ++) {
13238 if (regindex == 0) {
13239 regtype = spec [MONO_INST_DEST];
13240 if (regtype == ' ')
13243 } else if (regindex == 1) {
13244 regtype = spec [MONO_INST_SRC1];
13245 if (regtype == ' ')
13248 } else if (regindex == 2) {
13249 regtype = spec [MONO_INST_SRC2];
13250 if (regtype == ' ')
13253 } else if (regindex == 3) {
13254 regtype = spec [MONO_INST_SRC3];
13255 if (regtype == ' ')
13260 #if SIZEOF_REGISTER == 4
13261 /* In the LLVM case, the long opcodes are not decomposed */
13262 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13264 * Since some instructions reference the original long vreg,
13265 * and some reference the two component vregs, it is quite hard
13266 * to determine when it needs to be global. So be conservative.
13268 if (!get_vreg_to_inst (cfg, vreg)) {
13269 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13271 if (cfg->verbose_level > 2)
13272 printf ("LONG VREG R%d made global.\n", vreg);
13276 * Make the component vregs volatile since the optimizations can
13277 * get confused otherwise.
13279 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13280 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13284 g_assert (vreg != -1);
13286 prev_bb = vreg_to_bb [vreg];
13287 if (prev_bb == 0) {
13288 /* 0 is a valid block num */
13289 vreg_to_bb [vreg] = block_num + 1;
13290 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are never made global. */
13291 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13294 if (!get_vreg_to_inst (cfg, vreg)) {
13295 if (G_UNLIKELY (cfg->verbose_level > 2))
13296 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Allocate a variable of a type matching the reg class. */
13300 if (vreg_is_ref (cfg, vreg))
13301 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13303 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13306 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13309 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13313 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13316 g_assert_not_reached ();
13320 /* Flag as having been used in more than one bb */
13321 vreg_to_bb [vreg] = -1;
13327 /* If a variable is used in only one bblock, convert it into a local vreg */
13328 for (i = 0; i < cfg->num_varinfo; i++) {
13329 MonoInst *var = cfg->varinfo [i];
13330 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13332 switch (var->type) {
13338 #if SIZEOF_REGISTER == 8
13341 #if !defined(TARGET_X86)
13342 /* Enabling this screws up the fp stack on x86 */
13345 if (mono_arch_is_soft_float ())
13349 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13353 /* Arguments are implicitly global */
13354 /* Putting R4 vars into registers doesn't work currently */
13355 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13356 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13358 * Make that the variable's liveness interval doesn't contain a call, since
13359 * that would cause the lvreg to be spilled, making the whole optimization
13362 /* This is too slow for JIT compilation */
13364 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13366 int def_index, call_index, ins_index;
13367 gboolean spilled = FALSE;
13372 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13373 const char *spec = INS_INFO (ins->opcode);
13375 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13376 def_index = ins_index;
/* NOTE(review): both clauses below test SRC1/sreg1; the second was
 * presumably meant to test SRC2/sreg2 — as written it is a duplicate
 * and uses of the var via sreg2 would be missed. Confirm upstream. */
13378 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13379 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13380 if (call_index > def_index) {
13386 if (MONO_IS_CALL (ins))
13387 call_index = ins_index;
13397 if (G_UNLIKELY (cfg->verbose_level > 2))
13398 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demotion: mark the variable dead and drop its vreg mapping. */
13399 var->flags |= MONO_INST_IS_DEAD;
13400 cfg->vreg_to_inst [var->dreg] = NULL;
13407 * Compress the varinfo and vars tables so the liveness computation is faster and
13408 * takes up less space.
13411 for (i = 0; i < cfg->num_varinfo; ++i) {
13412 MonoInst *var = cfg->varinfo [i];
13413 if (pos < i && cfg->locals_start == i)
13414 cfg->locals_start = pos;
13415 if (!(var->flags & MONO_INST_IS_DEAD)) {
13417 cfg->varinfo [pos] = cfg->varinfo [i];
13418 cfg->varinfo [pos]->inst_c0 = pos;
13419 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13420 cfg->vars [pos].idx = pos;
13421 #if SIZEOF_REGISTER == 4
13422 if (cfg->varinfo [pos]->type == STACK_I8) {
13423 /* Modify the two component vars too */
13426 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13427 var1->inst_c0 = pos;
13428 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13429 var1->inst_c0 = pos;
13436 cfg->num_varinfo = pos;
13437 if (cfg->locals_start > cfg->num_varinfo)
13438 cfg->locals_start = cfg->num_varinfo;
/*
 * Mapping convention used below (read back by mono_spill_global_vars ()):
 *   gsharedvt_vreg_to_idx [dreg] == idx + 1  -> local, slot idx in the
 *                                              runtime-info entries array;
 *   gsharedvt_vreg_to_idx [dreg] == -1       -> argument passed by ref;
 *   0 (from alloc0)                          -> not a gsharedvt variable.
 * NOTE: line-sampled fragment; loop braces and some declarations elided.
 */
13442  * mono_allocate_gsharedvt_vars:
13444  * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13445  * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13448 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
13452 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13454 for (i = 0; i < cfg->num_varinfo; ++i) {
13455 MonoInst *ins = cfg->varinfo [i];
13458 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Entries at or past locals_start are locals; earlier ones are args. */
13459 if (i >= cfg->locals_start) {
13461 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13462 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13463 ins->opcode = OP_GSHAREDVT_LOCAL;
13464 ins->inst_imm = idx;
13467 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13468 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13475 * mono_spill_global_vars:
13477 * Generate spill code for variables which are not allocated to registers,
13478 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13479 * code is generated which could be optimized by the local optimization passes.
13482 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13484 MonoBasicBlock *bb;
13486 int orig_next_vreg;
13487 guint32 *vreg_to_lvreg;
13489 guint32 i, lvregs_len, lvregs_size;
13490 gboolean dest_has_lvreg = FALSE;
13491 MonoStackType stacktypes [128];
13492 MonoInst **live_range_start, **live_range_end;
13493 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13495 *need_local_opts = FALSE;
13497 memset (spec2, 0, sizeof (spec2));
13499 /* FIXME: Move this function to mini.c */
13500 stacktypes ['i'] = STACK_PTR;
13501 stacktypes ['l'] = STACK_I8;
13502 stacktypes ['f'] = STACK_R8;
13503 #ifdef MONO_ARCH_SIMD_INTRINSICS
13504 stacktypes ['x'] = STACK_VTYPE;
13507 #if SIZEOF_REGISTER == 4
13508 /* Create MonoInsts for longs */
13509 for (i = 0; i < cfg->num_varinfo; i++) {
13510 MonoInst *ins = cfg->varinfo [i];
13512 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13513 switch (ins->type) {
13518 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13521 g_assert (ins->opcode == OP_REGOFFSET);
13523 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13525 tree->opcode = OP_REGOFFSET;
13526 tree->inst_basereg = ins->inst_basereg;
13527 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13529 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13531 tree->opcode = OP_REGOFFSET;
13532 tree->inst_basereg = ins->inst_basereg;
13533 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13543 if (cfg->compute_gc_maps) {
13544 /* registers need liveness info even for !non refs */
13545 for (i = 0; i < cfg->num_varinfo; i++) {
13546 MonoInst *ins = cfg->varinfo [i];
13548 if (ins->opcode == OP_REGVAR)
13549 ins->flags |= MONO_INST_GC_TRACK;
13553 /* FIXME: widening and truncation */
13556 * As an optimization, when a variable allocated to the stack is first loaded into
13557 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13558 * the variable again.
13560 orig_next_vreg = cfg->next_vreg;
13561 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13562 lvregs_size = 1024;
13563 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13567 * These arrays contain the first and last instructions accessing a given
13569 * Since we emit bblocks in the same order we process them here, and we
13570 * don't split live ranges, these will precisely describe the live range of
13571 * the variable, i.e. the instruction range where a valid value can be found
13572 * in the variables location.
13573 * The live range is computed using the liveness info computed by the liveness pass.
13574 * We can't use vmv->range, since that is an abstract live range, and we need
13575 * one which is instruction precise.
13576 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13578 /* FIXME: Only do this if debugging info is requested */
13579 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13580 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13581 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13582 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13584 /* Add spill loads/stores */
13585 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13588 if (cfg->verbose_level > 2)
13589 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13591 /* Clear vreg_to_lvreg array */
13592 for (i = 0; i < lvregs_len; i++)
13593 vreg_to_lvreg [lvregs [i]] = 0;
13597 MONO_BB_FOR_EACH_INS (bb, ins) {
13598 const char *spec = INS_INFO (ins->opcode);
13599 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13600 gboolean store, no_lvreg;
13601 int sregs [MONO_MAX_SRC_REGS];
13603 if (G_UNLIKELY (cfg->verbose_level > 2))
13604 mono_print_ins (ins);
13606 if (ins->opcode == OP_NOP)
13610 * We handle LDADDR here as well, since it can only be decomposed
13611 * when variable addresses are known.
13613 if (ins->opcode == OP_LDADDR) {
13614 MonoInst *var = (MonoInst *)ins->inst_p0;
13616 if (var->opcode == OP_VTARG_ADDR) {
13617 /* Happens on SPARC/S390 where vtypes are passed by reference */
13618 MonoInst *vtaddr = var->inst_left;
13619 if (vtaddr->opcode == OP_REGVAR) {
13620 ins->opcode = OP_MOVE;
13621 ins->sreg1 = vtaddr->dreg;
13623 else if (var->inst_left->opcode == OP_REGOFFSET) {
13624 ins->opcode = OP_LOAD_MEMBASE;
13625 ins->inst_basereg = vtaddr->inst_basereg;
13626 ins->inst_offset = vtaddr->inst_offset;
13629 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13630 /* gsharedvt arg passed by ref */
13631 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13633 ins->opcode = OP_LOAD_MEMBASE;
13634 ins->inst_basereg = var->inst_basereg;
13635 ins->inst_offset = var->inst_offset;
13636 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13637 MonoInst *load, *load2, *load3;
13638 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13639 int reg1, reg2, reg3;
13640 MonoInst *info_var = cfg->gsharedvt_info_var;
13641 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13645 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13648 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13650 g_assert (info_var);
13651 g_assert (locals_var);
13653 /* Mark the instruction used to compute the locals var as used */
13654 cfg->gsharedvt_locals_var_ins = NULL;
13656 /* Load the offset */
13657 if (info_var->opcode == OP_REGOFFSET) {
13658 reg1 = alloc_ireg (cfg);
13659 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13660 } else if (info_var->opcode == OP_REGVAR) {
13662 reg1 = info_var->dreg;
13664 g_assert_not_reached ();
13666 reg2 = alloc_ireg (cfg);
13667 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13668 /* Load the locals area address */
13669 reg3 = alloc_ireg (cfg);
13670 if (locals_var->opcode == OP_REGOFFSET) {
13671 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13672 } else if (locals_var->opcode == OP_REGVAR) {
13673 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13675 g_assert_not_reached ();
13677 /* Compute the address */
13678 ins->opcode = OP_PADD;
13682 mono_bblock_insert_before_ins (bb, ins, load3);
13683 mono_bblock_insert_before_ins (bb, load3, load2);
13685 mono_bblock_insert_before_ins (bb, load2, load);
13687 g_assert (var->opcode == OP_REGOFFSET);
13689 ins->opcode = OP_ADD_IMM;
13690 ins->sreg1 = var->inst_basereg;
13691 ins->inst_imm = var->inst_offset;
13694 *need_local_opts = TRUE;
13695 spec = INS_INFO (ins->opcode);
13698 if (ins->opcode < MONO_CEE_LAST) {
13699 mono_print_ins (ins);
13700 g_assert_not_reached ();
13704 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13708 if (MONO_IS_STORE_MEMBASE (ins)) {
13709 tmp_reg = ins->dreg;
13710 ins->dreg = ins->sreg2;
13711 ins->sreg2 = tmp_reg;
13714 spec2 [MONO_INST_DEST] = ' ';
13715 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13716 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13717 spec2 [MONO_INST_SRC3] = ' ';
13719 } else if (MONO_IS_STORE_MEMINDEX (ins))
13720 g_assert_not_reached ();
13725 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13726 printf ("\t %.3s %d", spec, ins->dreg);
13727 num_sregs = mono_inst_get_src_registers (ins, sregs);
13728 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13729 printf (" %d", sregs [srcindex]);
13736 regtype = spec [MONO_INST_DEST];
13737 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13740 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13741 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13742 MonoInst *store_ins;
13744 MonoInst *def_ins = ins;
13745 int dreg = ins->dreg; /* The original vreg */
13747 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13749 if (var->opcode == OP_REGVAR) {
13750 ins->dreg = var->dreg;
13751 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13753 * Instead of emitting a load+store, use a _membase opcode.
13755 g_assert (var->opcode == OP_REGOFFSET);
13756 if (ins->opcode == OP_MOVE) {
13760 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13761 ins->inst_basereg = var->inst_basereg;
13762 ins->inst_offset = var->inst_offset;
13765 spec = INS_INFO (ins->opcode);
13769 g_assert (var->opcode == OP_REGOFFSET);
13771 prev_dreg = ins->dreg;
13773 /* Invalidate any previous lvreg for this vreg */
13774 vreg_to_lvreg [ins->dreg] = 0;
13778 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13780 store_opcode = OP_STOREI8_MEMBASE_REG;
13783 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13785 #if SIZEOF_REGISTER != 8
13786 if (regtype == 'l') {
13787 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
13788 mono_bblock_insert_after_ins (bb, ins, store_ins);
13789 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
13790 mono_bblock_insert_after_ins (bb, ins, store_ins);
13791 def_ins = store_ins;
13796 g_assert (store_opcode != OP_STOREV_MEMBASE);
13798 /* Try to fuse the store into the instruction itself */
13799 /* FIXME: Add more instructions */
13800 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13801 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13802 ins->inst_imm = ins->inst_c0;
13803 ins->inst_destbasereg = var->inst_basereg;
13804 ins->inst_offset = var->inst_offset;
13805 spec = INS_INFO (ins->opcode);
13806 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
13807 ins->opcode = store_opcode;
13808 ins->inst_destbasereg = var->inst_basereg;
13809 ins->inst_offset = var->inst_offset;
13813 tmp_reg = ins->dreg;
13814 ins->dreg = ins->sreg2;
13815 ins->sreg2 = tmp_reg;
13818 spec2 [MONO_INST_DEST] = ' ';
13819 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13820 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13821 spec2 [MONO_INST_SRC3] = ' ';
13823 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13824 // FIXME: The backends expect the base reg to be in inst_basereg
13825 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13827 ins->inst_basereg = var->inst_basereg;
13828 ins->inst_offset = var->inst_offset;
13829 spec = INS_INFO (ins->opcode);
13831 /* printf ("INS: "); mono_print_ins (ins); */
13832 /* Create a store instruction */
13833 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13835 /* Insert it after the instruction */
13836 mono_bblock_insert_after_ins (bb, ins, store_ins);
13838 def_ins = store_ins;
13841 * We can't assign ins->dreg to var->dreg here, since the
13842 * sregs could use it. So set a flag, and do it after
13845 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13846 dest_has_lvreg = TRUE;
13851 if (def_ins && !live_range_start [dreg]) {
13852 live_range_start [dreg] = def_ins;
13853 live_range_start_bb [dreg] = bb;
13856 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13859 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13860 tmp->inst_c1 = dreg;
13861 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13868 num_sregs = mono_inst_get_src_registers (ins, sregs);
13869 for (srcindex = 0; srcindex < 3; ++srcindex) {
13870 regtype = spec [MONO_INST_SRC1 + srcindex];
13871 sreg = sregs [srcindex];
13873 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13874 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13875 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13876 MonoInst *use_ins = ins;
13877 MonoInst *load_ins;
13878 guint32 load_opcode;
13880 if (var->opcode == OP_REGVAR) {
13881 sregs [srcindex] = var->dreg;
13882 //mono_inst_set_src_registers (ins, sregs);
13883 live_range_end [sreg] = use_ins;
13884 live_range_end_bb [sreg] = bb;
13886 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13889 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13890 /* var->dreg is a hreg */
13891 tmp->inst_c1 = sreg;
13892 mono_bblock_insert_after_ins (bb, ins, tmp);
13898 g_assert (var->opcode == OP_REGOFFSET);
13900 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13902 g_assert (load_opcode != OP_LOADV_MEMBASE);
13904 if (vreg_to_lvreg [sreg]) {
13905 g_assert (vreg_to_lvreg [sreg] != -1);
13907 /* The variable is already loaded to an lvreg */
13908 if (G_UNLIKELY (cfg->verbose_level > 2))
13909 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13910 sregs [srcindex] = vreg_to_lvreg [sreg];
13911 //mono_inst_set_src_registers (ins, sregs);
13915 /* Try to fuse the load into the instruction */
13916 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
13917 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
13918 sregs [0] = var->inst_basereg;
13919 //mono_inst_set_src_registers (ins, sregs);
13920 ins->inst_offset = var->inst_offset;
13921 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
13922 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
13923 sregs [1] = var->inst_basereg;
13924 //mono_inst_set_src_registers (ins, sregs);
13925 ins->inst_offset = var->inst_offset;
13927 if (MONO_IS_REAL_MOVE (ins)) {
13928 ins->opcode = OP_NOP;
13931 //printf ("%d ", srcindex); mono_print_ins (ins);
13933 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13935 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13936 if (var->dreg == prev_dreg) {
13938 * sreg refers to the value loaded by the load
13939 * emitted below, but we need to use ins->dreg
13940 * since it refers to the store emitted earlier.
13944 g_assert (sreg != -1);
13945 vreg_to_lvreg [var->dreg] = sreg;
13946 if (lvregs_len >= lvregs_size) {
13947 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
13948 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
13949 lvregs = new_lvregs;
13952 lvregs [lvregs_len ++] = var->dreg;
13956 sregs [srcindex] = sreg;
13957 //mono_inst_set_src_registers (ins, sregs);
13959 #if SIZEOF_REGISTER != 8
13960 if (regtype == 'l') {
13961 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13962 mono_bblock_insert_before_ins (bb, ins, load_ins);
13963 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13964 mono_bblock_insert_before_ins (bb, ins, load_ins);
13965 use_ins = load_ins;
13970 #if SIZEOF_REGISTER == 4
13971 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13973 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13974 mono_bblock_insert_before_ins (bb, ins, load_ins);
13975 use_ins = load_ins;
13979 if (var->dreg < orig_next_vreg) {
13980 live_range_end [var->dreg] = use_ins;
13981 live_range_end_bb [var->dreg] = bb;
13984 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13987 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13988 tmp->inst_c1 = var->dreg;
13989 mono_bblock_insert_after_ins (bb, ins, tmp);
13993 mono_inst_set_src_registers (ins, sregs);
13995 if (dest_has_lvreg) {
13996 g_assert (ins->dreg != -1);
13997 vreg_to_lvreg [prev_dreg] = ins->dreg;
13998 if (lvregs_len >= lvregs_size) {
13999 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14000 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14001 lvregs = new_lvregs;
14004 lvregs [lvregs_len ++] = prev_dreg;
14005 dest_has_lvreg = FALSE;
14009 tmp_reg = ins->dreg;
14010 ins->dreg = ins->sreg2;
14011 ins->sreg2 = tmp_reg;
14014 if (MONO_IS_CALL (ins)) {
14015 /* Clear vreg_to_lvreg array */
14016 for (i = 0; i < lvregs_len; i++)
14017 vreg_to_lvreg [lvregs [i]] = 0;
14019 } else if (ins->opcode == OP_NOP) {
14021 MONO_INST_NULLIFY_SREGS (ins);
14024 if (cfg->verbose_level > 2)
14025 mono_print_ins_index (1, ins);
14028 /* Extend the live range based on the liveness info */
14029 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14030 for (i = 0; i < cfg->num_varinfo; i ++) {
14031 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14033 if (vreg_is_volatile (cfg, vi->vreg))
14034 /* The liveness info is incomplete */
14037 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14038 /* Live from at least the first ins of this bb */
14039 live_range_start [vi->vreg] = bb->code;
14040 live_range_start_bb [vi->vreg] = bb;
14043 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14044 /* Live at least until the last ins of this bb */
14045 live_range_end [vi->vreg] = bb->last_ins;
14046 live_range_end_bb [vi->vreg] = bb;
14053 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14054 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14056 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14057 for (i = 0; i < cfg->num_varinfo; ++i) {
14058 int vreg = MONO_VARINFO (cfg, i)->vreg;
14061 if (live_range_start [vreg]) {
14062 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14064 ins->inst_c1 = vreg;
14065 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14067 if (live_range_end [vreg]) {
14068 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14070 ins->inst_c1 = vreg;
14071 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14072 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14074 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14079 if (cfg->gsharedvt_locals_var_ins) {
14080 /* Nullify if unused */
14081 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14082 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14085 g_free (live_range_start);
14086 g_free (live_range_end);
14087 g_free (live_range_start_bb);
14088 g_free (live_range_end_bb);
14094 * - use 'iadd' instead of 'int_add'
14095 * - handling ovf opcodes: decompose in method_to_ir.
14096 * - unify iregs/fregs
14097 * -> partly done, the missing parts are:
14098 * - a more complete unification would involve unifying the hregs as well, so
14099 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14100 * would no longer map to the machine hregs, so the code generators would need to
14101 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14102 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14103 * fp/non-fp branches speeds it up by about 15%.
14104 * - use sext/zext opcodes instead of shifts
14106 * - get rid of TEMPLOADs if possible and use vregs instead
14107 * - clean up usage of OP_P/OP_ opcodes
14108 * - cleanup usage of DUMMY_USE
14109 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14111 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14112 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14113 * - make sure handle_stack_args () is called before the branch is emitted
14114 * - when the new IR is done, get rid of all unused stuff
14115 * - COMPARE/BEQ as separate instructions or unify them ?
14116 * - keeping them separate allows specialized compare instructions like
14117 * compare_imm, compare_membase
14118 * - most back ends unify fp compare+branch, fp compare+ceq
14119 * - integrate mono_save_args into inline_method
14120 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14121 * - handle long shift opts on 32 bit platforms somehow: they require
14122 * 3 sregs (2 for arg1 and 1 for arg2)
14123 * - make byref a 'normal' type.
14124 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14125 * variable if needed.
14126 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14127 * like inline_method.
14128 * - remove inlining restrictions
14129 * - fix LNEG and enable cfold of INEG
14130 * - generalize x86 optimizations like ldelema as a peephole optimization
14131 * - add store_mem_imm for amd64
14132 * - optimize the loading of the interruption flag in the managed->native wrappers
14133 * - avoid special handling of OP_NOP in passes
14134 * - move code inserting instructions into one function/macro.
14135 * - try a coalescing phase after liveness analysis
14136 * - add float -> vreg conversion + local optimizations on !x86
14137 * - figure out how to handle decomposed branches during optimizations, ie.
14138 * compare+branch, op_jump_table+op_br etc.
14139 * - promote RuntimeXHandles to vregs
14140 * - vtype cleanups:
14141 * - add a NEW_VARLOADA_VREG macro
14142 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14143 * accessing vtype fields.
14144 * - get rid of I8CONST on 64 bit platforms
14145 * - dealing with the increase in code size due to branches created during opcode
14147 * - use extended basic blocks
14148 * - all parts of the JIT
14149 * - handle_global_vregs () && local regalloc
14150 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14151 * - sources of increase in code size:
14154 * - isinst and castclass
14155 * - lvregs not allocated to global registers even if used multiple times
14156 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14158 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14159 * - add all micro optimizations from the old JIT
14160 * - put tree optimizations into the deadce pass
14161 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14162 * specific function.
14163 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14164 * fcompare + branchCC.
14165 * - create a helper function for allocating a stack slot, taking into account
14166 * MONO_CFG_HAS_SPILLUP.
14168 * - optimize mono_regstate2_alloc_int/float.
14169 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14170 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14171 * parts of the tree could be separated by other instructions, killing the tree
14172 * arguments, or stores killing loads etc. Also, should we fold loads into other
14173 * instructions if the result of the load is used multiple times ?
14174 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14175 * - LAST MERGE: 108395.
14176 * - when returning vtypes in registers, generate IR and append it to the end of the
14177 * last bb instead of doing it in the epilog.
14178 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14186 - When to decompose opcodes:
14187 - earlier: this makes some optimizations hard to implement, since the low level IR
 no longer contains the necessary information. But it is easier to do.
14189 - later: harder to implement, enables more optimizations.
14190 - Branches inside bblocks:
14191 - created when decomposing complex opcodes.
14192 - branches to another bblock: harmless, but not tracked by the branch
14193 optimizations, so need to branch to a label at the start of the bblock.
14194 - branches to inside the same bblock: very problematic, trips up the local
 reg allocator. Can be fixed by splitting the current bblock, but that is a
14196 complex operation, since some local vregs can become global vregs etc.
14197 - Local/global vregs:
14198 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14199 local register allocator.
14200 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14201 structure, created by mono_create_var (). Assigned to hregs or the stack by
14202 the global register allocator.
14203 - When to do optimizations like alu->alu_imm:
14204 - earlier -> saves work later on since the IR will be smaller/simpler
14205 - later -> can work on more instructions
14206 - Handling of valuetypes:
14207 - When a vtype is pushed on the stack, a new temporary is created, an
14208 instruction computing its address (LDADDR) is emitted and pushed on
14209 the stack. Need to optimize cases when the vtype is used immediately as in
14210 argument passing, stloc etc.
14211 - Instead of the to_end stuff in the old JIT, simply call the function handling
14212 the values on the stack before emitting the last instruction of the bb.
14215 #endif /* !DISABLE_JIT */