3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/*
 * NOTE(review): this excerpt is a non-contiguous sampling of the original
 * file -- each line carries an embedded original line number and several
 * macro/function bodies are missing their closing lines.  Code below is
 * left byte-identical; only comments were added.
 */
/* Tuning knobs for the inliner's cost model (values are heuristic). */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
/* The failure macros below bail out of IR generation by recording an
 * exception on the compile context and jumping to an 'exception_exit'
 * label that must exist in the enclosing function. */
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
/* GSHAREDVT_FAILURE only aborts when compiling a gsharedvt method. */
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
/* Turns off AOT for this method; logs the callsite when verbose. */
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
/* Propagates a MonoError already recorded on cfg->error. */
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
/* Cached icall signatures, filled in by mono_create_helper_signatures(). */
151 /* helper methods signatures */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
/* Lazily-cached class lookups (generated accessor functions). */
159 /* type loading helpers */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
/*
 * NOTE(review): fragmentary excerpt -- interior lines (braces, return types,
 * table terminators) are missing from this view.  Comments only; code
 * left byte-identical.
 */
/* Instruction metadata: the MINI_OP/MINI_OP3 macros are redefined before
 * each inclusion of "mini-ops.h" so the same opcode list expands into
 * different per-opcode tables (dest/src kinds, then sreg counts). */
164 * Instruction metadata
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Thin public wrappers over the (presumably inline/static) alloc_* vreg
 * allocators, one per register class. */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Dispatch on the GC kind of the source vreg: ref, managed ptr, or plain. */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/*
 * NOTE(review): fragmentary excerpt -- several case labels, braces and
 * return statements are missing from this view.  Comments only.
 */
/* mono_type_to_regmove: maps a MonoType to the move opcode used to copy a
 * value of that type between vregs (e.g. OP_FMOVE/OP_RMOVE for floats,
 * depending on cfg->r4fp).  Recurses after unwrapping enums, generic
 * instances and (in gshared) type variables. */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
325 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur when compiling shared code. */
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* mono_print_bb: debug dump of one basic block -- its in/out edges
 * (block numbers and depth-first numbers) followed by its instructions. */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
355 g_print ("%s", str->str);
356 g_string_free (str, TRUE);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/* One-time initialization of the cached icall helper signatures declared
 * near the top of this file ("ptr", "ptr ptr", ... are signature strings
 * parsed by mono_create_icall_signature). */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/*
 * NOTE(review): fragmentary excerpt -- function braces and some macro
 * terminators are missing from this view.  Comments only.
 *
 * These out-of-line, never-inlined helpers do the slow-path work for the
 * failure macros defined earlier (keeping the macros' fast paths small).
 */
/* Debugger hook: trap when --break-on-unverified is enabled. */
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
376 if (mini_get_debug_options ()->break_on_unverified)
/* Records a FieldAccessException on the compile context; frees the
 * temporary name strings it formats into the message. */
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
/* Marks the current compile as a failed inline attempt. */
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/* Records a generic-sharing failure.  NOTE(review): the trailing '\' on
 * the 'if' line below is a stray line-continuation (likely left over from
 * a macro version of this code); it merely splices the next line and is
 * harmless, but could be removed. */
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 if (cfg->verbose_level > 2) \
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Same as gshared_failure but for gsharedvt; also stores a formatted
 * message on the cfg (ownership: cfg->exception_message is heap-allocated
 * here -- presumably freed with the cfg; verify against caller). */
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* UNVERIFIED: on unverifiable IL, a gsharedvt compile falls back to a
 * concrete instantiation instead of failing outright. */
417 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
/* GET_BBLOCK: fetch (or lazily create and register) the basic block that
 * starts at IL offset 'ip'; bounds-checks 'ip' against the method body. */
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA (base + index<<shift + imm) instruction. */
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/*
 * NOTE(review): fragmentary excerpt -- braces and some macro terminators
 * are missing from this view.  Comments only.
 */
453 /* Emit conversions so both operands of a binary opcode are of the same type */
/* add_widen_op: rewrites *arg1_ref/*arg2_ref in place when a conversion
 * instruction is inserted, so the caller sees the converted operands. */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
/* Promote the r4 operand to r8 so both sides match. */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
/* On 64-bit targets, sign-extend an I4 operand mixed with a native ptr. */
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
/* ADD_BINOP: pop two eval-stack entries (sp[0], sp[1]), type-check via
 * type_from_op, widen if needed, and push the decomposed result.
 * Uses the enclosing function's 'cfg', 'ins' and 'sp' variables. */
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: single-operand variant of ADD_BINOP. */
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
/* ADD_BINCOND: emit a compare + conditional branch pair, wiring the
 * true/false successor blocks into the CFG.  'next_block', when non-NULL,
 * is the fall-through block; otherwise the block at 'ip' is used. */
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
/*
 * NOTE(review): fragmentary excerpt -- braces, early returns and loop
 * bodies are partially missing from this view.  Comments only.
 */
554 * link_bblock: Links two basic blocks
556 * links two basic blocks in the control flow graph, the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * the control flow ends to after 'from'.
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
/* Debug tracing of the edge being added (entry/exit are the blocks
 * without cil_code). */
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
/* De-duplicate: bail if the edge already exists in from->out_bb. */
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
/* Grow the out-edge array by one (mempool-allocated, copy-on-grow). */
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
/* Mirror the same de-dup + grow logic for to->in_bb. */
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above. */
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
/* First pass: handler/filter ranges (encoded as ((i+1)<<8) | kind | flags). */
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) ranges. */
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
/* Returns whether the IL offset lies inside a finally/fault handler body. */
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collects (as a GList) the clauses of the given type that a branch from
 * 'ip' to 'target' would leave -- i.e. clauses containing ip but not target. */
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
/* Per-region stack-pointer save variable, cached in cfg->spvars.
 * Marked volatile so the register allocator leaves it on the stack. */
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Lookup-only accessor for the exception variable at an IL offset. */
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create variant: creates the exception object variable for the
 * handler at 'offset' and caches it in cfg->exvars. */
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * NOTE(review): fragmentary excerpt -- case labels, braces and the first
 * rows of some tables are missing from this view.  Comments only.
 */
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* type_to_eval_stack_type: fills inst->type/inst->klass with the eval-stack
 * classification (STACK_I4/I8/PTR/OBJ/R8/VTYPE/MP/...) of 'type'. */
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
/* Enums are classified as their underlying integral type. */
797 if (type->data.klass->enumtype) {
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: valid only under generic sharing. */
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
/* Validation tables indexed by [src1->type][src2->type] (STACK_* values).
 * NOTE(review): some rows list more initializers than others; the missing
 * trailing entries default to 0, which presumably equals STACK_INV --
 * confirm against the STACK_* enum. */
828 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of arithmetic binops per operand-type pair. */
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of negation per operand type (1-D). */
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/rem...). */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero encodes allowed comparison
 * kinds (see the row/column legend comment below). */
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
/* Result type of shift ops: shift count may be I4 or native int. */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Opcode-delta maps: entry [stack_type] is added to a generic CIL opcode
 * to obtain the type-specific IR opcode (e.g. CEE_ADD + (OP_IADD-CEE_ADD)
 * == OP_IADD for STACK_I4 operands). */
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
/*
 * NOTE(review): fragmentary excerpt -- many case labels, 'break's and the
 * function braces are missing from this view.  Comments only.
 */
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
/* Central typing/lowering switch: classifies the result of 'ins' from its
 * operands' stack types and specializes the generic opcode using the
 * *_op_map delta tables defined above. */
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
/* Arithmetic binops: result from bin_num_table, opcode specialized. */
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops. */
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the L/R/F/I variant from the first operand's width. */
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
/* Immediate compare: only one stack operand, so src1's row is indexed
 * by its own type. */
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and set-on-comparison opcodes. */
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
/* Ordered-only comparisons: bit 0 of the comparability entry gates them. */
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not) and the conversion family below. */
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; width depends on the target. */
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
/* Conversions producing I8. */
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
/* Float conversions: r4 result type depends on cfg->r4_stack_type. */
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats have no overflow semantics, so an
 * R8 result is rejected as invalid IL. */
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
/* Membase loads: result type is fixed by the load width. */
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get object_class as a conservative klass. */
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
1166 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1172 param_table [STACK_MAX] [STACK_MAX] = {
1177 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1182 switch (args->type) {
1192 for (i = 0; i < sig->param_count; ++i) {
1193 switch (args [i].type) {
1197 if (!sig->params [i]->byref)
1201 if (sig->params [i]->byref)
1203 switch (sig->params [i]->type) {
1204 case MONO_TYPE_CLASS:
1205 case MONO_TYPE_STRING:
1206 case MONO_TYPE_OBJECT:
1207 case MONO_TYPE_SZARRAY:
1208 case MONO_TYPE_ARRAY:
1215 if (sig->params [i]->byref)
1217 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1226 /*if (!param_table [args [i].type] [sig->params [i]->type])
1234 * When we need a pointer to the current domain many times in a method, we
1235 * call mono_domain_get() once and we store the result in a local variable.
1236 * This function returns the variable that represents the MonoDomain*.
1238 inline static MonoInst *
1239 mono_get_domainvar (MonoCompile *cfg)
1241 if (!cfg->domainvar)
1242 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1243 return cfg->domainvar;
1247 * The got_var contains the address of the Global Offset Table when AOT
1251 mono_get_got_var (MonoCompile *cfg)
1253 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1255 if (!cfg->got_var) {
1256 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1258 return cfg->got_var;
1262 mono_create_rgctx_var (MonoCompile *cfg)
1264 if (!cfg->rgctx_var) {
1265 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1266 /* force the var to be stack allocated */
1267 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1272 mono_get_vtable_var (MonoCompile *cfg)
1274 g_assert (cfg->gshared);
1276 mono_create_rgctx_var (cfg);
1278 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an evaluation-stack type (ins->type, one of the STACK_* values)
 * back to a representative MonoType*.
 */
1282 type_from_stack_type (MonoInst *ins) {
1283 switch (ins->type) {
1284 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1285 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1286 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1287 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1288 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers (elided case label) use the byref form of the klass. */
1290 return &ins->klass->this_arg;
1291 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1292 case STACK_VTYPE: return &ins->klass->byval_arg;
/* Any other stack type is a hard error. */
1294 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type (): map a MonoType* to the STACK_*
 * evaluation-stack type it occupies.  Enum underlying types are resolved
 * first; several case bodies are elided in this view.
 */
1299 static G_GNUC_UNUSED int
1300 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1302 t = mono_type_get_underlying_type (t);
1314 case MONO_TYPE_FNPTR:
1316 case MONO_TYPE_CLASS:
1317 case MONO_TYPE_STRING:
1318 case MONO_TYPE_OBJECT:
1319 case MONO_TYPE_SZARRAY:
1320 case MONO_TYPE_ARRAY:
/* R4 maps to STACK_R4 or STACK_R8 depending on backend config. */
1326 return cfg->r4_stack_type;
1329 case MONO_TYPE_VALUETYPE:
1330 case MONO_TYPE_TYPEDBYREF:
1332 case MONO_TYPE_GENERICINST:
1333 if (mono_type_generic_inst_is_valuetype (t))
1339 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map an LDELEM/STELEM CIL opcode to the element MonoClass it accesses.
 * (Most case labels are elided in this view; the returns below pair with
 * the corresponding CEE_LDELEM_*/CEE_STELEM_* opcodes.)
 */
1346 array_access_to_klass (int opcode)
1350 return mono_defaults.byte_class;
1352 return mono_defaults.uint16_class;
1355 return mono_defaults.int_class;
1358 return mono_defaults.sbyte_class;
1361 return mono_defaults.int16_class;
1364 return mono_defaults.int32_class;
1366 return mono_defaults.uint32_class;
1369 return mono_defaults.int64_class;
1372 return mono_defaults.single_class;
1375 return mono_defaults.double_class;
1376 case CEE_LDELEM_REF:
1377 case CEE_STELEM_REF:
1378 return mono_defaults.object_class;
/* Unknown opcode: programming error. */
1380 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return a local variable for stack slot SLOT holding a value of INS's
 * stack type, reusing a previously allocated one (cfg->intvars cache)
 * when the slot/type pair matches.
 */
1386 * We try to share variables when possible
1389 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1394 /* inlining can result in deeper stacks */
1395 if (slot >= cfg->header->max_stack)
1396 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: one entry per (stack type, slot) pair. */
1398 pos = ins->type - 1 + slot * STACK_MAX;
1400 switch (ins->type) {
1407 if ((vnum = cfg->intvars [pos]))
1408 return cfg->varinfo [vnum];
1409 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1410 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1413 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Record the image+token used to load KEY so the AOT compiler can later
 * re-resolve the item from metadata alone.
 */
1419 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1422 * Don't use this if a generic_context is set, since that means AOT can't
1423 * look up the method using just the image+token.
1424 * table == 0 means this is a reference made from a wrapper.
1426 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1427 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1428 jump_info_token->image = image;
1429 jump_info_token->token = token;
1430 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1435 * This function is called to handle items that are left on the evaluation stack
1436 * at basic block boundaries. What happens is that we save the values to local variables
1437 * and we reload them later when first entering the target basic block (with the
1438 * handle_loaded_temps () function).
1439 * A single join point will use the same variables (stored in the array bb->out_stack or
1440 * bb->in_stack, if the basic block is before or after the join point).
1442 * This function needs to be called _before_ emitting the last instruction of
1443 * the bb (i.e. before emitting a branch).
1444 * If the stack merge fails at a join point, cfg->unverifiable is set.
1447 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1450 MonoBasicBlock *bb = cfg->cbb;
1451 MonoBasicBlock *outb;
1452 MonoInst *inst, **locals;
1457 if (cfg->verbose_level > 3)
1458 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb with a non-empty stack: pick/allocate the out vars. */
1459 if (!bb->out_scount) {
1460 bb->out_scount = count;
1461 //printf ("bblock %d has out:", bb->block_num);
/* Reuse an existing successor in_stack if one is already assigned. */
1463 for (i = 0; i < bb->out_count; ++i) {
1464 outb = bb->out_bb [i];
1465 /* exception handlers are linked, but they should not be considered for stack args */
1466 if (outb->flags & BB_EXCEPTION_HANDLER)
1468 //printf (" %d", outb->block_num);
1469 if (outb->in_stack) {
1471 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh temporaries. */
1477 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1478 for (i = 0; i < count; ++i) {
1480 * try to reuse temps already allocated for this purpose, if they occupy the same
1481 * stack slot and if they are of the same type.
1482 * This won't cause conflicts since if 'local' is used to
1483 * store one of the values in the in_stack of a bblock, then
1484 * the same variable will be used for the same outgoing stack
1486 * This doesn't work when inlining methods, since the bblocks
1487 * in the inlined methods do not inherit their in_stack from
1488 * the bblock they are inlined to. See bug #58863 for an
1491 if (cfg->inlined_method)
1492 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1494 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor as its in_stack. */
1499 for (i = 0; i < bb->out_count; ++i) {
1500 outb = bb->out_bb [i];
1501 /* exception handlers are linked, but they should not be considered for stack args */
1502 if (outb->flags & BB_EXCEPTION_HANDLER)
1504 if (outb->in_scount) {
/* Mismatched stack depths at a join point make the method unverifiable. */
1505 if (outb->in_scount != bb->out_scount) {
1506 cfg->unverifiable = TRUE;
1509 continue; /* check they are the same locals */
1511 outb->in_scount = count;
1512 outb->in_stack = bb->out_stack;
/* Emit the stores of the live stack values into the shared temporaries. */
1515 locals = bb->out_stack;
1517 for (i = 0; i < count; ++i) {
1518 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1519 inst->cil_code = sp [i]->cil_code;
1520 sp [i] = locals [i];
1521 if (cfg->verbose_level > 3)
1522 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1526 * It is possible that the out bblocks already have in_stack assigned, and
1527 * the in_stacks differ. In this case, we will store to all the different
1534 /* Find a bblock which has a different in_stack */
1536 while (bindex < bb->out_count) {
1537 outb = bb->out_bb [bindex];
1538 /* exception handlers are linked, but they should not be considered for stack args */
1539 if (outb->flags & BB_EXCEPTION_HANDLER) {
1543 if (outb->in_stack != locals) {
/* Store the same values into this bblock's distinct in_stack too. */
1544 for (i = 0; i < count; ++i) {
1545 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1546 inst->cil_code = sp [i]->cil_code;
1547 sp [i] = locals [i];
1548 if (cfg->verbose_level > 3)
1549 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1551 locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 * Emit IR loading a runtime constant described by (PATCH_TYPE, DATA):
 * an AOT constant when compiling AOT, otherwise the patch target is
 * resolved immediately and emitted as a plain pointer constant.
 */
1561 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1565 if (cfg->compile_aot) {
1566 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
/* JIT path: resolve the patch now and embed the resulting address. */
1572 ji.type = patch_type;
1573 ji.data.target = data;
1574 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1575 mono_error_assert_ok (&error);
1577 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mono_create_fast_tls_getter:
 * Build (but do not add) an OP_TLS_GET reading KEY directly via its TLS
 * offset.  Returns NULL under AOT or when fast TLS is unavailable, so the
 * caller must fall back to the slow path.
 */
1583 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1585 int tls_offset = mono_tls_get_tls_offset (key);
1587 if (cfg->compile_aot)
1590 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1592 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1593 ins->dreg = mono_alloc_preg (cfg);
1594 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Counterpart of mono_create_fast_tls_getter (): build an OP_TLS_SET
 * storing VALUE at KEY's TLS offset, or return NULL when the fast path
 * is unavailable (AOT, no offset, no arch support).
 */
1601 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1603 int tls_offset = mono_tls_get_tls_offset (key);
1605 if (cfg->compile_aot)
1608 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1610 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1611 ins->sreg1 = value->dreg;
1612 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR reading the TLS slot KEY.  Prefers the fast inline TLS access;
 * falls back to an indirect call through a TLS trampoline (AOT) or the
 * JIT-resolved getter icall.
 */
1620 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1622 MonoInst *fast_tls = NULL;
1624 if (!mini_get_debug_options ()->use_fallback_tls)
1625 fast_tls = mono_create_fast_tls_getter (cfg, key);
1628 MONO_ADD_INS (cfg->cbb, fast_tls);
/* Slow path. */
1632 if (cfg->compile_aot) {
1635 * tls getters are critical pieces of code and we don't want to resolve them
1636 * through the standard plt/tramp mechanism since we might expose ourselves
1637 * to crashes and infinite recursions.
1639 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1640 return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1642 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1643 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR storing VALUE into TLS slot KEY.  Mirrors mono_create_tls_get ():
 * fast inline TLS store when possible, otherwise trampoline call (AOT) or
 * the JIT-resolved setter icall.
 */
1648 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1650 MonoInst *fast_tls = NULL;
1652 if (!mini_get_debug_options ()->use_fallback_tls)
1653 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1656 MONO_ADD_INS (cfg->cbb, fast_tls);
/* Slow path. */
1660 if (cfg->compile_aot) {
1662 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1663 return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1665 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1666 return mono_emit_jit_icall (cfg, setter, &value);
1673 * Emit IR to push the current LMF onto the LMF stack.
1676 emit_push_lmf (MonoCompile *cfg)
1679 * Emit IR to push the LMF:
1680 * lmf_addr = <lmf_addr from tls>
1681 * lmf->lmf_addr = lmf_addr
1682 * lmf->prev_lmf = *lmf_addr
1685 MonoInst *ins, *lmf_ins;
1690 int lmf_reg, prev_lmf_reg;
1692 * Store lmf_addr in a variable, so it can be allocated to a global register.
1694 if (!cfg->lmf_addr_var)
1695 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One of two ways to get lmf_addr (branching code elided in this view):
 * via jit_tls + offsetof(lmf), or directly from the LMF_ADDR TLS key. */
1698 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1700 int jit_tls_dreg = ins->dreg;
1702 lmf_reg = alloc_preg (cfg);
1703 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1705 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1708 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Address of the method's own LMF structure. */
1710 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1711 lmf_reg = ins->dreg;
1713 prev_lmf_reg = alloc_preg (cfg);
1714 /* Save previous_lmf */
1715 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1716 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link the new LMF as the top of the LMF stack: *lmf_addr = lmf. */
1718 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1724 * Emit IR to pop the current LMF from the LMF stack.
1727 emit_pop_lmf (MonoCompile *cfg)
1729 int lmf_reg, lmf_addr_reg;
1735 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1736 lmf_reg = ins->dreg;
1740 * Emit IR to pop the LMF:
1741 * *(lmf->lmf_addr) = lmf->prev_lmf
1743 /* This could be called before emit_push_lmf () */
1744 if (!cfg->lmf_addr_var)
1745 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1746 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Restore the previous LMF as the top of the LMF stack. */
1748 prev_lmf_reg = alloc_preg (cfg);
1749 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1750 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as
 * the single argument, but only when enter/leave profiling is enabled
 * and we are not inside an inlined body.
 */
1754 emit_instrumentation_call (MonoCompile *cfg, void *func)
1756 MonoInst *iargs [1];
1759 * Avoid instrumenting inlined methods since it can
1760 * distort profiling results.
1762 if (cfg->method != cfg->current_method)
1765 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1766 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1767 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the IR call opcode for a call returning TYPE, in one of three
 * flavors: indirect (CALLI -> *_REG), virtual (-> *_MEMBASE), or direct.
 * Enums/generic insts are unwrapped and re-dispatched (elided 'goto'
 * back to the switch, presumably).
 */
1772 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1775 type = mini_get_underlying_type (type);
1776 switch (type->type) {
1777 case MONO_TYPE_VOID:
1778 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1785 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1789 case MONO_TYPE_FNPTR:
1790 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1791 case MONO_TYPE_CLASS:
1792 case MONO_TYPE_STRING:
1793 case MONO_TYPE_OBJECT:
1794 case MONO_TYPE_SZARRAY:
1795 case MONO_TYPE_ARRAY:
1796 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1799 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1802 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1804 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1806 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1807 case MONO_TYPE_VALUETYPE:
/* Enums use their underlying integral type's opcode. */
1808 if (type->data.klass->enumtype) {
1809 type = mono_class_enum_basetype (type->data.klass);
1812 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1813 case MONO_TYPE_TYPEDBYREF:
1814 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1815 case MONO_TYPE_GENERICINST:
1816 type = &type->data.generic_class->container_class->byval_arg;
1819 case MONO_TYPE_MVAR:
1821 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1823 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1828 //XXX this ignores if t is byref
/* TRUE for primitive integer/native-int scalar MonoTypes (BOOLEAN..U8, I, U). */
1829 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1832 * target_type_is_incompatible:
1833 * @cfg: MonoCompile context
1835 * Check that the item @arg on the evaluation stack can be stored
1836 * in the target type (can be a local, or field, etc).
1837 * The cfg arg can be used to check if we need verification or just
1840 * Returns: non-0 value if arg can't be stored on a target.
1843 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1845 MonoType *simple_type;
1848 if (target->byref) {
1849 /* FIXME: check that the pointed to types match */
1850 if (arg->type == STACK_MP) {
1851 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1852 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1853 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1855 /* if the target is native int& or same type */
1856 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1859 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1860 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1861 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1865 if (arg->type == STACK_PTR)
/* Non-byref target: compare against the lowered (underlying) type. */
1870 simple_type = mini_get_underlying_type (target);
1871 switch (simple_type->type) {
1872 case MONO_TYPE_VOID:
1880 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1884 /* STACK_MP is needed when setting pinned locals */
1885 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1890 case MONO_TYPE_FNPTR:
1892 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1893 * in native int. (#688008).
1895 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1898 case MONO_TYPE_CLASS:
1899 case MONO_TYPE_STRING:
1900 case MONO_TYPE_OBJECT:
1901 case MONO_TYPE_SZARRAY:
1902 case MONO_TYPE_ARRAY:
1903 if (arg->type != STACK_OBJ)
1905 /* FIXME: check type compatibility */
1909 if (arg->type != STACK_I8)
1913 if (arg->type != cfg->r4_stack_type)
1917 if (arg->type != STACK_R8)
1920 case MONO_TYPE_VALUETYPE:
1921 if (arg->type != STACK_VTYPE)
1923 klass = mono_class_from_mono_type (simple_type);
1924 if (klass != arg->klass)
1927 case MONO_TYPE_TYPEDBYREF:
1928 if (arg->type != STACK_VTYPE)
1930 klass = mono_class_from_mono_type (simple_type);
1931 if (klass != arg->klass)
1934 case MONO_TYPE_GENERICINST:
1935 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1936 MonoClass *target_class;
1937 if (arg->type != STACK_VTYPE)
1939 klass = mono_class_from_mono_type (simple_type);
1940 target_class = mono_class_from_mono_type (target);
1941 /* The second case is needed when doing partial sharing */
1942 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1946 if (arg->type != STACK_OBJ)
1948 /* FIXME: check type compatibility */
/* Generic type variables: only reachable under gshared. */
1952 case MONO_TYPE_MVAR:
1953 g_assert (cfg->gshared);
1954 if (mini_type_var_is_vt (simple_type)) {
1955 if (arg->type != STACK_VTYPE)
1958 if (arg->type != STACK_OBJ)
1963 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1969 * Prepare arguments for passing to a function call.
1970 * Return a non-zero value if the arguments can't be passed to the given
1972 * The type checks are not yet complete and some conversions may need
1973 * casts on 32 or 64 bit architectures.
1975 * FIXME: implement this using target_type_is_incompatible ()
1978 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1980 MonoType *simple_type;
/* 'this' (when present) must be an object, managed pointer or native pointer. */
1984 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1988 for (i = 0; i < sig->param_count; ++i) {
1989 if (sig->params [i]->byref) {
1990 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* By-value parameter: compare stack type with the lowered declared type. */
1994 simple_type = mini_get_underlying_type (sig->params [i]);
1996 switch (simple_type->type) {
1997 case MONO_TYPE_VOID:
2006 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2012 case MONO_TYPE_FNPTR:
2013 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2016 case MONO_TYPE_CLASS:
2017 case MONO_TYPE_STRING:
2018 case MONO_TYPE_OBJECT:
2019 case MONO_TYPE_SZARRAY:
2020 case MONO_TYPE_ARRAY:
2021 if (args [i]->type != STACK_OBJ)
2026 if (args [i]->type != STACK_I8)
2030 if (args [i]->type != cfg->r4_stack_type)
2034 if (args [i]->type != STACK_R8)
2037 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2038 if (simple_type->data.klass->enumtype) {
2039 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2042 if (args [i]->type != STACK_VTYPE)
2045 case MONO_TYPE_TYPEDBYREF:
2046 if (args [i]->type != STACK_VTYPE)
2049 case MONO_TYPE_GENERICINST:
2050 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2053 case MONO_TYPE_MVAR:
2055 if (args [i]->type != STACK_VTYPE)
2059 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (vtable-indirect) call opcode to the corresponding
 * direct call opcode (return values elided in this view).
 */
2067 callvirt_to_call (int opcode)
2070 case OP_CALL_MEMBASE:
2072 case OP_VOIDCALL_MEMBASE:
2074 case OP_FCALL_MEMBASE:
2076 case OP_RCALL_MEMBASE:
2078 case OP_VCALL_MEMBASE:
2080 case OP_LCALL_MEMBASE:
/* Any other opcode is a programming error. */
2083 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Map a *_MEMBASE call opcode to its register-indirect *_REG counterpart.
 */
2090 callvirt_to_call_reg (int opcode)
2093 case OP_CALL_MEMBASE:
2095 case OP_VOIDCALL_MEMBASE:
2096 return OP_VOIDCALL_REG;
2097 case OP_FCALL_MEMBASE:
2098 return OP_FCALL_REG;
2099 case OP_RCALL_MEMBASE:
2100 return OP_RCALL_REG;
2101 case OP_VCALL_MEMBASE:
2102 return OP_VCALL_REG;
2103 case OP_LCALL_MEMBASE:
2104 return OP_LCALL_REG;
/* Any other opcode is a programming error. */
2106 g_assert_not_reached ();
2112 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Arrange for the IMT/method argument to be passed to CALL, either in
 * MONO_ARCH_IMT_REG or (LLVM) recorded as call->imt_arg_reg.  The method
 * constant is materialized when no explicit IMT_ARG instruction is given.
 */
2114 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2118 if (COMPILE_LLVM (cfg)) {
2120 method_reg = alloc_preg (cfg);
2121 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2123 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2124 method_reg = ins->dreg;
2128 call->imt_arg_reg = method_reg;
2130 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path. */
2135 method_reg = alloc_preg (cfg);
2136 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2138 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2139 method_reg = ins->dreg;
2142 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 * targeting TARGET.
 */
2145 static MonoJumpInfo *
2146 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2148 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2152 ji->data.target = target;
/* Thin wrapper: generic-context usage of KLASS (cfg gating logic elided in this view). */
2158 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2161 return mono_class_check_context_used (klass);
/* Thin wrapper: generic-context usage of METHOD (cfg gating logic elided in this view). */
2167 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2170 return mono_method_check_context_used (method);
2176 * check_method_sharing:
2178 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2181 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2183 gboolean pass_vtable = FALSE;
2184 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable. */
2186 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2187 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2188 gboolean sharable = FALSE;
2190 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2194 * Pass vtable iff target method might
2195 * be shared, which means that sharing
2196 * is enabled for its class and its
2197 * context is sharable (and it's not a
2200 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) take an mrgctx instead of a vtable. */
2204 if (mini_method_get_context (cmethod) &&
2205 mini_method_get_context (cmethod)->method_inst) {
2206 g_assert (!pass_vtable);
2208 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2211 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Report results through the optional out parameters. */
2216 if (out_pass_vtable)
2217 *out_pass_vtable = pass_vtable;
2218 if (out_pass_mrgctx)
2219 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS and let the backend emit the argument
 * moves.  CALLI selects indirect calls, VIRTUAL_ vtable calls, TAIL tail
 * calls; RGCTX/UNBOX_TRAMPOLINE are recorded on the call.  The returned
 * call is NOT yet added to the bblock.
 */
2222 inline static MonoCallInst *
2223 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2224 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2228 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls count as a method leave for enter/leave profiling. */
2236 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2238 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2240 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2243 call->signature = sig;
2244 call->rgctx_reg = rgctx;
2245 sig_ret = mini_get_underlying_type (sig->ret);
2247 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype returns: either reuse cfg->vret_addr or allocate a temp and
 * pass its address via OP_OUTARG_VTRETADDR (see comment below). */
2250 if (mini_type_is_vtype (sig_ret)) {
2251 call->vret_var = cfg->vret_addr;
2252 //g_assert_not_reached ();
2254 } else if (mini_type_is_vtype (sig_ret)) {
2255 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2258 temp->backend.is_pinvoke = sig->pinvoke;
2261 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2262 * address of return value to increase optimization opportunities.
2263 * Before vtype decomposition, the dreg of the call ins itself represents the
2264 * fact the call modifies the return value. After decomposition, the call will
2265 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2266 * will be transformed into an LDADDR.
2268 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2269 loada->dreg = alloc_preg (cfg);
2270 loada->inst_p0 = temp;
2271 /* We reference the call too since call->dreg could change during optimization */
2272 loada->inst_p1 = call;
2273 MONO_ADD_INS (cfg->cbb, loada);
2275 call->inst.dreg = temp->dreg;
2277 call->vret_var = loada;
2278 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2279 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2281 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2282 if (COMPILE_SOFT_FLOAT (cfg)) {
2284 * If the call has a float argument, we would need to do an r8->r4 conversion using
2285 * an icall, but that cannot be done during the call sequence since it would clobber
2286 * the call registers + the stack. So we do it before emitting the call.
2288 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2290 MonoInst *in = call->args [i];
2292 if (i >= sig->hasthis)
2293 t = sig->params [i - sig->hasthis];
2295 t = &mono_defaults.int_class->byval_arg;
2296 t = mono_type_get_underlying_type (t);
2298 if (!t->byref && t->type == MONO_TYPE_R4) {
2299 MonoInst *iargs [1];
2303 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2305 /* The result will be in an int vreg */
2306 call->args [i] = conv;
2312 call->need_unbox_trampoline = unbox_trampoline;
/* Let the (LLVM or native) backend lower the outgoing arguments. */
2315 if (COMPILE_LLVM (cfg))
2316 mono_llvm_emit_call (cfg, call);
2318 mono_arch_emit_call (cfg, call);
2320 mono_arch_emit_call (cfg, call);
2323 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2324 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Mark CALL as receiving the rgctx in MONO_ARCH_RGCTX_REG (already moved
 * into RGCTX_REG by the caller) and record that on the cfg.
 */
2330 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2332 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2333 cfg->uses_rgctx_reg = TRUE;
2334 call->rgctx_reg = TRUE;
2336 call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, optionally
 * passing IMT_ARG and RGCTX_ARG.  For pinvoke wrappers with callconv
 * checking enabled, the stack pointer is saved before and compared after
 * the call, throwing ExecutionEngineException on imbalance.
 */
2341 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2346 gboolean check_sp = FALSE;
2348 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2349 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2351 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx into a fresh reg so it survives until the call. */
2356 rgctx_reg = mono_alloc_preg (cfg);
2357 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Capture the pre-call SP for the callconv check. */
2361 if (!cfg->stack_inbalance_var)
2362 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2364 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2365 ins->dreg = cfg->stack_inbalance_var->dreg;
2366 MONO_ADD_INS (cfg->cbb, ins);
2369 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2371 call->inst.sreg1 = addr->dreg;
2374 emit_imt_argument (cfg, call, NULL, imt_arg);
2376 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call SP check: restore SP, then compare with the saved value. */
2381 sp_reg = mono_alloc_preg (cfg);
2383 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2385 MONO_ADD_INS (cfg->cbb, ins);
2387 /* Restore the stack so we don't crash when throwing the exception */
2388 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2389 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2390 MONO_ADD_INS (cfg->cbb, ins);
2392 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2393 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2397 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2399 return (MonoInst*)call;
2403 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a call to METHOD with SIG/ARGS, choosing between direct, virtual
 * (vtable/IMT), delegate-invoke, remoting and rgctx-based dispatch.
 * THIS_INS non-NULL selects virtual dispatch.
 */
2406 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2407 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2409 #ifndef DISABLE_REMOTING
2410 gboolean might_be_remote = FALSE;
2412 gboolean virtual_ = this_ins != NULL;
2413 gboolean enable_for_aot = TRUE;
2416 MonoInst *call_target = NULL;
2418 gboolean need_unbox_trampoline;
2421 sig = mono_method_signature (method);
2423 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2424 g_assert_not_reached ();
2427 rgctx_reg = mono_alloc_preg (cfg);
2428 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2431 if (method->string_ctor) {
2432 /* Create the real signature */
2433 /* FIXME: Cache these */
2434 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2435 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2440 context_used = mini_method_check_context_used (cfg, method);
2442 #ifndef DISABLE_REMOTING
2443 might_be_remote = this_ins && sig->hasthis &&
2444 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2445 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2447 if (might_be_remote && context_used) {
2450 g_assert (cfg->gshared);
2452 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2454 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2458 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2459 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2461 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2463 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2465 #ifndef DISABLE_REMOTING
2466 if (might_be_remote)
2467 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2470 call->method = method;
2471 call->inst.flags |= MONO_INST_HAS_METHOD;
2472 call->inst.inst_left = this_ins;
2473 call->tail_call = tail;
/* Virtual dispatch paths below. */
2476 int vtable_reg, slot_reg, this_reg;
2479 this_reg = this_ins->dreg;
/* Delegate Invoke: call through delegate->invoke_impl. */
2481 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2482 MonoInst *dummy_use;
2484 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2486 /* Make a call to delegate->invoke_impl */
2487 call->inst.inst_basereg = this_reg;
2488 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2489 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2491 /* We must emit a dummy use here because the delegate trampoline will
2492 replace the 'this' argument with the delegate target making this activation
2493 no longer a root for the delegate.
2494 This is an issue for delegates that target collectible code such as dynamic
2495 methods of GC'able assemblies.
2497 For a test case look into #667921.
2499 FIXME: a dummy use is not the best way to do it as the local register allocator
2500 will put it on a caller save register and spill it around the call.
2501 Ideally, we would either put it on a callee save register or only do the store part.
2503 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2505 return (MonoInst*)call;
/* Non-virtual (or final) methods can be dispatched directly. */
2508 if ((!cfg->compile_aot || enable_for_aot) &&
2509 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2510 (MONO_METHOD_IS_FINAL (method) &&
2511 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2512 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2514 * the method is not virtual, we just need to ensure this is not null
2515 * and then we can call the method directly.
2517 #ifndef DISABLE_REMOTING
2518 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2520 * The check above ensures method is not gshared, this is needed since
2521 * gshared methods can't have wrappers.
2523 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2527 if (!method->string_ctor)
2528 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2530 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2531 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2533 * the method is virtual, but we can statically dispatch since either
2534 * it's class or the method itself are sealed.
2535 * But first we need to ensure it's not a null reference.
2537 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2539 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2540 } else if (call_target) {
2541 vtable_reg = alloc_preg (cfg);
2542 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2544 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2545 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!' is logical NOT, so this evaluates to flags &= 0 (or 1),
 * clearing every flag rather than just MONO_INST_HAS_METHOD; bitwise
 * '~MONO_INST_HAS_METHOD' looks intended — confirm against upstream. */
2546 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* Generic virtual/vtable dispatch: load the slot from the vtable. */
2548 vtable_reg = alloc_preg (cfg);
2549 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2550 if (mono_class_is_interface (method->klass)) {
2551 guint32 imt_slot = mono_method_get_imt_slot (method);
2552 emit_imt_argument (cfg, call, call->method, imt_arg);
2553 slot_reg = vtable_reg;
/* IMT table sits immediately before the vtable, hence the negative offset. */
2554 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2556 slot_reg = vtable_reg;
2557 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2558 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2560 g_assert (mono_method_signature (method)->generic_param_count);
2561 emit_imt_argument (cfg, call, call->method, imt_arg);
2565 call->inst.sreg1 = slot_reg;
2566 call->inst.inst_offset = offset;
2567 call->is_virtual = TRUE;
2571 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2574 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2576 return (MonoInst*)call;
2580 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2582 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a call instruction with signature SIG targeting the native function
 * FUNC, append it to the current bblock and return it as a MonoInst*.
 * NOTE(review): the lines storing FUNC into the call (presumably call->fptr)
 * are elided from this chunk -- confirm against the full file.
 */
2586 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2593 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2596 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2598 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Look up the JIT icall registered for the native address FUNC and emit a
 * call to its wrapper with the icall's signature.
 */
2602 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2604 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2608 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2612  * mono_emit_abs_call:
2614  * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2616 inline static MonoInst*
2617 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2618 MonoMethodSignature *sig, MonoInst **args)
2620 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2624 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch table; keys and values are the MonoJumpInfo itself. */
2627 if (cfg->abs_patches == NULL)
2628 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2629 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The "address" is really the patch descriptor; mark the call accordingly. */
2630 ins = mono_emit_native_call (cfg, ji, sig, args);
2631 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2635 static MonoMethodSignature*
/*
 * sig_to_rgctx_sig:
 * Return a copy of SIG extended with one extra trailing native-int pointer
 * parameter (used to pass the rgctx/extra argument of an indirect call).
 * The copy is g_malloc ()-ed and never freed, per the FIXME below.
 */
2636 sig_to_rgctx_sig (MonoMethodSignature *sig)
2638 // FIXME: memory allocation
2639 MonoMethodSignature *res;
2642 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2643 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2644 res->param_count = sig->param_count + 1;
2645 for (i = 0; i < sig->param_count; ++i)
2646 res->params [i] = sig->params [i];
/* The extra argument is typed as a native int (int_class->this_arg). */
2647 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2651 /* Make an indirect call to FSIG passing an additional argument */
2653 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2655 MonoMethodSignature *csig;
/* Stack buffer used when the arg array fits; otherwise mempool-allocated. */
2656 MonoInst *args_buf [16];
2658 int i, pindex, tmp_reg;
2660 /* Make a call with an rgctx/extra arg */
/* param_count params + optional this + the extra arg must fit in args_buf. */
2661 if (fsig->param_count + 2 < 16)
2664 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2667 args [pindex ++] = orig_args [0];
2668 for (i = 0; i < fsig->param_count; ++i)
2669 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append ARG_REG as the trailing extra argument expected by csig. */
2670 tmp_reg = alloc_preg (cfg);
2671 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2672 csig = sig_to_rgctx_sig (fsig);
2673 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2676 /* Emit an indirect call to the function descriptor ADDR */
2678 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2680 int addr_reg, arg_reg;
2681 MonoInst *call_target;
/* Only valid in llvm-only (bitcode) mode, where calls go through descriptors. */
2683 g_assert (cfg->llvm_only);
2686 	 * addr points to a <addr, arg> pair, load both of them, and
2687 	 * make a call to addr, passing arg as an extra arg.
2689 addr_reg = alloc_preg (cfg);
2690 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2691 arg_reg = alloc_preg (cfg);
2692 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2694 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 * Whether icalls may be called directly, without going through their wrapper.
 * Disabled under LLVM (except llvm-only), when sdb sequence points are being
 * generated, or when explicitly turned off.
 * NOTE(review): the return statements of the two guards are elided from this
 * chunk; presumably both return FALSE with TRUE as the fallthrough -- confirm.
 */
2698 direct_icalls_enabled (MonoCompile *cfg)
2702 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2704 if (cfg->compile_llvm && !cfg->llvm_only)
2707 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, the (lazily created) wrapper is inlined so
 * the C function is called directly; otherwise call through the wrapper.
 */
2713 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2716 	 * Call the jit icall without a wrapper if possible.
2717 	 * The wrapper is needed for the following reasons:
2718 	 * - to handle exceptions thrown using mono_raise_exceptions () from the
2719 	 *   icall function. The EH code needs the lmf frame pushed by the
2720 	 *   wrapper to be able to unwind back to managed code.
2721 	 * - to be able to do stack walks for asynchronously suspended
2722 	 *   threads when debugging.
2724 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Create the wrapper lazily; the barrier publishes it to other threads. */
2728 if (!info->wrapper_method) {
2729 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2730 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2732 mono_memory_barrier ();
2736 	 * Inline the wrapper method, which is basically a call to the C icall, and
2737 	 * an exception check.
2739 costs = inline_method (cfg, info->wrapper_method, NULL,
2740 args, NULL, il_offset, TRUE);
2741 g_assert (costs > 0);
2742 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: indirect path through the icall wrapper. */
2746 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * Widen small integer return values of a call to register size. Needed for
 * pinvoke (and LLVM) returns, where native code might leave the upper bits
 * of sub-register integers uninitialized.
 */
2751 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2753 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2754 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2758 	 * Native code might return non register sized integers
2759 	 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
2761 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2762 case OP_LOADI1_MEMBASE:
2763 widen_op = OP_ICONV_TO_I1;
2765 case OP_LOADU1_MEMBASE:
2766 widen_op = OP_ICONV_TO_U1;
2768 case OP_LOADI2_MEMBASE:
2769 widen_op = OP_ICONV_TO_I2;
2771 case OP_LOADU2_MEMBASE:
2772 widen_op = OP_ICONV_TO_U2;
2778 if (widen_op != -1) {
2779 int dreg = alloc_preg (cfg);
2782 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2783 widen->type = ins->type;
2794 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2796 MonoInst *args [16];
2798 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2799 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2801 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * mini_get_memcpy_method:
 * Return (and cache) the managed String.memcpy (3 args) helper used for
 * value type copies. Aborts if corlib does not provide it.
 */
2805 mini_get_memcpy_method (void)
2807 static MonoMethod *memcpy_method = NULL;
2808 if (!memcpy_method) {
2809 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2811 g_error ("Old corlib found. Install a new one");
2813 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Set a bit in *WB_BITMAP for each pointer-sized slot of KLASS (at byte
 * OFFSET from the start) that holds an object reference, recursing into
 * embedded value types which themselves contain references.
 */
2817 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2819 MonoClassField *field;
2820 gpointer iter = NULL;
2822 while ((field = mono_class_get_fields (klass, &iter))) {
2825 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For reference-type containers, skip the MonoObject header when computing the field offset. */
2827 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2828 if (mini_type_is_reference (mono_field_get_type (field))) {
/* Reference fields must be pointer aligned for the per-slot bitmap to work. */
2829 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2830 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2832 MonoClass *field_class = mono_class_from_mono_type (field->type);
2833 if (field_class->has_references)
2834 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * mini_emit_write_barrier:
 * Emit a GC write barrier for storing VALUE into *PTR. Prefers, in order:
 * a backend OP_CARD_TABLE_WBARRIER opcode, an inline card-table mark, or a
 * call to the generic GC write barrier method.
 */
2840 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2842 int card_table_shift_bits;
2843 gpointer card_table_mask;
2845 MonoInst *dummy_use;
2846 int nursery_shift_bits;
2847 size_t nursery_size;
2849 if (!cfg->gen_write_barriers)
2852 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2854 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2856 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2859 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2860 wbarrier->sreg1 = ptr->dreg;
2861 wbarrier->sreg2 = value->dreg;
2862 MONO_ADD_INS (cfg->cbb, wbarrier);
2863 } else if (card_table) {
2864 int offset_reg = alloc_preg (cfg);
2869 	 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2870 	 * collector case, so, for the serial collector, it might slightly slow down nursery
2871 	 * collections. We also expect that the host system and the target system have the same card
2872 	 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked. */
2875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2876 if (card_table_mask)
2877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2879 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2880 	 * IMM's larger than 32bits.
2882 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2883 card_reg = ins->dreg;
/* Mark the card (store byte 1 at card_table + index). */
2885 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2886 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2888 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2889 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
2892 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mini_emit_wb_aware_memcpy:
 * Try to emit an unrolled copy of SIZE bytes from iargs[1] to iargs[0] for
 * KLASS, emitting write barriers for the pointer-sized slots which hold
 * references (per create_write_barrier_bitmap). Gives up (so the caller can
 * fall back to a helper) when the alignment is below pointer size or the
 * copy would need more than 5 pointer-sized stores.
 */
2896 mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2898 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2899 unsigned need_wb = 0;
2904 /*types with references can't have alignment smaller than sizeof(void*) */
2905 if (align < SIZEOF_VOID_P)
2908 if (size > 5 * SIZEOF_VOID_P)
2911 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2913 destreg = iargs [0]->dreg;
2914 srcreg = iargs [1]->dreg;
2917 dest_ptr_reg = alloc_preg (cfg);
2918 tmp_reg = alloc_preg (cfg);
2921 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy pointer-sized words, barriering the ones flagged in need_wb. */
2923 while (size >= SIZEOF_VOID_P) {
2924 MonoInst *load_inst;
2925 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2926 load_inst->dreg = tmp_reg;
2927 load_inst->inst_basereg = srcreg;
2928 load_inst->inst_offset = offset;
2929 MONO_ADD_INS (cfg->cbb, load_inst);
2931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2934 mini_emit_write_barrier (cfg, iargs [0], load_inst);
2936 offset += SIZEOF_VOID_P;
2937 size -= SIZEOF_VOID_P;
2940 /*tmp += sizeof (void*)*/
2941 if (size >= SIZEOF_VOID_P) {
2942 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2943 MONO_ADD_INS (cfg->cbb, iargs [0]);
2947 /* Those cannot be references since size < sizeof (void*) */
2949 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2956 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2957 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2973  * Emit code to copy a valuetype of type @klass whose address is stored in
2974  * @src->dreg to memory whose address is stored at @dest->dreg.
2977 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2979 MonoInst *iargs [4];
2982 MonoMethod *memcpy_method;
2983 MonoInst *size_ins = NULL;
2984 MonoInst *memcpy_ins = NULL;
2988 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
2991 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2992 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are only known at runtime, via the rgctx. */
2995 if (mini_is_gsharedvt_klass (klass)) {
2997 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2998 memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3002 n = mono_class_native_size (klass, &align);
3004 n = mono_class_value_size (klass, &align);
3007 align = SIZEOF_VOID_P;
3008 /* if native is true there should be no references in the struct */
3009 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3010 /* Avoid barriers when storing to the stack */
3011 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3012 (dest->opcode == OP_LDADDR))) {
3018 context_used = mini_class_check_context_used (cfg, klass);
3020 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3021 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3023 } else if (size_ins || align < SIZEOF_VOID_P) {
3025 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3027 iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3028 if (!cfg->compile_aot)
3029 mono_class_compute_gc_descriptor (klass);
3032 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3034 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3036 /* We don't unroll more than 5 stores to avoid code bloat. */
3037 /*This is harmless and simplify mono_gc_get_range_copy_func */
/* Round the size up to a multiple of the pointer size for range copy. */
3038 n += (SIZEOF_VOID_P - 1);
3039 n &= ~(SIZEOF_VOID_P - 1);
3041 EMIT_NEW_ICONST (cfg, iargs [2], n);
3042 mono_emit_jit_icall (cfg, mono_gc_get_range_copy_func (), iargs);
/* No-barrier path: inline memcpy for small fixed sizes, helper call otherwise. */
3047 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3048 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3049 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3054 iargs [2] = size_ins;
3056 EMIT_NEW_ICONST (cfg, iargs [2], n);
3058 memcpy_method = mini_get_memcpy_method ();
3060 mini_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3062 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * mini_get_memset_method:
 * Return (and cache) the managed String.memset (3 args) helper used to zero
 * value types. Aborts if corlib does not provide it.
 */
3067 mini_get_memset_method (void)
3069 static MonoMethod *memset_method = NULL;
3070 if (!memset_method) {
3071 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3073 g_error ("Old corlib found. Install a new one");
3075 return memset_method;
/*
 * mini_emit_initobj:
 * Emit IR to zero-initialize the valuetype KLASS at address DEST->dreg.
 * Uses a runtime bzero helper for gsharedvt types, an inline memset for
 * small fixed sizes, and the managed memset helper otherwise.
 */
3079 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3081 MonoInst *iargs [3];
3084 MonoMethod *memset_method;
3085 MonoInst *size_ins = NULL;
3086 MonoInst *bzero_ins = NULL;
3087 static MonoMethod *bzero_method;
3089 /* FIXME: Optimize this for the case when dest is an LDADDR */
3090 mono_class_init (klass);
/* gsharedvt: size and bzero helper come from the runtime generic context. */
3091 if (mini_is_gsharedvt_klass (klass)) {
3092 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3093 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3095 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3096 g_assert (bzero_method);
3098 iargs [1] = size_ins;
3099 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3103 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3105 n = mono_class_value_size (klass, &align);
/* Small types: emit the memset inline instead of a call. */
3107 if (n <= sizeof (gpointer) * 8) {
3108 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3111 memset_method = mini_get_memset_method ();
3113 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3114 EMIT_NEW_ICONST (cfg, iargs [2], n);
3115 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3122  * Emit IR to return either the this pointer for instance method,
3123  * or the mrgctx for static methods.
3126 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3128 MonoInst *this_ins = NULL;
3130 g_assert (cfg->gshared);
/* Non-static reference-type instance methods find the rgctx through `this'. */
3132 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3133 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3134 !method->klass->valuetype)
3135 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-inflated code: the mrgctx is passed in and stored in the vtable var. */
3137 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3138 MonoInst *mrgctx_loc, *mrgctx_var;
3140 g_assert (!this_ins);
3141 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3143 mrgctx_loc = mono_get_vtable_var (cfg);
3144 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3147 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
3148 MonoInst *mrgctx_loc, *mrgctx_var;
3150 /* Default interface methods need an mrgctx since the vtabke at runtime points at an implementing class */
3151 mrgctx_loc = mono_get_vtable_var (cfg);
3152 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3154 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static / valuetype methods: the vtable is passed in explicitly. */
3157 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3158 MonoInst *vtable_loc, *vtable_var;
3160 g_assert (!this_ins);
3162 vtable_loc = mono_get_vtable_var (cfg);
3163 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* When an mrgctx was passed instead, load the class vtable out of it. */
3165 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3166 MonoInst *mrgctx_var = vtable_var;
3169 vtable_reg = alloc_preg (cfg);
3170 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3171 vtable_var->type = STACK_PTR;
/* Default case: load the vtable from the `this' object. */
3179 vtable_reg = alloc_preg (cfg);
3180 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3185 static MonoJumpInfoRgctxEntry *
3186 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3188 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3189 res->method = method;
3190 res->in_mrgctx = in_mrgctx;
3191 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3192 res->data->type = patch_type;
3193 res->data->data.target = patch_data;
3194 res->info_type = info_type;
3199 static inline MonoInst*
/*
 * emit_rgctx_fetch_inline:
 * Emit IR which fetches ENTRY's value from the rgctx pointed to by RGCTX,
 * walking the rgctx array chain inline and falling back to the
 * mono_fill_{method,class}_rgctx icalls when a link or slot is still null.
 * NOTE(review): this chunk elides the #if/#else structure separating the
 * llvm-only (icall-only) variant from the inline-walk variant -- the two
 * code sequences below belong to different configurations; confirm against
 * the full file before editing.
 */
3200 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3202 MonoInst *args [16];
3205 // FIXME: No fastpath since the slot is not a compile time constant
3207 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3208 if (entry->in_mrgctx)
3209 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3211 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3215 	 * FIXME: This can be called during decompose, which is a problem since it creates
3217 	 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3219 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3221 MonoBasicBlock *is_null_bb, *end_bb;
3222 MonoInst *res, *ins, *call;
3225 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the slot into (mrgctx?, index) and skip the mrgctx header slots. */
3227 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3228 index = MONO_RGCTX_SLOT_INDEX (slot);
3230 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many array hops are needed to reach the slot. */
3231 for (depth = 0; ; ++depth) {
3232 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3234 if (index < size - 1)
3239 NEW_BBLOCK (cfg, end_bb);
3240 NEW_BBLOCK (cfg, is_null_bb);
3243 rgctx_reg = rgctx->dreg;
3245 rgctx_reg = alloc_preg (cfg);
3247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3248 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3249 NEW_BBLOCK (cfg, is_null_bb);
3251 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3252 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3255 for (i = 0; i < depth; ++i) {
3256 int array_reg = alloc_preg (cfg);
3258 /* load ptr to next array */
3259 if (mrgctx && i == 0)
3260 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3263 rgctx_reg = array_reg;
3264 /* is the ptr null? */
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3266 /* if yes, jump to actual trampoline */
3267 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path: load the slot value (+1 skips the next-array link). */
3271 val_reg = alloc_preg (cfg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3273 /* is the slot null? */
3274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3275 /* if yes, jump to actual trampoline */
3276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3279 res_reg = alloc_preg (cfg);
3280 MONO_INST_NEW (cfg, ins, OP_MOVE);
3281 ins->dreg = res_reg;
3282 ins->sreg1 = val_reg;
3283 MONO_ADD_INS (cfg->cbb, ins);
3285 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: ask the runtime to fill the slot. */
3288 MONO_START_BB (cfg, is_null_bb);
3290 EMIT_NEW_ICONST (cfg, args [1], index);
3292 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3294 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3295 MONO_INST_NEW (cfg, ins, OP_MOVE);
3296 ins->dreg = res_reg;
3297 ins->sreg1 = call->dreg;
3298 MONO_ADD_INS (cfg->cbb, ins);
3299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3301 MONO_START_BB (cfg, end_bb);
3310  * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3313 static inline MonoInst*
/*
 * Either inline the fetch or go through the lazy-fetch trampoline.
 * NOTE(review): the condition selecting between the two returns is elided
 * from this chunk (presumably cfg->llvm_only picks the inline path, since
 * llvm-only cannot use trampolines) -- confirm against the full file.
 */
3314 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3317 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3319 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 * Emit IR to load the property RGCTX_TYPE of KLASS through the rgctx.
 */
3323 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3324 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3326 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3327 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3329 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR to load the property RGCTX_TYPE of SIG through the rgctx.
 */
3333 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3334 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3336 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3337 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3339 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR to load the property RGCTX_TYPE of the (SIG, CMETHOD) gsharedvt
 * call descriptor through the rgctx.
 */
3343 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3344 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3346 MonoJumpInfoGSharedVtCall *call_info;
3347 MonoJumpInfoRgctxEntry *entry;
3350 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3351 call_info->sig = sig;
3352 call_info->method = cmethod;
3354 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3355 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3357 return emit_rgctx_fetch (cfg, rgctx, entry);
3361  * emit_get_rgctx_virt_method:
3363  *   Return data for method VIRT_METHOD for a receiver of type KLASS.
3366 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3367 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3369 MonoJumpInfoVirtMethod *info;
3370 MonoJumpInfoRgctxEntry *entry;
/* Package (KLASS, VIRT_METHOD) into a patch-info payload for the rgctx entry. */
3373 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3374 info->klass = klass;
3375 info->method = virt_method;
3377 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3378 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3380 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Emit IR to load the gsharedvt info of CMETHOD through the rgctx.
 */
3384 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3385 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3387 MonoJumpInfoRgctxEntry *entry;
3390 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3391 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3393 return emit_rgctx_fetch (cfg, rgctx, entry);
3397  * emit_get_rgctx_method:
3399  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3400  * normal constants, else emit a load from the rgctx.
3403 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3404 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at compile time, emit a constant. */
3406 if (!context_used) {
3409 switch (rgctx_type) {
3410 case MONO_RGCTX_INFO_METHOD:
3411 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3413 case MONO_RGCTX_INFO_METHOD_RGCTX:
3414 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3417 g_assert_not_reached ();
/* Shared case: fetch through the runtime generic context. */
3420 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3421 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3423 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR to load the property RGCTX_TYPE of FIELD through the rgctx.
 */
3428 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3429 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3431 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3432 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3434 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 * Return the index of the gsharedvt info template entry matching
 * (DATA, RGCTX_TYPE), adding a new entry (and growing the template array)
 * if none exists yet. LOCAL_OFFSET entries are never deduplicated.
 */
3438 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3440 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3441 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing entry if one matches. */
3446 for (i = 0; i < info->num_entries; ++i) {
3447 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3449 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, mempool-backed) when full. */
3453 if (info->num_entries == info->count_entries) {
3454 MonoRuntimeGenericContextInfoTemplate *new_entries;
3455 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3457 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3459 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3460 info->entries = new_entries;
3461 info->count_entries = new_count_entries;
3464 idx = info->num_entries;
3465 template_ = &info->entries [idx];
3466 template_->info_type = rgctx_type;
3467 template_->data = data;
3469 info->num_entries ++;
3475  * emit_get_gsharedvt_info:
3477  *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3480 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3485 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3486 /* Load info->entries [idx] */
3487 dreg = alloc_preg (cfg);
3488 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * mini_emit_get_gsharedvt_info_klass:
 * Load the gsharedvt info entry RGCTX_TYPE keyed on KLASS's byval type.
 */
3494 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3496 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3500  * On return the caller must check @klass for load errors.
3503 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3505 MonoInst *vtable_arg;
3508 context_used = mini_class_check_context_used (cfg, klass);
/* Obtain the vtable: via the rgctx when shared, as a constant otherwise. */
3511 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3512 klass, MONO_RGCTX_INFO_VTABLE);
3514 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3518 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3521 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3525 	 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3526 	 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3528 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3529 ins->sreg1 = vtable_arg->dreg;
3530 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline "already initialized?" check, icall when not. */
3533 MonoBasicBlock *inited_bb;
3534 MonoInst *args [16];
3536 inited_reg = alloc_ireg (cfg);
3538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3540 NEW_BBLOCK (cfg, inited_bb);
3542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3543 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3545 args [0] = vtable_arg;
3546 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3548 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 * Emit a sequence point at IL offset IP, but only when sequence points are
 * enabled and METHOD is the method actually being compiled (not inlined).
 */
3553 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3557 if (cfg->gen_seq_points && cfg->method == method) {
3558 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3560 ins->flags |= MONO_INST_NONEMPTY_STACK;
3561 MONO_ADD_INS (cfg->cbb, ins);
3566 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3568 if (mini_get_debug_options ()->better_cast_details) {
3569 int vtable_reg = alloc_preg (cfg);
3570 int klass_reg = alloc_preg (cfg);
3571 MonoBasicBlock *is_null_bb = NULL;
3573 int to_klass_reg, context_used;
3576 NEW_BBLOCK (cfg, is_null_bb);
3578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3579 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3582 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3584 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3591 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3593 context_used = mini_class_check_context_used (cfg, klass);
3595 MonoInst *class_ins;
3597 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3598 to_klass_reg = class_ins->dreg;
3600 to_klass_reg = alloc_preg (cfg);
3601 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3603 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3606 MONO_START_BB (cfg, is_null_bb);
3611 mini_reset_cast_details (MonoCompile *cfg)
3613 /* Reset the variables holding the cast details */
3614 if (mini_get_debug_options ()->better_cast_details) {
3615 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3616 /* It is enough to reset the from field */
3617 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3622  * On return the caller must check @array_class for load errors
3625 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3627 int vtable_reg = alloc_preg (cfg);
3630 context_used = mini_class_check_context_used (cfg, array_class);
3632 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also serves as the null check on OBJ. */
3634 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Compare the receiver's type with ARRAY_CLASS; the comparison strategy
 * depends on how the class/vtable can be materialized (shared, rgctx, AOT, JIT). */
3636 if (cfg->opt & MONO_OPT_SHARED) {
3637 int class_reg = alloc_preg (cfg);
3640 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3641 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3642 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3643 } else if (context_used) {
3644 MonoInst *vtable_ins;
3646 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3649 if (cfg->compile_aot) {
3653 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3655 vt_reg = alloc_preg (cfg);
3656 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3657 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3660 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3666 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3668 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () for @val.  In shared generic
 * code (context_used != 0) the method address is loaded from the RGCTX
 * and an indirect call is emitted (calli, or an llvm-only calli);
 * otherwise a direct managed call is made, passing the vtable as an
 * extra argument when method sharing requires it.
 */
3672 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3673 * generic code is generated.
3676 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3678 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3681 MonoInst *rgctx, *addr;
3683 /* FIXME: What if the class is shared? We might not
3684 have to get the address of the method from the
3686 addr = emit_get_rgctx_method (cfg, context_used, method,
3687 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3688 if (cfg->llvm_only) {
/* Record the signature so the aot compiler can emit the gsharedvt wrappers for it */
3689 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3690 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3692 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3694 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3697 gboolean pass_vtable, pass_mrgctx;
3698 MonoInst *rgctx_arg = NULL;
3700 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
/* Nullable<T>.Unbox is not expected to need an MRGCTX argument */
3701 g_assert (!pass_mrgctx);
3704 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3707 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3710 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IR for CEE_UNBOX on the object at sp [0]: a type check
 * against @klass followed by computing the address of the unboxed
 * value (obj + sizeof (MonoObject)).  The type check compares the
 * object's element class against the expected one, throwing
 * InvalidCastException on mismatch; in shared generic code the
 * expected element class is fetched from the RGCTX.
 * Returns the address instruction (type STACK_MP).
 */
3715 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3719 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3720 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3721 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3722 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3724 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3725 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3728 /* FIXME: generics */
3729 g_assert (klass->rank == 0);
/* Unboxing an array object is never valid */
3732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3733 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3739 MonoInst *element_class;
3741 /* This assertion is from the unboxcast insn */
3742 g_assert (klass->rank == 0);
/* Shared generic code: expected element class comes from the RGCTX */
3744 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3745 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3748 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3750 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3751 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3752 mini_reset_cast_details (cfg);
/* Address of the payload: skip the MonoObject header */
3755 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3756 MONO_ADD_INS (cfg->cbb, add);
3757 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit the unbox sequence for a gsharedvt (generic-shared-by-value-type)
 * class @klass, where it is not known at JIT time whether T is
 * instantiated as a reference type, a nullable, or a plain vtype.
 * A runtime three-way branch on MONO_RGCTX_INFO_CLASS_BOX_TYPE selects:
 *   - vtype (fallthrough): address = obj + sizeof (MonoObject);
 *   - ref: spill the reference to a temporary and use its address;
 *   - nullable: call Nullable<T>.Unbox through a hand-built signature.
 * All paths store into addr_reg; the result is a load through that
 * address.
 */
3764 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3766 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3767 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3771 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3777 args [1] = klass_inst;
/* Runtime icall performs the castclass + unbox type check */
3780 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3782 NEW_BBLOCK (cfg, is_ref_bb);
3783 NEW_BBLOCK (cfg, is_nullable_bb);
3784 NEW_BBLOCK (cfg, end_bb);
3785 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3786 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3787 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3792 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3793 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the value lives right after the object header */
3797 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3798 MONO_ADD_INS (cfg->cbb, addr);
3800 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3803 MONO_START_BB (cfg, is_ref_bb);
3805 /* Save the ref to a temporary */
3806 dreg = alloc_ireg (cfg);
3807 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3808 addr->dreg = addr_reg;
3809 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3810 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3813 MONO_START_BB (cfg, is_nullable_bb);
3816 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3817 MonoInst *unbox_call;
3818 MonoMethodSignature *unbox_sig;
/* Nullable<T>.Unbox cannot be constructed as a MonoMethod at JIT time
 * for gsharedvt, so build its signature (object -> T) by hand */
3820 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3821 unbox_sig->ret = &klass->byval_arg;
3822 unbox_sig->param_count = 1;
3823 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3826 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3828 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3830 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3831 addr->dreg = addr_reg;
3834 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3837 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the unboxed value through addr_reg */
3840 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of @klass.  @for_box selects the
 * boxing variant of the managed allocator.  Several strategies are
 * used, in decreasing order of specialization:
 *   - shared generic code: vtable/klass from the RGCTX plus either a
 *     managed allocator (with known instance size) or an allocation icall;
 *   - MONO_OPT_SHARED: ves_icall_object_new with an explicit domain;
 *   - AOT out-of-line corlib classes: a token-based mscorlib helper to
 *     avoid relocations;
 *   - default: managed allocator, or a class-specific allocation
 *     function returned by mono_class_get_allocation_ftn ().
 * Returns NULL and sets the cfg exception on error (see header comment).
 */
3846 * Returns NULL and set the cfg exception on error.
3849 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3851 MonoInst *iargs [2];
3856 MonoRgctxInfoType rgctx_info;
3857 MonoInst *iargs [2];
/* gsharedvt klasses have a variable instance size, so the fixed-size
 * managed allocator cannot be used for them */
3858 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3860 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3862 if (cfg->opt & MONO_OPT_SHARED)
3863 rgctx_info = MONO_RGCTX_INFO_KLASS;
3865 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3866 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3868 if (cfg->opt & MONO_OPT_SHARED) {
3869 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3871 alloc_ftn = ves_icall_object_new;
3874 alloc_ftn = ves_icall_object_new_specific;
3877 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3878 if (known_instance_size) {
3879 int size = mono_class_instance_size (klass);
/* Every object must at least hold the MonoObject header */
3880 if (size < sizeof (MonoObject))
3881 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3883 EMIT_NEW_ICONST (cfg, iargs [1], size);
3885 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3888 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3891 if (cfg->opt & MONO_OPT_SHARED) {
3892 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3893 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3895 alloc_ftn = ves_icall_object_new;
3896 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3897 /* This happens often in argument checking code, eg. throw new FooException... */
3898 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3899 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3900 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3902 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3903 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type-load error through the cfg */
3907 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3908 cfg->exception_ptr = klass;
3912 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3914 if (managed_alloc) {
3915 int size = mono_class_instance_size (klass);
3916 if (size < sizeof (MonoObject))
3917 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3919 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3920 EMIT_NEW_ICONST (cfg, iargs [1], size);
3921 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3923 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3925 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to a whole number of pointer words */
3926 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3927 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3928 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3931 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3935 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing @val of type @klass.  Special cases:
 *   - Nullable<T>: call Nullable<T>.Box (directly, or indirectly via
 *     the RGCTX in shared generic code);
 *   - gsharedvt klass: runtime branch on CLASS_BOX_TYPE — allocate+copy
 *     for vtypes, reload the reference as-is for ref types, and call a
 *     hand-built Nullable<T>.Box signature for nullables;
 *   - default: allocate via handle_alloc () and store the value past
 *     the object header.
 * Returns NULL and sets the cfg exception on error (see header comment).
 */
3939 * Returns NULL and set the cfg exception on error.
3942 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3944 MonoInst *alloc, *ins;
3946 if (mono_class_is_nullable (klass)) {
3947 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3950 if (cfg->llvm_only && cfg->gsharedvt) {
3951 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3952 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3953 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3955 /* FIXME: What if the class is shared? We might not
3956 have to get the method address from the RGCTX. */
3957 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3958 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3959 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3961 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3964 gboolean pass_vtable, pass_mrgctx;
3965 MonoInst *rgctx_arg = NULL;
3967 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3968 g_assert (!pass_mrgctx);
3971 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3974 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3977 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3981 if (mini_is_gsharedvt_klass (klass)) {
3982 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3983 MonoInst *res, *is_ref, *src_var, *addr;
3986 dreg = alloc_ireg (cfg);
3988 NEW_BBLOCK (cfg, is_ref_bb);
3989 NEW_BBLOCK (cfg, is_nullable_bb);
3990 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on how T is instantiated: vtype / ref / nullable */
3991 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3995 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3996 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate a box and copy the value past the header */
3999 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4002 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4003 ins->opcode = OP_STOREV_MEMBASE;
4005 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4006 res->type = STACK_OBJ;
4008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4011 MONO_START_BB (cfg, is_ref_bb);
4013 /* val is a vtype, so has to load the value manually */
4014 src_var = get_vreg_to_inst (cfg, val->dreg);
4016 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4017 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4018 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4019 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4022 MONO_START_BB (cfg, is_nullable_bb);
4025 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
4026 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4028 MonoMethodSignature *box_sig;
4031 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4032 * construct that method at JIT time, so have to do things by hand.
4034 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4035 box_sig->ret = &mono_defaults.object_class->byval_arg;
4036 box_sig->param_count = 1;
4037 box_sig->params [0] = &klass->byval_arg;
4040 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4042 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4043 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4044 res->type = STACK_OBJ;
4048 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4050 MONO_START_BB (cfg, end_bb);
/* Non-gsharedvt default: allocate and copy the value past the object header */
4054 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4058 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls are safe to
 * call directly (written once, then read without locking). */
4063 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether @cmethod (an icall) may be invoked directly instead
 * of through a wrapper.  Only a small whitelist of corlib classes is
 * allowed — classes whose icalls do not (directly or indirectly) raise
 * managed exceptions via mono_raise_exception ().
 */
4066 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4068 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4069 if (!direct_icalls_enabled (cfg))
4073 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4074 * Whitelist a few icalls for now.
4076 if (!direct_icall_type_hash) {
4077 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4079 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4080 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4081 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4082 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible */
4083 mono_memory_barrier ();
4084 direct_icall_type_hash = h;
4087 if (cmethod->klass == mono_defaults.math_class)
4089 /* No locking needed */
4090 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether calls to @cmethod must keep the caller's frame
 * walkable; the visible check covers System.Type:GetType ().
 */
4096 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4098 if (cmethod->klass == mono_defaults.systemtype_class) {
4099 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an intrinsic expansion of Enum.HasFlag for an enum of @klass:
 * load the enum value pointed to by @enum_this, AND it with @enum_flag,
 * and compare the result for equality with @enum_flag, producing an I4
 * boolean.  32-bit vs 64-bit opcodes are chosen from the underlying
 * enum type; the emitted instructions are decomposed afterwards so the
 * expansion also works late in the pipeline.
 */
4105 static G_GNUC_UNUSED MonoInst*
4106 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4108 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4109 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4112 switch (enum_type->type) {
4115 #if SIZEOF_REGISTER == 8
4127 MonoInst *load, *and_, *cmp, *ceq;
4128 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4129 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4130 int dest_reg = alloc_ireg (cfg);
4132 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4133 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
/* HasFlag == ((value & flag) == flag) */
4134 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4135 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4137 ceq->type = STACK_I4;
/* Decompose so the same expansion works when emitted after the decompose pass */
4140 load = mono_decompose_opcode (cfg, load);
4141 and_ = mono_decompose_opcode (cfg, and_);
4142 cmp = mono_decompose_opcode (cfg, cmp);
4143 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate of
 * @klass and fill in its target, method, optional cached-code slot,
 * and invoke_impl/method_ptr fields, instead of calling the runtime
 * constructor.  @virtual_ selects the virtual-delegate trampoline.
 * llvm-only compiles call the mono_llvmonly_init_delegate* icalls
 * instead of installing a trampoline.
 * Returns NULL and sets the cfg exception on error (see header comment).
 */
4151 * Returns NULL and set the cfg exception on error.
4153 static G_GNUC_UNUSED MonoInst*
4154 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4158 gpointer trampoline;
4159 MonoInst *obj, *method_ins, *tramp_ins;
4163 if (virtual_ && !cfg->llvm_only) {
4164 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out if no virtual invoke impl exists for this signature */
4167 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4171 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4175 /* Inline the contents of mono_delegate_ctor */
4177 /* Set target field */
4178 /* Optimize away setting of NULL target */
4179 if (!MONO_INS_IS_PCONST_NULL (target)) {
4180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object needs a write barrier */
4181 if (cfg->gen_write_barriers) {
4182 dreg = alloc_preg (cfg);
4183 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4184 mini_emit_write_barrier (cfg, ptr, target);
4188 /* Set method field */
4189 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4190 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4193 * To avoid looking up the compiled code belonging to the target method
4194 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4195 * store it, and we fill it after the method has been compiled.
4197 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4198 MonoInst *code_slot_ins;
4201 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4203 domain = mono_domain_get ();
/* The per-domain method -> code-slot hash is created and filled under the domain lock */
4204 mono_domain_lock (domain);
4205 if (!domain_jit_info (domain)->method_code_hash)
4206 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4207 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4209 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4210 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4212 mono_domain_unlock (domain);
4214 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4216 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4219 if (cfg->llvm_only) {
4220 MonoInst *args [16];
4225 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4226 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4229 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4235 if (cfg->compile_aot) {
4236 MonoDelegateClassMethodPair *del_tramp;
/* AOT: describe the (class, method, virtual) triple as patch data so the
 * trampoline address can be resolved at load time */
4238 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4239 del_tramp->klass = klass;
4240 del_tramp->method = context_used ? NULL : method;
4241 del_tramp->is_virtual = virtual_;
4242 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4245 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4247 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4248 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4251 /* Set invoke_impl field */
4253 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4255 dreg = alloc_preg (cfg);
4256 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4257 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4259 dreg = alloc_preg (cfg);
4260 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4261 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4264 dreg = alloc_preg (cfg);
4265 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4268 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall wrapper for
 * CEE_NEWOBJ on a multi-dimensional array constructor.  Since the icall
 * takes varargs, the method is flagged MONO_CFG_HAS_VARARGS and LLVM
 * compilation is disabled for it.
 */
4274 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4276 MonoJitICallInfo *info;
4278 /* Need to register the icall so it gets an icall wrapper */
4279 info = mono_get_array_new_va_icall (rank);
4281 cfg->flags |= MONO_CFG_HAS_VARARGS;
4283 /* mono_array_new_va () needs a vararg calling convention */
4284 cfg->exception_message = g_strdup ("array-new");
4285 cfg->disable_llvm = TRUE;
4287 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4288 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle `constrained.` calls where the receiver is a gsharedvt type.
 * For a whitelisted set of simple signatures (Object methods, interface
 * methods with at most one simple argument) the receiver, method, class
 * and packed arguments are handed to the mono_gsharedvt_constrained_call
 * icall, which decides at run time whether the receiver is a ref type or
 * a vtype; otherwise GSHAREDVT_FAILURE bails out.  The icall returns a
 * boxed result which is unboxed/unwrapped here as needed.
 */
4292 * handle_constrained_gsharedvt_call:
4294 * Handle constrained calls where the receiver is a gsharedvt type.
4295 * Return the instruction representing the call. Set the cfg exception on failure.
4298 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4299 gboolean *ref_emit_widen)
4301 MonoInst *ins = NULL;
4302 gboolean emit_widen = *ref_emit_widen;
4305 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4306 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4307 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Whitelist: simple target classes, simple return types, and at most one
 * simple parameter — enough for ToString/Equals/GetHashCode, IComparable<T>,
 * IEquatable<T> and the AsyncTaskMethodBuilder interface calls below */
4309 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4310 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4311 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4312 MonoInst *args [16];
4315 * This case handles calls to
4316 * - object:ToString()/Equals()/GetHashCode(),
4317 * - System.IComparable<T>:CompareTo()
4318 * - System.IEquatable<T>:Equals ()
4319 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
4323 if (mono_method_check_context_used (cmethod))
4324 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4326 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4327 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4329 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4330 if (fsig->hasthis && fsig->param_count) {
4331 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4332 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4333 ins->dreg = alloc_preg (cfg);
4334 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4335 MONO_ADD_INS (cfg->cbb, ins);
4338 if (mini_is_gsharedvt_type (fsig->params [0])) {
4339 int addr_reg, deref_arg_reg;
4341 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4342 deref_arg_reg = alloc_preg (cfg);
4343 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4344 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4346 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4347 addr_reg = ins->dreg;
4348 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4350 EMIT_NEW_ICONST (cfg, args [3], 0);
4351 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4354 EMIT_NEW_ICONST (cfg, args [3], 0);
4355 EMIT_NEW_ICONST (cfg, args [4], 0);
4357 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed object; unwrap it according to the return type */
4360 if (mini_is_gsharedvt_type (fsig->ret)) {
4361 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4362 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
/* Skip the MonoObject header and load the raw value */
4366 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4367 MONO_ADD_INS (cfg->cbb, add);
4369 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4370 MONO_ADD_INS (cfg->cbb, ins);
4371 /* ins represents the call result */
4374 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4377 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry basic block, and add
 * a dummy use in the exit block so liveness analysis keeps the variable
 * alive for the whole method (backends may generate uses that are not
 * visible to the IR-level passes).  Idempotent: does nothing if the
 * got_var is absent or already allocated.
 */
4386 mono_emit_load_got_addr (MonoCompile *cfg)
4388 MonoInst *getaddr, *dummy_use;
4390 if (!cfg->got_var || cfg->got_var_allocated)
4393 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4394 getaddr->cil_code = cfg->header->code;
4395 getaddr->dreg = cfg->got_var->dreg;
4397 /* Add it to the start of the first bblock */
4398 if (cfg->bb_entry->code) {
4399 getaddr->next = cfg->bb_entry->code;
4400 cfg->bb_entry->code = getaddr;
4403 MONO_ADD_INS (cfg->bb_entry, getaddr);
4405 cfg->got_var_allocated = TRUE;
4408 * Add a dummy use to keep the got_var alive, since real uses might
4409 * only be generated by the back ends.
4410 * Add it to end_bblock, so the variable's lifetime covers the whole
4412 * It would be better to make the usage of the got var explicit in all
4413 * cases when the backend needs it (i.e. calls, throw etc.), so this
4414 * wouldn't be needed.
4416 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4417 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining code-size limit; initialized once from the MONO_INLINELIMIT
 * environment variable, defaulting to INLINE_LENGTH_LIMIT. */
4420 static int inline_limit;
4421 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the method being
 * compiled.  Rejection reasons visible here include: inlining disabled,
 * inline depth > 10, no obtainable header summary, NoInlining /
 * synchronized / MarshalByRef methods, code size over the (env-tunable)
 * limit unless AggressiveInlining is set, classes whose cctor cannot be
 * run or proven already-run, soft-float R4 signatures, and methods on
 * the cfg->dont_inline list.  Running a cctor eagerly here is the
 * mechanism that makes inlining safe w.r.t. class initialization.
 */
4424 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4426 MonoMethodHeaderSummary header;
4428 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4429 MonoMethodSignature *sig = mono_method_signature (method);
4433 if (cfg->disable_inline)
4438 if (cfg->inline_depth > 10)
4441 if (!mono_method_get_header_summary (method, &header))
4444 /*runtime, icall and pinvoke are checked by summary call*/
4445 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4446 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4447 (mono_class_is_marshalbyref (method->klass)) ||
4451 /* also consider num_locals? */
4452 /* Do the size check early to avoid creating vtables */
4453 if (!inline_limit_inited) {
4455 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4456 inline_limit = atoi (inlinelimit);
4457 g_free (inlinelimit);
4459 inline_limit = INLINE_LENGTH_LIMIT;
4460 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit */
4462 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4466 * if we can initialize the class of the method right away, we do,
4467 * otherwise we don't allow inlining if the class needs initialization,
4468 * since it would mean inserting a call to mono_runtime_class_init()
4469 * inside the inlined code
4471 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4474 if (!(cfg->opt & MONO_OPT_SHARED)) {
4475 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4476 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4477 if (method->klass->has_cctor) {
4478 vtable = mono_class_vtable (cfg->domain, method->klass);
4481 if (!cfg->compile_aot) {
4483 if (!mono_runtime_class_init_full (vtable, &error)) {
4484 mono_error_cleanup (&error);
4489 } else if (mono_class_is_before_field_init (method->klass)) {
4490 if (cfg->run_cctors && method->klass->has_cctor) {
4491 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4492 if (!method->klass->runtime_info)
4493 /* No vtable created yet */
4495 vtable = mono_class_vtable (cfg->domain, method->klass);
4498 /* This makes so that inline cannot trigger */
4499 /* .cctors: too many apps depend on them */
4500 /* running with a specific order... */
4501 if (! vtable->initialized)
4504 if (!mono_runtime_class_init_full (vtable, &error)) {
4505 mono_error_cleanup (&error);
4509 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4510 if (!method->klass->runtime_info)
4511 /* No vtable created yet */
4513 vtable = mono_class_vtable (cfg->domain, method->klass);
4516 if (!vtable->initialized)
4521 * If we're compiling for shared code
4522 * the cctor will need to be run at aot method load time, for example,
4523 * or at the end of the compilation of the inlining method.
4525 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4529 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 values would need fallback handling, so don't inline */
4530 if (mono_arch_is_soft_float ()) {
4532 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4534 for (i = 0; i < sig->param_count; ++i)
4535 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4540 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in @method on @klass requires
 * a runtime class-init check.  A JIT compile of an already-initialized
 * vtable needs none; BeforeFieldInit classes accessing their own
 * statics, classes without a cctor to run, and instance methods of the
 * declaring class (cctor already ran before the call) also need none.
 */
4547 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4549 if (!cfg->compile_aot) {
4551 if (vtable->initialized)
4555 if (mono_class_is_before_field_init (klass)) {
4556 if (cfg->method == method)
4560 if (!mono_class_needs_cctor_run (klass, method))
4563 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4564 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element:
 * &arr->vector [index * element_size], with an optional bounds check
 * when @bcheck is set.  On 64-bit targets the 32-bit index is
 * sign-extended first (except under LLVM, where the extension is
 * handled later).  On x86/amd64 with power-of-two element sizes a
 * single LEA is emitted instead of the mul/add sequence.  For gsharedvt
 * variable-size klasses the element size is loaded from the RGCTX at
 * run time.
 */
4571 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4575 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4578 if (mini_is_gsharedvt_variable_klass (klass)) {
4581 mono_class_init (klass);
4582 size = mono_class_array_element_size (klass);
4585 mult_reg = alloc_preg (cfg);
4586 array_reg = arr->dreg;
4587 index_reg = index->dreg;
4589 #if SIZEOF_REGISTER == 8
4590 /* The array reg is 64 bits but the index reg is only 32 */
4591 if (COMPILE_LLVM (cfg)) {
4593 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4594 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4595 * of OP_X86_LEA for llvm.
4597 index2_reg = index_reg;
4599 index2_reg = alloc_preg (cfg);
4600 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4 */
4603 if (index->type == STACK_I8) {
4604 index2_reg = alloc_preg (cfg);
4605 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4607 index2_reg = index_reg;
4612 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4614 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold scale+offset into one LEA */
4615 if (size == 1 || size == 2 || size == 4 || size == 8) {
4616 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4618 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4619 ins->klass = mono_class_get_element_class (klass);
4620 ins->type = STACK_MP;
4626 add_reg = alloc_ireg_mp (cfg);
4629 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at run time, via the RGCTX */
4632 g_assert (cfg->gshared);
4633 context_used = mini_class_check_context_used (cfg, klass);
4634 g_assert (context_used);
4635 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4636 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4640 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4641 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4642 ins->klass = mono_class_get_element_class (klass);
4643 ins->type = STACK_MP;
4644 MONO_ADD_INS (cfg->cbb, ins);
4650 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4652 int bounds_reg = alloc_preg (cfg);
4653 int add_reg = alloc_ireg_mp (cfg);
4654 int mult_reg = alloc_preg (cfg);
4655 int mult2_reg = alloc_preg (cfg);
4656 int low1_reg = alloc_preg (cfg);
4657 int low2_reg = alloc_preg (cfg);
4658 int high1_reg = alloc_preg (cfg);
4659 int high2_reg = alloc_preg (cfg);
4660 int realidx1_reg = alloc_preg (cfg);
4661 int realidx2_reg = alloc_preg (cfg);
4662 int sum_reg = alloc_preg (cfg);
4663 int index1, index2, tmpreg;
4667 mono_class_init (klass);
4668 size = mono_class_array_element_size (klass);
4670 index1 = index_ins1->dreg;
4671 index2 = index_ins2->dreg;
4673 #if SIZEOF_REGISTER == 8
4674 /* The array reg is 64 bits but the index reg is only 32 */
4675 if (COMPILE_LLVM (cfg)) {
4678 tmpreg = alloc_preg (cfg);
4679 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4681 tmpreg = alloc_preg (cfg);
4682 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4686 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4690 /* range checking */
4691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4692 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4694 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4695 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4696 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4697 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4698 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4699 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4700 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4703 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4704 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4705 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4706 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4707 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4708 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4710 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4711 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4713 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4714 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4716 ins->type = STACK_MP;
4718 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Emit IR computing an element address for the Get/Set/Address methods of
 *   a multi-dimensional array class CMETHOD->klass.  Fast paths: rank 1 uses
 *   mini_emit_ldelema_1_ins; rank 2 uses mini_emit_ldelema_2_ins when the
 *   backend has a real multiply and intrinsics are enabled.  Otherwise it
 *   falls back to calling the marshal-generated Address() helper.
 *   IS_SET: TRUE for the setter, whose signature has one trailing value
 *   parameter that must be excluded from the rank count.
 *   NOTE(review): elided lines hide the gsharedvt early-out body and the
 *   final return; code below is kept verbatim.
 */
4724 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4728 MonoMethod *addr_method;
4730 MonoClass *eclass = cmethod->klass->element_class;
/* rank == number of index parameters (setters also pass the value to store) */
4732 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4735 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4737 /* emit_ldelema_2 depends on OP_LMUL */
4738 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4739 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4742 if (mini_is_gsharedvt_variable_klass (eclass))
/* Slow path: call the generated Address (i1, ..., in) wrapper. */
4745 element_size = mono_class_array_element_size (eclass);
4746 addr_method = mono_marshal_get_array_address (rank, element_size);
4747 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break call. */
4752 static MonoBreakPolicy
4753 always_insert_breakpoint (MonoMethod *method)
4755 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced by embedders via mono_set_break_policy (). */
4758 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4761 * mono_set_break_policy:
4762 * \param policy_callback the new callback function
4764 * Allow embedders to decide whether to actually obey breakpoint instructions
4765 * (both break IL instructions and \c Debugger.Break method calls), for example
4766 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4767 * untrusted or semi-trusted code.
4769 * \p policy_callback will be called every time a break point instruction needs to
4770 * be inserted with the method argument being the method that calls \c Debugger.Break
4771 * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4772 * if it wants the breakpoint to not be effective in the given method.
4773 * \c MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * Install POLICY_CALLBACK as the break policy; a NULL argument restores the
 * default always-break policy (elided 'else' branch below resets it).
 */
4776 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4778 if (policy_callback)
4779 break_policy_func = policy_callback;
4781 break_policy_func = always_insert_breakpoint;
/*
 * Query the installed break policy for METHOD; returns whether a breakpoint
 * should actually be emitted (return statements are elided in this excerpt).
 * NOTE(review): "brekpoint" is a long-standing typo in the name; renaming
 * would break callers, so it is kept as-is.
 */
4785 should_insert_brekpoint (MonoMethod *method) {
4786 switch (break_policy_func (method)) {
4787 case MONO_BREAK_POLICY_ALWAYS:
4789 case MONO_BREAK_POLICY_NEVER:
4791 case MONO_BREAK_POLICY_ON_DBG:
/* mdb (the old Mono debugger) integration was removed. */
4792 g_warning ("mdb no longer supported");
4795 g_warning ("Incorrect value returned from break policy callback");
4800 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   args [0] = array, args [1] = index, args [2] = address of the value.
 *   IS_SET selects the store direction: set copies *args[2] into the element
 *   (with a write barrier for reference elements), get copies the element
 *   out through args [2].
 */
4802 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4804 MonoInst *addr, *store, *load;
4805 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4807 /* the bounds check is already done by the callers */
4808 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: element <- *args [2] */
4810 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4811 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4812 if (mini_type_is_reference (&eklass->byval_arg))
4813 mini_emit_write_barrier (cfg, addr, load);
/* get: *args [2] <- element */
4815 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4816 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Returns TRUE when KLASS is a reference type (in the current generic-sharing context). */
4823 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4825 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 *   Reference-type stores with SAFETY_CHECKS (and a non-null constant value)
 *   go through the virtual stelemref marshal helper, which performs the array
 *   covariance check.  Value types use a direct store: either via a constant
 *   offset when the index is OP_ICONST, or through ldelema_1.  Reference
 *   element stores on the direct path get a GC write barrier.
 */
4829 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4831 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4832 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4833 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4834 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4835 MonoInst *iargs [3];
4838 mono_class_setup_vtable (obj_array);
4839 g_assert (helper->slot);
4841 if (sp [0]->type != STACK_OBJ)
4843 if (sp [2]->type != STACK_OBJ)
/* Virtual call so the helper dispatches on the array's real element type. */
4850 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4854 if (mini_is_gsharedvt_variable_klass (klass)) {
4857 // FIXME-VT: OP_ICONST optimization
4858 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4859 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4860 ins->opcode = OP_STOREV_MEMBASE;
4861 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset into the store instruction. */
4862 int array_reg = sp [0]->dreg;
4863 int index_reg = sp [1]->dreg;
4864 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4866 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4867 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4870 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4871 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4873 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4874 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4875 if (generic_class_is_reference_type (cfg, klass))
4876 mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Intrinsic for Array.UnsafeStore/UnsafeLoad: array access with the bounds
 *   check deliberately skipped (safety_checks == FALSE).  The element class
 *   is taken from the signature: params [2] for a store, the return type for
 *   a load.
 */
4883 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4888 eklass = mono_class_from_mono_type (fsig->params [2]);
4890 eklass = mono_class_from_mono_type (fsig->ret);
4893 return emit_array_store (cfg, eklass, args, FALSE);
4895 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4896 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *   Decide whether Array.UnsafeMov<S,R> can be compiled as a plain register
 *   move from PARAM_KLASS to RETURN_KLASS.  Rejects ref/valuetype mixing,
 *   types containing GC references, struct/scalar mixing, and floats; accepts
 *   equal sizes, and unequal sizes only for non-struct scalars that fit in
 *   the same 32-bit register class.
 *   NOTE(review): several lines below contain the mojibake "¶m_klass",
 *   which appears to be a mis-encoded "&param_klass" — confirm against the
 *   upstream source and fix the encoding; kept verbatim here.
 */
4902 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4905 int param_size, return_size;
4907 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
4908 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4910 if (cfg->verbose_level > 3)
4911 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4913 //Don't allow mixing reference types with value types
4914 if (param_klass->valuetype != return_klass->valuetype) {
4915 if (cfg->verbose_level > 3)
4916 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
4920 if (!param_klass->valuetype) {
4921 if (cfg->verbose_level > 3)
4922 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types holding GC references cannot be blindly reinterpreted. */
4927 if (param_klass->has_references || return_klass->has_references)
4930 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
4931 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4932 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4933 if (cfg->verbose_level > 3)
4934 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in different registers; a plain move would be wrong. */
4938 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4939 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4940 if (cfg->verbose_level > 3)
4941 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4945 param_size = mono_class_value_size (param_klass, &align);
4946 return_size = mono_class_value_size (return_klass, &align);
4948 //We can do it if sizes match
4949 if (param_size == return_size) {
4950 if (cfg->verbose_level > 3)
4951 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4955 //No simple way to handle struct if sizes don't match
4956 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
4957 if (cfg->verbose_level > 3)
4958 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4963 * Same reg size category.
4964 * A quick note on why we don't require widening here.
4965 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4967 * Since the source value comes from a function argument, the JIT will already have
4968 * the value in a VREG and performed any widening needed before (say, when loading from a field).
4970 if (param_size <= 4 && return_size <= 4) {
4971 if (cfg->verbose_level > 3)
4972 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *   Intrinsic for Array.UnsafeMov<S,R>: reinterpret args [0] as the return
 *   type when is_unsafe_mov_compatible allows it, either directly or for
 *   rank-1 arrays whose element classes are compatible.  Bails out for
 *   gsharedvt variable return types (elided branches carry the bail-out /
 *   emitted move in the full source).
 */
4980 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4982 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4983 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
4985 if (mini_is_gsharedvt_variable_type (fsig->ret))
4988 //Valuetypes that are semantically equivalent or numbers than can be widened to
4989 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
4992 //Arrays of valuetypes that are semantically equivalent
4993 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Try to replace a constructor call with intrinsic IR: SIMD constructor
 *   intrinsics first (when the arch supports them and MONO_OPT_SIMD is on),
 *   then native-types intrinsics.  Returns NULL when no intrinsic applies.
 */
5000 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5002 #ifdef MONO_ARCH_SIMD_INTRINSICS
5003 MonoInst *ins = NULL;
5005 if (cfg->opt & MONO_OPT_SIMD) {
5006 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5012 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 *   (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5016 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
5018 MonoInst *ins = NULL;
5019 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5020 MONO_ADD_INS (cfg->cbb, ins);
5021 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics used only by the LLVM backend: Math.Sin/Cos/Sqrt/Abs map to
 *   unary R8 opcodes, and Math.Min/Max map to I4/I8 (signed and unsigned)
 *   min/max opcodes when MONO_OPT_CMOV is enabled.  Returns NULL when the
 *   method is not recognized.  (Opcode assignments on the elided lines select
 *   the OP_* constants.)
 */
5027 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5029 MonoInst *ins = NULL;
5032 /* The LLVM backend supports these intrinsics */
5033 if (cmethod->klass == mono_defaults.math_class) {
5034 if (strcmp (cmethod->name, "Sin") == 0) {
5036 } else if (strcmp (cmethod->name, "Cos") == 0) {
5038 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5040 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5044 if (opcode && fsig->param_count == 1) {
5045 MONO_INST_NEW (cfg, ins, opcode);
5046 ins->type = STACK_R8;
5047 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5048 ins->sreg1 = args [0]->dreg;
5049 MONO_ADD_INS (cfg->cbb, ins);
5053 if (cfg->opt & MONO_OPT_CMOV) {
5054 if (strcmp (cmethod->name, "Min") == 0) {
5055 if (fsig->params [0]->type == MONO_TYPE_I4)
5057 if (fsig->params [0]->type == MONO_TYPE_U4)
5058 opcode = OP_IMIN_UN;
5059 else if (fsig->params [0]->type == MONO_TYPE_I8)
5061 else if (fsig->params [0]->type == MONO_TYPE_U8)
5062 opcode = OP_LMIN_UN;
5063 } else if (strcmp (cmethod->name, "Max") == 0) {
5064 if (fsig->params [0]->type == MONO_TYPE_I4)
5066 if (fsig->params [0]->type == MONO_TYPE_U4)
5067 opcode = OP_IMAX_UN;
5068 else if (fsig->params [0]->type == MONO_TYPE_I8)
5070 else if (fsig->params [0]->type == MONO_TYPE_U8)
5071 opcode = OP_LMAX_UN;
5075 if (opcode && fsig->param_count == 2) {
/* Min/Max: binary op; stack type follows the operand width. */
5076 MONO_INST_NEW (cfg, ins, opcode);
5077 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5078 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5079 ins->sreg1 = args [0]->dreg;
5080 ins->sreg2 = args [1]->dreg;
5081 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics safe under generic sharing: the Array.UnsafeStore/UnsafeLoad/
 *   UnsafeMov internal helpers.  Returns the emitted instruction or NULL.
 */
5089 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5091 if (cmethod->klass == mono_defaults.array_class) {
5092 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5093 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5094 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5095 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5096 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5097 return emit_array_unsafe_mov (cfg, fsig, args);
5104 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5106 MonoInst *ins = NULL;
5107 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5109 if (cmethod->klass == mono_defaults.string_class) {
5110 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5111 int dreg = alloc_ireg (cfg);
5112 int index_reg = alloc_preg (cfg);
5113 int add_reg = alloc_preg (cfg);
5115 #if SIZEOF_REGISTER == 8
5116 if (COMPILE_LLVM (cfg)) {
5117 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5119 /* The array reg is 64 bits but the index reg is only 32 */
5120 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5123 index_reg = args [1]->dreg;
5125 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5127 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5128 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5129 add_reg = ins->dreg;
5130 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5133 int mult_reg = alloc_preg (cfg);
5134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5135 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5136 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5137 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5139 type_from_op (cfg, ins, NULL, NULL);
5141 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5142 int dreg = alloc_ireg (cfg);
5143 /* Decompose later to allow more optimizations */
5144 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5145 ins->type = STACK_I4;
5146 ins->flags |= MONO_INST_FAULT;
5147 cfg->cbb->has_array_access = TRUE;
5148 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5153 } else if (cmethod->klass == mono_defaults.object_class) {
5154 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5155 int dreg = alloc_ireg_ref (cfg);
5156 int vt_reg = alloc_preg (cfg);
5157 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5158 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5159 type_from_op (cfg, ins, NULL, NULL);
5162 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5163 int dreg = alloc_ireg (cfg);
5164 int t1 = alloc_ireg (cfg);
5166 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5167 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5168 ins->type = STACK_I4;
5171 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5172 MONO_INST_NEW (cfg, ins, OP_NOP);
5173 MONO_ADD_INS (cfg->cbb, ins);
5177 } else if (cmethod->klass == mono_defaults.array_class) {
5178 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5179 return emit_array_generic_access (cfg, fsig, args, FALSE);
5180 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5181 return emit_array_generic_access (cfg, fsig, args, TRUE);
5183 #ifndef MONO_BIG_ARRAYS
5185 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5188 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5189 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5190 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5191 int dreg = alloc_ireg (cfg);
5192 int bounds_reg = alloc_ireg_mp (cfg);
5193 MonoBasicBlock *end_bb, *szarray_bb;
5194 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5196 NEW_BBLOCK (cfg, end_bb);
5197 NEW_BBLOCK (cfg, szarray_bb);
5199 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5200 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5201 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5202 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5203 /* Non-szarray case */
5205 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5206 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5208 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5209 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5210 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5211 MONO_START_BB (cfg, szarray_bb);
5214 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5215 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5217 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5218 MONO_START_BB (cfg, end_bb);
5220 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5221 ins->type = STACK_I4;
5227 if (cmethod->name [0] != 'g')
5230 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5231 int dreg = alloc_ireg (cfg);
5232 int vtable_reg = alloc_preg (cfg);
5233 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5234 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5235 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5236 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5237 type_from_op (cfg, ins, NULL, NULL);
5240 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5241 int dreg = alloc_ireg (cfg);
5243 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5244 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5245 type_from_op (cfg, ins, NULL, NULL);
5250 } else if (cmethod->klass == runtime_helpers_class) {
5251 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5252 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5254 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5255 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5257 g_assert (ctx->method_inst);
5258 g_assert (ctx->method_inst->type_argc == 1);
5259 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5260 MonoClass *klass = mono_class_from_mono_type (t);
5264 mono_class_init (klass);
5265 if (MONO_TYPE_IS_REFERENCE (t))
5266 EMIT_NEW_ICONST (cfg, ins, 1);
5267 else if (MONO_TYPE_IS_PRIMITIVE (t))
5268 EMIT_NEW_ICONST (cfg, ins, 0);
5269 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5270 EMIT_NEW_ICONST (cfg, ins, 1);
5271 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5272 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5274 g_assert (cfg->gshared);
5276 int context_used = mini_class_check_context_used (cfg, klass);
5278 /* This returns 1 or 2 */
5279 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5280 int dreg = alloc_ireg (cfg);
5281 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5287 } else if (cmethod->klass == mono_defaults.monitor_class) {
5288 gboolean is_enter = FALSE;
5289 gboolean is_v4 = FALSE;
5291 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5295 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5300 * To make async stack traces work, icalls which can block should have a wrapper.
5301 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5303 MonoBasicBlock *end_bb;
5305 NEW_BBLOCK (cfg, end_bb);
5307 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5309 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5310 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5311 MONO_START_BB (cfg, end_bb);
5314 } else if (cmethod->klass == mono_defaults.thread_class) {
5315 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5316 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5317 MONO_ADD_INS (cfg->cbb, ins);
5319 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5320 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5321 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5323 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5325 if (fsig->params [0]->type == MONO_TYPE_I1)
5326 opcode = OP_LOADI1_MEMBASE;
5327 else if (fsig->params [0]->type == MONO_TYPE_U1)
5328 opcode = OP_LOADU1_MEMBASE;
5329 else if (fsig->params [0]->type == MONO_TYPE_I2)
5330 opcode = OP_LOADI2_MEMBASE;
5331 else if (fsig->params [0]->type == MONO_TYPE_U2)
5332 opcode = OP_LOADU2_MEMBASE;
5333 else if (fsig->params [0]->type == MONO_TYPE_I4)
5334 opcode = OP_LOADI4_MEMBASE;
5335 else if (fsig->params [0]->type == MONO_TYPE_U4)
5336 opcode = OP_LOADU4_MEMBASE;
5337 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5338 opcode = OP_LOADI8_MEMBASE;
5339 else if (fsig->params [0]->type == MONO_TYPE_R4)
5340 opcode = OP_LOADR4_MEMBASE;
5341 else if (fsig->params [0]->type == MONO_TYPE_R8)
5342 opcode = OP_LOADR8_MEMBASE;
5343 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5344 opcode = OP_LOAD_MEMBASE;
5347 MONO_INST_NEW (cfg, ins, opcode);
5348 ins->inst_basereg = args [0]->dreg;
5349 ins->inst_offset = 0;
5350 MONO_ADD_INS (cfg->cbb, ins);
5352 switch (fsig->params [0]->type) {
5359 ins->dreg = mono_alloc_ireg (cfg);
5360 ins->type = STACK_I4;
5364 ins->dreg = mono_alloc_lreg (cfg);
5365 ins->type = STACK_I8;
5369 ins->dreg = mono_alloc_ireg (cfg);
5370 #if SIZEOF_REGISTER == 8
5371 ins->type = STACK_I8;
5373 ins->type = STACK_I4;
5378 ins->dreg = mono_alloc_freg (cfg);
5379 ins->type = STACK_R8;
5382 g_assert (mini_type_is_reference (fsig->params [0]));
5383 ins->dreg = mono_alloc_ireg_ref (cfg);
5384 ins->type = STACK_OBJ;
5388 if (opcode == OP_LOADI8_MEMBASE)
5389 ins = mono_decompose_opcode (cfg, ins);
5391 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5395 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5397 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5399 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5400 opcode = OP_STOREI1_MEMBASE_REG;
5401 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5402 opcode = OP_STOREI2_MEMBASE_REG;
5403 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5404 opcode = OP_STOREI4_MEMBASE_REG;
5405 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5406 opcode = OP_STOREI8_MEMBASE_REG;
5407 else if (fsig->params [0]->type == MONO_TYPE_R4)
5408 opcode = OP_STORER4_MEMBASE_REG;
5409 else if (fsig->params [0]->type == MONO_TYPE_R8)
5410 opcode = OP_STORER8_MEMBASE_REG;
5411 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5412 opcode = OP_STORE_MEMBASE_REG;
5415 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5417 MONO_INST_NEW (cfg, ins, opcode);
5418 ins->sreg1 = args [1]->dreg;
5419 ins->inst_destbasereg = args [0]->dreg;
5420 ins->inst_offset = 0;
5421 MONO_ADD_INS (cfg->cbb, ins);
5423 if (opcode == OP_STOREI8_MEMBASE_REG)
5424 ins = mono_decompose_opcode (cfg, ins);
5429 } else if (cmethod->klass->image == mono_defaults.corlib &&
5430 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5431 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5434 #if SIZEOF_REGISTER == 8
5435 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5436 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5437 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5438 ins->dreg = mono_alloc_preg (cfg);
5439 ins->sreg1 = args [0]->dreg;
5440 ins->type = STACK_I8;
5441 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5442 MONO_ADD_INS (cfg->cbb, ins);
5446 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5448 /* 64 bit reads are already atomic */
5449 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5450 load_ins->dreg = mono_alloc_preg (cfg);
5451 load_ins->inst_basereg = args [0]->dreg;
5452 load_ins->inst_offset = 0;
5453 load_ins->type = STACK_I8;
5454 MONO_ADD_INS (cfg->cbb, load_ins);
5456 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5463 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5464 MonoInst *ins_iconst;
5467 if (fsig->params [0]->type == MONO_TYPE_I4) {
5468 opcode = OP_ATOMIC_ADD_I4;
5469 cfg->has_atomic_add_i4 = TRUE;
5471 #if SIZEOF_REGISTER == 8
5472 else if (fsig->params [0]->type == MONO_TYPE_I8)
5473 opcode = OP_ATOMIC_ADD_I8;
5476 if (!mono_arch_opcode_supported (opcode))
5478 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5479 ins_iconst->inst_c0 = 1;
5480 ins_iconst->dreg = mono_alloc_ireg (cfg);
5481 MONO_ADD_INS (cfg->cbb, ins_iconst);
5483 MONO_INST_NEW (cfg, ins, opcode);
5484 ins->dreg = mono_alloc_ireg (cfg);
5485 ins->inst_basereg = args [0]->dreg;
5486 ins->inst_offset = 0;
5487 ins->sreg2 = ins_iconst->dreg;
5488 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5489 MONO_ADD_INS (cfg->cbb, ins);
5491 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5492 MonoInst *ins_iconst;
5495 if (fsig->params [0]->type == MONO_TYPE_I4) {
5496 opcode = OP_ATOMIC_ADD_I4;
5497 cfg->has_atomic_add_i4 = TRUE;
5499 #if SIZEOF_REGISTER == 8
5500 else if (fsig->params [0]->type == MONO_TYPE_I8)
5501 opcode = OP_ATOMIC_ADD_I8;
5504 if (!mono_arch_opcode_supported (opcode))
5506 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5507 ins_iconst->inst_c0 = -1;
5508 ins_iconst->dreg = mono_alloc_ireg (cfg);
5509 MONO_ADD_INS (cfg->cbb, ins_iconst);
5511 MONO_INST_NEW (cfg, ins, opcode);
5512 ins->dreg = mono_alloc_ireg (cfg);
5513 ins->inst_basereg = args [0]->dreg;
5514 ins->inst_offset = 0;
5515 ins->sreg2 = ins_iconst->dreg;
5516 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5517 MONO_ADD_INS (cfg->cbb, ins);
5519 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5522 if (fsig->params [0]->type == MONO_TYPE_I4) {
5523 opcode = OP_ATOMIC_ADD_I4;
5524 cfg->has_atomic_add_i4 = TRUE;
5526 #if SIZEOF_REGISTER == 8
5527 else if (fsig->params [0]->type == MONO_TYPE_I8)
5528 opcode = OP_ATOMIC_ADD_I8;
5531 if (!mono_arch_opcode_supported (opcode))
5533 MONO_INST_NEW (cfg, ins, opcode);
5534 ins->dreg = mono_alloc_ireg (cfg);
5535 ins->inst_basereg = args [0]->dreg;
5536 ins->inst_offset = 0;
5537 ins->sreg2 = args [1]->dreg;
5538 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5539 MONO_ADD_INS (cfg->cbb, ins);
5542 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5543 MonoInst *f2i = NULL, *i2f;
5544 guint32 opcode, f2i_opcode, i2f_opcode;
5545 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5546 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5548 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5549 fsig->params [0]->type == MONO_TYPE_R4) {
5550 opcode = OP_ATOMIC_EXCHANGE_I4;
5551 f2i_opcode = OP_MOVE_F_TO_I4;
5552 i2f_opcode = OP_MOVE_I4_TO_F;
5553 cfg->has_atomic_exchange_i4 = TRUE;
5555 #if SIZEOF_REGISTER == 8
5557 fsig->params [0]->type == MONO_TYPE_I8 ||
5558 fsig->params [0]->type == MONO_TYPE_R8 ||
5559 fsig->params [0]->type == MONO_TYPE_I) {
5560 opcode = OP_ATOMIC_EXCHANGE_I8;
5561 f2i_opcode = OP_MOVE_F_TO_I8;
5562 i2f_opcode = OP_MOVE_I8_TO_F;
5565 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5566 opcode = OP_ATOMIC_EXCHANGE_I4;
5567 cfg->has_atomic_exchange_i4 = TRUE;
5573 if (!mono_arch_opcode_supported (opcode))
5577 /* TODO: Decompose these opcodes instead of bailing here. */
5578 if (COMPILE_SOFT_FLOAT (cfg))
5581 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5582 f2i->dreg = mono_alloc_ireg (cfg);
5583 f2i->sreg1 = args [1]->dreg;
5584 if (f2i_opcode == OP_MOVE_F_TO_I4)
5585 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5586 MONO_ADD_INS (cfg->cbb, f2i);
5589 MONO_INST_NEW (cfg, ins, opcode);
5590 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5591 ins->inst_basereg = args [0]->dreg;
5592 ins->inst_offset = 0;
5593 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5594 MONO_ADD_INS (cfg->cbb, ins);
5596 switch (fsig->params [0]->type) {
5598 ins->type = STACK_I4;
5601 ins->type = STACK_I8;
5604 #if SIZEOF_REGISTER == 8
5605 ins->type = STACK_I8;
5607 ins->type = STACK_I4;
5612 ins->type = STACK_R8;
5615 g_assert (mini_type_is_reference (fsig->params [0]));
5616 ins->type = STACK_OBJ;
5621 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5622 i2f->dreg = mono_alloc_freg (cfg);
5623 i2f->sreg1 = ins->dreg;
5624 i2f->type = STACK_R8;
5625 if (i2f_opcode == OP_MOVE_I4_TO_F)
5626 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5627 MONO_ADD_INS (cfg->cbb, i2f);
5632 if (cfg->gen_write_barriers && is_ref)
5633 mini_emit_write_barrier (cfg, args [0], args [1]);
5635 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5636 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5637 guint32 opcode, f2i_opcode, i2f_opcode;
5638 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5639 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5641 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5642 fsig->params [1]->type == MONO_TYPE_R4) {
5643 opcode = OP_ATOMIC_CAS_I4;
5644 f2i_opcode = OP_MOVE_F_TO_I4;
5645 i2f_opcode = OP_MOVE_I4_TO_F;
5646 cfg->has_atomic_cas_i4 = TRUE;
5648 #if SIZEOF_REGISTER == 8
5650 fsig->params [1]->type == MONO_TYPE_I8 ||
5651 fsig->params [1]->type == MONO_TYPE_R8 ||
5652 fsig->params [1]->type == MONO_TYPE_I) {
5653 opcode = OP_ATOMIC_CAS_I8;
5654 f2i_opcode = OP_MOVE_F_TO_I8;
5655 i2f_opcode = OP_MOVE_I8_TO_F;
5658 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5659 opcode = OP_ATOMIC_CAS_I4;
5660 cfg->has_atomic_cas_i4 = TRUE;
5666 if (!mono_arch_opcode_supported (opcode))
5670 /* TODO: Decompose these opcodes instead of bailing here. */
5671 if (COMPILE_SOFT_FLOAT (cfg))
5674 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5675 f2i_new->dreg = mono_alloc_ireg (cfg);
5676 f2i_new->sreg1 = args [1]->dreg;
5677 if (f2i_opcode == OP_MOVE_F_TO_I4)
5678 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5679 MONO_ADD_INS (cfg->cbb, f2i_new);
5681 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5682 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5683 f2i_cmp->sreg1 = args [2]->dreg;
5684 if (f2i_opcode == OP_MOVE_F_TO_I4)
5685 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5686 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5689 MONO_INST_NEW (cfg, ins, opcode);
5690 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5691 ins->sreg1 = args [0]->dreg;
5692 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5693 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5694 MONO_ADD_INS (cfg->cbb, ins);
5696 switch (fsig->params [1]->type) {
5698 ins->type = STACK_I4;
5701 ins->type = STACK_I8;
5704 #if SIZEOF_REGISTER == 8
5705 ins->type = STACK_I8;
5707 ins->type = STACK_I4;
5711 ins->type = cfg->r4_stack_type;
5714 ins->type = STACK_R8;
5717 g_assert (mini_type_is_reference (fsig->params [1]));
5718 ins->type = STACK_OBJ;
5723 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5724 i2f->dreg = mono_alloc_freg (cfg);
5725 i2f->sreg1 = ins->dreg;
5726 i2f->type = STACK_R8;
5727 if (i2f_opcode == OP_MOVE_I4_TO_F)
5728 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5729 MONO_ADD_INS (cfg->cbb, i2f);
5734 if (cfg->gen_write_barriers && is_ref)
5735 mini_emit_write_barrier (cfg, args [0], args [1]);
5737 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5738 fsig->params [1]->type == MONO_TYPE_I4) {
5739 MonoInst *cmp, *ceq;
5741 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5744 /* int32 r = CAS (location, value, comparand); */
5745 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5746 ins->dreg = alloc_ireg (cfg);
5747 ins->sreg1 = args [0]->dreg;
5748 ins->sreg2 = args [1]->dreg;
5749 ins->sreg3 = args [2]->dreg;
5750 ins->type = STACK_I4;
5751 MONO_ADD_INS (cfg->cbb, ins);
5753 /* bool result = r == comparand; */
5754 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5755 cmp->sreg1 = ins->dreg;
5756 cmp->sreg2 = args [2]->dreg;
5757 cmp->type = STACK_I4;
5758 MONO_ADD_INS (cfg->cbb, cmp);
5760 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5761 ceq->dreg = alloc_ireg (cfg);
5762 ceq->type = STACK_I4;
5763 MONO_ADD_INS (cfg->cbb, ceq);
5765 /* *success = result; */
5766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5768 cfg->has_atomic_cas_i4 = TRUE;
5770 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5771 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5775 } else if (cmethod->klass->image == mono_defaults.corlib &&
5776 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5777 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5780 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5782 MonoType *t = fsig->params [0];
5784 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5786 g_assert (t->byref);
5787 /* t is a byref type, so the reference check is more complicated */
5788 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5789 if (t->type == MONO_TYPE_I1)
5790 opcode = OP_ATOMIC_LOAD_I1;
5791 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5792 opcode = OP_ATOMIC_LOAD_U1;
5793 else if (t->type == MONO_TYPE_I2)
5794 opcode = OP_ATOMIC_LOAD_I2;
5795 else if (t->type == MONO_TYPE_U2)
5796 opcode = OP_ATOMIC_LOAD_U2;
5797 else if (t->type == MONO_TYPE_I4)
5798 opcode = OP_ATOMIC_LOAD_I4;
5799 else if (t->type == MONO_TYPE_U4)
5800 opcode = OP_ATOMIC_LOAD_U4;
5801 else if (t->type == MONO_TYPE_R4)
5802 opcode = OP_ATOMIC_LOAD_R4;
5803 else if (t->type == MONO_TYPE_R8)
5804 opcode = OP_ATOMIC_LOAD_R8;
5805 #if SIZEOF_REGISTER == 8
5806 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5807 opcode = OP_ATOMIC_LOAD_I8;
5808 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5809 opcode = OP_ATOMIC_LOAD_U8;
5811 else if (t->type == MONO_TYPE_I)
5812 opcode = OP_ATOMIC_LOAD_I4;
5813 else if (is_ref || t->type == MONO_TYPE_U)
5814 opcode = OP_ATOMIC_LOAD_U4;
5818 if (!mono_arch_opcode_supported (opcode))
5821 MONO_INST_NEW (cfg, ins, opcode);
5822 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5823 ins->sreg1 = args [0]->dreg;
5824 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5825 MONO_ADD_INS (cfg->cbb, ins);
5828 case MONO_TYPE_BOOLEAN:
5835 ins->type = STACK_I4;
5839 ins->type = STACK_I8;
5843 #if SIZEOF_REGISTER == 8
5844 ins->type = STACK_I8;
5846 ins->type = STACK_I4;
5850 ins->type = cfg->r4_stack_type;
5853 ins->type = STACK_R8;
5857 ins->type = STACK_OBJ;
5863 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5865 MonoType *t = fsig->params [0];
5868 g_assert (t->byref);
5869 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5870 if (t->type == MONO_TYPE_I1)
5871 opcode = OP_ATOMIC_STORE_I1;
5872 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5873 opcode = OP_ATOMIC_STORE_U1;
5874 else if (t->type == MONO_TYPE_I2)
5875 opcode = OP_ATOMIC_STORE_I2;
5876 else if (t->type == MONO_TYPE_U2)
5877 opcode = OP_ATOMIC_STORE_U2;
5878 else if (t->type == MONO_TYPE_I4)
5879 opcode = OP_ATOMIC_STORE_I4;
5880 else if (t->type == MONO_TYPE_U4)
5881 opcode = OP_ATOMIC_STORE_U4;
5882 else if (t->type == MONO_TYPE_R4)
5883 opcode = OP_ATOMIC_STORE_R4;
5884 else if (t->type == MONO_TYPE_R8)
5885 opcode = OP_ATOMIC_STORE_R8;
5886 #if SIZEOF_REGISTER == 8
5887 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5888 opcode = OP_ATOMIC_STORE_I8;
5889 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5890 opcode = OP_ATOMIC_STORE_U8;
5892 else if (t->type == MONO_TYPE_I)
5893 opcode = OP_ATOMIC_STORE_I4;
5894 else if (is_ref || t->type == MONO_TYPE_U)
5895 opcode = OP_ATOMIC_STORE_U4;
5899 if (!mono_arch_opcode_supported (opcode))
5902 MONO_INST_NEW (cfg, ins, opcode);
5903 ins->dreg = args [0]->dreg;
5904 ins->sreg1 = args [1]->dreg;
5905 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5906 MONO_ADD_INS (cfg->cbb, ins);
5908 if (cfg->gen_write_barriers && is_ref)
5909 mini_emit_write_barrier (cfg, args [0], args [1]);
5915 } else if (cmethod->klass->image == mono_defaults.corlib &&
5916 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5917 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5918 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5919 if (should_insert_brekpoint (cfg->method)) {
5920 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5922 MONO_INST_NEW (cfg, ins, OP_NOP);
5923 MONO_ADD_INS (cfg->cbb, ins);
5927 } else if (cmethod->klass->image == mono_defaults.corlib &&
5928 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5929 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5930 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5932 EMIT_NEW_ICONST (cfg, ins, 1);
5934 EMIT_NEW_ICONST (cfg, ins, 0);
5937 } else if (cmethod->klass->image == mono_defaults.corlib &&
5938 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5939 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5940 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5941 /* No stack walks are currently available, so implement this as an intrinsic */
5942 MonoInst *assembly_ins;
5944 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5945 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5948 } else if (cmethod->klass->image == mono_defaults.corlib &&
5949 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5950 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5951 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5952 /* No stack walks are currently available, so implement this as an intrinsic */
5953 MonoInst *method_ins;
5954 MonoMethod *declaring = cfg->method;
5956 /* This returns the declaring generic method */
5957 if (declaring->is_inflated)
5958 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5959 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5960 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5961 cfg->no_inline = TRUE;
5962 if (cfg->method != cfg->current_method)
5963 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5966 } else if (cmethod->klass == mono_defaults.math_class) {
5968 * There is general branchless code for Min/Max, but it does not work for
5970 * http://everything2.com/?node_id=1051618
5972 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5973 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5974 MONO_INST_NEW (cfg, ins, OP_PCEQ);
5975 ins->dreg = alloc_preg (cfg);
5976 ins->type = STACK_I4;
5977 MONO_ADD_INS (cfg->cbb, ins);
5979 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5980 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5981 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5982 !strcmp (cmethod->klass->name, "Selector")) ||
5983 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5984 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5985 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
5986 !strcmp (cmethod->klass->name, "Selector"))
5988 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
5989 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
5990 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
5993 MonoJumpInfoToken *ji;
5996 if (args [0]->opcode == OP_GOT_ENTRY) {
5997 pi = (MonoInst *)args [0]->inst_p1;
5998 g_assert (pi->opcode == OP_PATCH_INFO);
5999 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6000 ji = (MonoJumpInfoToken *)pi->inst_p0;
6002 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6003 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6006 NULLIFY_INS (args [0]);
6008 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6009 return_val_if_nok (&cfg->error, NULL);
6011 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6012 ins->dreg = mono_alloc_ireg (cfg);
6015 MONO_ADD_INS (cfg->cbb, ins);
6020 #ifdef MONO_ARCH_SIMD_INTRINSICS
6021 if (cfg->opt & MONO_OPT_SIMD) {
6022 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6028 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6032 if (COMPILE_LLVM (cfg)) {
6033 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6038 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6042 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect certain well-known runtime calls to cheaper JIT-internal
 *   equivalents.  The branch visible here replaces String.InternalAllocateStr
 *   with a direct call to the GC's managed allocator, but only when
 *   allocation profiling is off and MONO_OPT_SHARED is not in effect.
 *   Returns the replacement call instruction.
 *   NOTE(review): some statements are elided from this excerpt (the
 *   MONO_CROSS_COMPILE #endif and the fall-through return) — consult the
 *   full file before relying on the exact control flow.
 */
6045 inline static MonoInst*
6046 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6047 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6049 if (method->klass == mono_defaults.string_class) {
6050 /* managed string allocation support */
6051 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6052 MonoInst *iargs [2];
6053 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6054 MonoMethod *managed_alloc = NULL;
6056 g_assert (vtable); /* Should not fail since it's System.String */
6057 #ifndef MONO_CROSS_COMPILE
6058 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) as its arguments */
6062 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6063 iargs [1] = args [0];
6064 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Spill the call arguments SP of an inline candidate into freshly created
 *   local variables (stored in cfg->args) so the inlined body can access
 *   them as if they were its own arguments.  When sig->hasthis, slot 0 is
 *   the implicit 'this' and its type is derived from the stack entry.
 */
6071 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6073 MonoInst *store, *temp;
6076 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' there is no entry in sig->params, so type it from the stack */
6077 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6080 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6081 * would be different than the MonoInst's used to represent arguments, and
6082 * the ldelema implementation can't deal with that.
6083 * Solution: When ldelema is used on an inline argument, create a var for
6084 * it, emit ldelema on that var, and emit the saving code below in
6085 * inline_method () if needed.
6087 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6088 cfg->args [i] = temp;
6089 /* This uses cfg->args [i] which is set by the preceding line */
6090 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6091 store->cil_code = sp [0]->cil_code;
6096 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6097 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6099 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only permit inlining of callees whose full name starts
 *   with the prefix given in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 *   The environment variable is read once and cached in a function-static.
 *   Returns TRUE when the callee matches the prefix (or, per the elided
 *   branch, when no limit is configured).
 */
6101 check_inline_called_method_name_limit (MonoMethod *called_method)
6104 static const char *limit = NULL;
6106 if (limit == NULL) {
6107 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6109 if (limit_string != NULL)
6110 limit = limit_string;
/* An empty limit string means "no restriction" (handled in elided code) */
6115 if (limit [0] != '\0') {
6116 char *called_method_name = mono_method_full_name (called_method, TRUE);
6118 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6119 g_free (called_method_name);
6121 //return (strncmp_result <= 0);
6122 return (strncmp_result == 0);
6129 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit ():
 *   only permit inlining when the CALLER's full name starts with the prefix
 *   in $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  The limit is read once and
 *   cached in a function-static.
 */
6131 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6134 static const char *limit = NULL;
6136 if (limit == NULL) {
6137 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6138 if (limit_string != NULL) {
6139 limit = limit_string;
/* An empty limit string means "no restriction" (handled in elided code) */
6145 if (limit [0] != '\0') {
6146 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6148 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6149 g_free (caller_method_name);
6151 //return (strncmp_result <= 0);
6152 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes virtual register DREG to the zero value of
 *   RTYPE: NULL for pointers/references, 0 for integral types, 0.0 for
 *   floating point (an R4CONST only when cfg->r4fp keeps float32 values in
 *   single precision), and VZERO for value types, typed-by-ref and
 *   value-type generic parameters.
 */
6160 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* The constants are static so their address stays valid in inst_p0 */
6162 static double r8_0 = 0.0;
6163 static float r4_0 = 0.0;
6167 rtype = mini_get_underlying_type (rtype);
6171 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6172 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6173 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6174 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6175 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6176 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6177 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6178 ins->type = STACK_R4;
6179 ins->inst_p0 = (void*)&r4_0;
6181 MONO_ADD_INS (cfg->cbb, ins);
6182 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6183 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6184 ins->type = STACK_R8;
6185 ins->inst_p0 = (void*)&r8_0;
6187 MONO_ADD_INS (cfg->cbb, ins);
6188 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6189 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6190 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6191 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6192 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a reference/pointer */
6194 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder opcodes that
 *   keep the IR/SSA form valid without generating any actual machine code.
 *   Used when locals are declared but `init` is not required.  Falls back
 *   to a real initialization for types with no dummy opcode.
 */
6199 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6203 rtype = mini_get_underlying_type (rtype);
6207 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6208 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6209 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6210 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6211 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6212 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6213 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6214 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6215 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6216 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6217 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6218 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6219 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6220 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero-initialization instead */
6222 emit_init_rvar (cfg, dreg, rtype);
6226 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE.  Under soft-float the value is
 *   built in a temporary register and stored through a LOCSTORE (so the
 *   soft-float decomposition sees a proper store); otherwise the local's
 *   dreg is initialized directly — for real when INIT is TRUE, with dummy
 *   opcodes when it is FALSE.
 */
6228 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6230 MonoInst *var = cfg->locals [local];
6231 if (COMPILE_SOFT_FLOAT (cfg)) {
6233 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6234 emit_init_rvar (cfg, reg, type);
/* cfg->cbb->last_ins is the init instruction just emitted above */
6235 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6238 emit_init_rvar (cfg, var->dreg, type);
6240 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public entry point: thin wrapper that forwards to the static inline_method () */
6245 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6247 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current position (cfg->cbb).  The method
 *   body is converted to IR via mono_method_to_ir () into fresh start/end
 *   bblocks; if the resulting cost is acceptable the blocks are linked into
 *   the caller's CFG (and merged where possible), otherwise the attempt is
 *   rolled back.  Large parts of cfg (locals, args, bblock maps, generic
 *   context, ...) are saved before and restored after the recursion.
 */
6253 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
6256 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6257 guchar *ip, guint real_offset, gboolean inline_always)
6260 MonoInst *ins, *rvar = NULL;
6261 MonoMethodHeader *cheader;
6262 MonoBasicBlock *ebblock, *sbblock;
6264 MonoMethod *prev_inlined_method;
6265 MonoInst **prev_locals, **prev_args;
6266 MonoType **prev_arg_types;
6267 guint prev_real_offset;
6268 GHashTable *prev_cbb_hash;
6269 MonoBasicBlock **prev_cil_offset_to_bb;
6270 MonoBasicBlock *prev_cbb;
6271 const unsigned char *prev_ip;
6272 unsigned char *prev_cil_start;
6273 guint32 prev_cil_offset_to_bb_len;
6274 MonoMethod *prev_current_method;
6275 MonoGenericContext *prev_generic_context;
6276 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6278 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-prefix filters for debugging inlining decisions */
6280 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6281 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6284 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6285 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6290 fsig = mono_method_signature (cmethod);
6292 if (cfg->verbose_level > 2)
6293 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once */
6295 if (!cmethod->inline_info) {
6296 cfg->stat_inlineable_methods++;
6297 cmethod->inline_info = 1;
6300 /* allocate local variables */
6301 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: propagate the error only if inlining was mandatory */
6303 if (inline_always) {
6304 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6305 mono_error_move (&cfg->error, &error);
6307 mono_error_cleanup (&error);
6312 /*Must verify before creating locals as it can cause the JIT to assert.*/
6313 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6314 mono_metadata_free_mh (cheader);
6318 /* allocate space to store the return value */
6319 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6320 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the inlined body */
6323 prev_locals = cfg->locals;
6324 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6325 for (i = 0; i < cheader->num_locals; ++i)
6326 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6328 /* allocate start and end blocks */
6329 /* This is needed so if the inline is aborted, we can clean up */
6330 NEW_BBLOCK (cfg, sbblock);
6331 sbblock->real_offset = real_offset;
6333 NEW_BBLOCK (cfg, ebblock);
6334 ebblock->block_num = cfg->num_bblocks++;
6335 ebblock->real_offset = real_offset;
/* Save the cfg state that mono_method_to_ir () will clobber */
6337 prev_args = cfg->args;
6338 prev_arg_types = cfg->arg_types;
6339 prev_inlined_method = cfg->inlined_method;
6340 cfg->inlined_method = cmethod;
6341 cfg->ret_var_set = FALSE;
6342 cfg->inline_depth ++;
6343 prev_real_offset = cfg->real_offset;
6344 prev_cbb_hash = cfg->cbb_hash;
6345 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6346 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6347 prev_cil_start = cfg->cil_start;
6349 prev_cbb = cfg->cbb;
6350 prev_current_method = cfg->current_method;
6351 prev_generic_context = cfg->generic_context;
6352 prev_ret_var_set = cfg->ret_var_set;
6353 prev_disable_inline = cfg->disable_inline;
/* Tell the recursion whether the call site was a virtual call */
6355 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6358 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6360 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state */
6362 cfg->inlined_method = prev_inlined_method;
6363 cfg->real_offset = prev_real_offset;
6364 cfg->cbb_hash = prev_cbb_hash;
6365 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6366 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6367 cfg->cil_start = prev_cil_start;
6369 cfg->locals = prev_locals;
6370 cfg->args = prev_args;
6371 cfg->arg_types = prev_arg_types;
6372 cfg->current_method = prev_current_method;
6373 cfg->generic_context = prev_generic_context;
6374 cfg->ret_var_set = prev_ret_var_set;
6375 cfg->disable_inline = prev_disable_inline;
6376 cfg->inline_depth --;
/* Accept: cheap enough, forced, or marked AggressiveInlining */
6378 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6379 if (cfg->verbose_level > 2)
6380 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6382 cfg->stat_inlined_methods++;
6384 /* always add some code to avoid block split failures */
6385 MONO_INST_NEW (cfg, ins, OP_NOP);
6386 MONO_ADD_INS (prev_cbb, ins);
6388 prev_cbb->next_bb = sbblock;
6389 link_bblock (cfg, prev_cbb, sbblock);
6392 * Get rid of the begin and end bblocks if possible to aid local
6395 if (prev_cbb->out_count == 1)
6396 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6398 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6399 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6401 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6402 MonoBasicBlock *prev = ebblock->in_bb [0];
6404 if (prev->next_bb == ebblock) {
6405 mono_merge_basic_blocks (cfg, prev, ebblock);
6407 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6408 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6409 cfg->cbb = prev_cbb;
6412 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6417 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths that end in OP_NOT_REACHED */
6423 for (i = 0; i < ebblock->in_count; ++i) {
6424 bb = ebblock->in_bb [i];
6426 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6429 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6439 * If the inlined method contains only a throw, then the ret var is not
6440 * set, so set it to a dummy value.
6443 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value for the caller */
6445 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6448 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Reject: undo the attempt and clear any exception raised during it */
6451 if (cfg->verbose_level > 2)
6452 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6453 cfg->exception_type = MONO_EXCEPTION_NONE;
6455 /* This gets rid of the newly added bblocks */
6456 cfg->cbb = prev_cbb;
6458 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6463 * Some of these comments may well be out-of-date.
6464 * Design decisions: we do a single pass over the IL code (and we do bblock
6465 * splitting/merging in the few cases when it's required: a back jump to an IL
6466 * address that was not already seen as bblock starting point).
6467 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6468 * Complex operations are decomposed in simpler ones right away. We need to let the
6469 * arch-specific code peek and poke inside this process somehow (except when the
6470 * optimizations can take advantage of the full semantic info of coarse opcodes).
6471 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6472 * MonoInst->opcode initially is the IL opcode or some simplification of that
6473 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6474 * opcode with value bigger than OP_LAST.
6475 * At this point the IR can be handed over to an interpreter, a dumb code generator
6476 * or to the optimizing code generator that will translate it to SSA form.
6478 * Profiling directed optimizations.
6479 * We may compile by default with few or no optimizations and instrument the code
6480 * or the user may indicate what methods to optimize the most either in a config file
6481 * or through repeated runs where the compiler applies offline the optimizations to
6482 * each method and then decides if it was worth it.
6485 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6486 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6487 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6488 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6489 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6490 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6491 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6492 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6494 /* offset from br.s -> br like opcodes */
6495 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to bblock BB, i.e. no
 *   other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
6498 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6500 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6502 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and
 *   create a bblock (via GET_BBLOCK) at every branch target and at the
 *   instruction following each branch/switch, so the main IR pass can
 *   split code at the right offsets.  Also marks the bblock containing a
 *   CEE_THROW as out-of-line (cold) code.
 */
6506 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6508 unsigned char *ip = start;
6509 unsigned char *target;
6512 MonoBasicBlock *bblock;
6513 const MonoOpcode *opcode;
6516 cli_addr = ip - start;
6517 i = mono_opcode_value ((const guint8 **)&ip, end);
6520 opcode = &mono_opcodes [i];
/* Advance ip past the operand; branches additionally register targets */
6521 switch (opcode->argument) {
6522 case MonoInlineNone:
6525 case MonoInlineString:
6526 case MonoInlineType:
6527 case MonoInlineField:
6528 case MonoInlineMethod:
6531 case MonoShortInlineR:
6538 case MonoShortInlineVar:
6539 case MonoShortInlineI:
6542 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte opcode */
6543 target = start + cli_addr + 2 + (signed char)ip [1];
6544 GET_BBLOCK (cfg, bblock, target);
/* The fall-through successor also starts a bblock */
6547 GET_BBLOCK (cfg, bblock, ip);
6549 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte opcode */
6550 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6551 GET_BBLOCK (cfg, bblock, target);
6554 GET_BBLOCK (cfg, bblock, ip);
6556 case MonoInlineSwitch: {
6557 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table */
6560 cli_addr += 5 + 4 * n;
6561 target = start + cli_addr;
6562 GET_BBLOCK (cfg, bblock, target);
6564 for (j = 0; j < n; ++j) {
6565 target = start + cli_addr + (gint32)read32 (ip);
6566 GET_BBLOCK (cfg, bblock, target);
6576 g_assert_not_reached ();
6579 if (i == CEE_THROW) {
6580 unsigned char *bb_start = ip - 1;
6582 /* Find the start of the bblock containing the throw */
6584 while ((bb_start >= start) && !bblock) {
6585 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot path */
6589 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in method M to a MonoMethod, allowing open generic
 *   methods.  For wrappers the method is fetched from the wrapper data and
 *   inflated with CONTEXT; otherwise it is looked up through the image's
 *   metadata.  Errors are reported through ERROR.
 */
6599 static inline MonoMethod *
6600 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6606 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6607 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6609 method = mono_class_inflate_generic_method_checked (method, context, error);
6612 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but additionally rejects methods on
 *   open constructed types when not compiling gshared code (setting a
 *   bad-image error on cfg).  Errors go into cfg->error when CFG is
 *   non-NULL, otherwise into a local MonoError that is cleaned up.
 */
6618 static inline MonoMethod *
6619 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6622 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6624 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6625 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6629 if (!method && !cfg)
6630 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass, inflating it with CONTEXT.  For wrappers
 *   the class comes from the wrapper data; otherwise it is resolved
 *   (including typespecs) from the image.  The class is initialized before
 *   being returned.  Resolution errors are currently swallowed (see FIXMEs).
 */
6635 static inline MonoClass*
6636 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6641 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6642 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6644 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6645 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6648 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6649 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6652 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature and inflate it with CONTEXT.
 *   For wrappers the signature is taken from the wrapper data; otherwise it
 *   is parsed from the image's metadata.  Returns NULL on error (reported
 *   through ERROR).
 */
6656 static inline MonoMethodSignature*
6657 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6659 MonoMethodSignature *fsig;
6662 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6663 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6665 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6666 return_val_if_nok (error, NULL);
6669 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a function-static) the
 *   SecurityManager.ThrowException(Exception) method used to raise
 *   CoreCLR security violations from JITted code.
 */
6675 throw_exception (void)
6677 static MonoMethod *method = NULL;
6680 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6681 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   position, raising the pre-built exception object at runtime.
 */
6688 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6690 MonoMethod *thrower = throw_exception ();
6693 EMIT_NEW_PCONST (cfg, args [0], ex);
6694 mono_emit_method_call (cfg, thrower, args, NULL);
6698 * Return the original method if a wrapper is specified. We can only access
6699 * the custom attributes from the original method.
6702 get_original_method (MonoMethod *method)
/* Non-wrappers are returned unchanged (return statement elided in excerpt) */
6704 if (method->wrapper_type == MONO_WRAPPER_NONE)
6707 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6708 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6711 /* in other cases we need to find the original method */
6712 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 *   may not access FIELD, emit code that throws the security exception at
 *   runtime instead of performing the access.
 */
6716 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6718 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6719 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6721 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, mirror of the field variant: if CALLER may not
 *   invoke CALLEE, emit code that throws the security exception at runtime.
 */
6725 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6727 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6728 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6730 emit_throw_exception (cfg, ex);
6734 * Check that the IL instructions at ip are the array initialization
6735 * sequence and return the pointer to the data and the size.
6738 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6741 * newarr[System.Int32]
6743 * ldtoken field valuetype ...
6744 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6746 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6748 guint32 token = read32 (ip + 7);
6749 guint32 field_token = read32 (ip + 2);
6750 guint32 field_index = field_token & 0xffffff;
6752 const char *data_ptr;
6754 MonoMethod *cmethod;
6755 MonoClass *dummy_class;
6756 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6760 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6764 *out_field_token = field_token;
6766 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6769 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6771 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6775 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6776 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6793 if (size > mono_type_size (field->type, &dummy_align))
6796 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6797 if (!image_is_dynamic (method->klass->image)) {
6798 field_index = read32 (ip + 2) & 0xffffff;
6799 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6800 data_ptr = mono_image_rva_map (method->klass->image, rva);
6801 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6802 /* for aot code we do the lookup on load */
6803 if (aot && data_ptr)
6804 return (const char *)GUINT_TO_POINTER (rva);
6806 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6808 data_ptr = mono_field_get_data (field);
6816 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6819 char *method_fname = mono_method_full_name (method, TRUE);
6821 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6824 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6825 mono_error_cleanup (&error);
6826 } else if (header->code_size == 0)
6827 method_code = g_strdup ("method body is empty.");
6829 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6830 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6831 g_free (method_fname);
6832 g_free (method_code);
6833 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
6837 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6840 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6841 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6842 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6843 /* Optimize reg-reg moves away */
6845 * Can't optimize other opcodes, since sp[0] might point to
6846 * the last ins of a decomposed opcode.
6848 sp [0]->dreg = (cfg)->locals [n]->dreg;
6850 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6855 * ldloca inhibits many optimizations so try to get rid of it in common
6858 static inline unsigned char *
6859 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6869 local = read16 (ip + 2);
6873 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6874 /* From the INITOBJ case */
6875 token = read32 (ip + 2);
6876 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6877 CHECK_TYPELOAD (klass);
6878 type = mini_get_underlying_type (&klass->byval_arg);
6879 emit_init_local (cfg, local, type, TRUE);
6887 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6889 MonoInst *icall_args [16];
6890 MonoInst *call_target, *ins, *vtable_ins;
6891 int arg_reg, this_reg, vtable_reg;
6892 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6893 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6894 gboolean variant_iface = FALSE;
6897 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
6900 * In llvm-only mode, vtables contain function descriptors instead of
6901 * method addresses/trampolines.
6903 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
6906 slot = mono_method_get_imt_slot (cmethod);
6908 slot = mono_method_get_vtable_index (cmethod);
6910 this_reg = sp [0]->dreg;
6912 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6913 variant_iface = TRUE;
6915 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6917 * The simplest case, a normal virtual call.
6919 int slot_reg = alloc_preg (cfg);
6920 int addr_reg = alloc_preg (cfg);
6921 int arg_reg = alloc_preg (cfg);
6922 MonoBasicBlock *non_null_bb;
6924 vtable_reg = alloc_preg (cfg);
6925 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6926 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6928 /* Load the vtable slot, which contains a function descriptor. */
6929 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6931 NEW_BBLOCK (cfg, non_null_bb);
6933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6934 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
6935 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
6938 // FIXME: Make the wrapper use the preserveall cconv
6939 // FIXME: Use one icall per slot for small slot numbers ?
6940 icall_args [0] = vtable_ins;
6941 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6942 /* Make the icall return the vtable slot value to save some code space */
6943 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
6944 ins->dreg = slot_reg;
6945 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
6948 MONO_START_BB (cfg, non_null_bb);
6949 /* Load the address + arg from the vtable slot */
6950 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6951 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
6953 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
6956 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
6958 * A simple interface call
6960 * We make a call through an imt slot to obtain the function descriptor we need to call.
6961 * The imt slot contains a function descriptor for a runtime function + arg.
6963 int slot_reg = alloc_preg (cfg);
6964 int addr_reg = alloc_preg (cfg);
6965 int arg_reg = alloc_preg (cfg);
6966 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6968 vtable_reg = alloc_preg (cfg);
6969 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6970 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6973 * The slot is already initialized when the vtable is created so there is no need
6977 /* Load the imt slot, which contains a function descriptor. */
6978 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6980 /* Load the address + arg of the imt thunk from the imt slot */
6981 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6982 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6984 * IMT thunks in llvm-only mode are C functions which take an info argument
6985 * plus the imt method and return the ftndesc to call.
6987 icall_args [0] = thunk_arg_ins;
6988 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
6989 cmethod, MONO_RGCTX_INFO_METHOD);
6990 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
6992 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
6995 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
6997 * This is similar to the interface case, the vtable slot points to an imt thunk which is
6998 * dynamically extended as more instantiations are discovered.
6999 * This handles generic virtual methods both on classes and interfaces.
7001 int slot_reg = alloc_preg (cfg);
7002 int addr_reg = alloc_preg (cfg);
7003 int arg_reg = alloc_preg (cfg);
7004 int ftndesc_reg = alloc_preg (cfg);
7005 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7006 MonoBasicBlock *slowpath_bb, *end_bb;
7008 NEW_BBLOCK (cfg, slowpath_bb);
7009 NEW_BBLOCK (cfg, end_bb);
7011 vtable_reg = alloc_preg (cfg);
7012 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7014 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7016 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7018 /* Load the slot, which contains a function descriptor. */
7019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7021 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7022 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7027 /* Same as with iface calls */
7028 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7029 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7030 icall_args [0] = thunk_arg_ins;
7031 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7032 cmethod, MONO_RGCTX_INFO_METHOD);
7033 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7034 ftndesc_ins->dreg = ftndesc_reg;
7036 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7037 * they don't know about yet. Fall back to the slowpath in that case.
7039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7040 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7045 MONO_START_BB (cfg, slowpath_bb);
7046 icall_args [0] = vtable_ins;
7047 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7048 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7049 cmethod, MONO_RGCTX_INFO_METHOD);
7051 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7053 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7054 ftndesc_ins->dreg = ftndesc_reg;
7055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7058 MONO_START_BB (cfg, end_bb);
7059 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7063 * Non-optimized cases
7065 icall_args [0] = sp [0];
7066 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7068 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7069 cmethod, MONO_RGCTX_INFO_METHOD);
7071 arg_reg = alloc_preg (cfg);
7072 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7073 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7075 g_assert (is_gsharedvt);
7077 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7079 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7082 * Pass the extra argument even if the callee doesn't receive it, most
7083 * calling conventions allow this.
7085 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7089 is_exception_class (MonoClass *klass)
7092 if (klass == mono_defaults.exception_class)
7094 klass = klass->parent;
7100 * is_jit_optimizer_disabled:
7102 * Determine whenever M's assembly has a DebuggableAttribute with the
7103 * IsJITOptimizerDisabled flag set.
7106 is_jit_optimizer_disabled (MonoMethod *m)
7109 MonoAssembly *ass = m->klass->image->assembly;
7110 MonoCustomAttrInfo* attrs;
7113 gboolean val = FALSE;
7116 if (ass->jit_optimizer_disabled_inited)
7117 return ass->jit_optimizer_disabled;
7119 klass = mono_class_try_get_debuggable_attribute_class ();
7123 ass->jit_optimizer_disabled = FALSE;
7124 mono_memory_barrier ();
7125 ass->jit_optimizer_disabled_inited = TRUE;
7129 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7130 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7132 for (i = 0; i < attrs->num_attrs; ++i) {
7133 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7135 MonoMethodSignature *sig;
7137 if (!attr->ctor || attr->ctor->klass != klass)
7139 /* Decode the attribute. See reflection.c */
7140 p = (const char*)attr->data;
7141 g_assert (read16 (p) == 0x0001);
7144 // FIXME: Support named parameters
7145 sig = mono_method_signature (attr->ctor);
7146 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7148 /* Two boolean arguments */
7152 mono_custom_attrs_free (attrs);
7155 ass->jit_optimizer_disabled = val;
7156 mono_memory_barrier ();
7157 ass->jit_optimizer_disabled_inited = TRUE;
7163 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7165 gboolean supported_tail_call;
7168 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7170 for (i = 0; i < fsig->param_count; ++i) {
7171 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7172 /* These can point to the current method's stack */
7173 supported_tail_call = FALSE;
7175 if (fsig->hasthis && cmethod->klass->valuetype)
7176 /* this might point to the current method's stack */
7177 supported_tail_call = FALSE;
7178 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7179 supported_tail_call = FALSE;
7180 if (cfg->method->save_lmf)
7181 supported_tail_call = FALSE;
7182 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7183 supported_tail_call = FALSE;
7184 if (call_opcode != CEE_CALL)
7185 supported_tail_call = FALSE;
7187 /* Debugging support */
7189 if (supported_tail_call) {
7190 if (!mono_debug_count ())
7191 supported_tail_call = FALSE;
7195 return supported_tail_call;
7201 * Handle calls made to ctors from NEWOBJ opcodes.
7204 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7205 MonoInst **sp, guint8 *ip, int *inline_costs)
7207 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7209 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7210 mono_method_is_generic_sharable (cmethod, TRUE)) {
7211 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7212 mono_class_vtable (cfg->domain, cmethod->klass);
7213 CHECK_TYPELOAD (cmethod->klass);
7215 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7216 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7219 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7220 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7222 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7224 CHECK_TYPELOAD (cmethod->klass);
7225 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7230 /* Avoid virtual calls to ctors if possible */
7231 if (mono_class_is_marshalbyref (cmethod->klass))
7232 callvirt_this_arg = sp [0];
7234 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7235 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7236 CHECK_CFG_EXCEPTION;
7237 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7238 mono_method_check_inlining (cfg, cmethod) &&
7239 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7242 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7243 cfg->real_offset += 5;
7245 *inline_costs += costs - 5;
7247 INLINE_FAILURE ("inline failure");
7248 // FIXME-VT: Clean this up
7249 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7250 GSHAREDVT_FAILURE(*ip);
7251 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7253 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7256 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7258 if (cfg->llvm_only) {
7259 // FIXME: Avoid initializing vtable_arg
7260 emit_llvmonly_calli (cfg, fsig, sp, addr);
7262 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7264 } else if (context_used &&
7265 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7266 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7267 MonoInst *cmethod_addr;
7269 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7271 if (cfg->llvm_only) {
7272 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7273 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7274 emit_llvmonly_calli (cfg, fsig, sp, addr);
7276 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7277 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7279 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7282 INLINE_FAILURE ("ctor call");
7283 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7284 callvirt_this_arg, NULL, vtable_arg);
7291 emit_setret (MonoCompile *cfg, MonoInst *val)
7293 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7296 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7299 if (!cfg->vret_addr) {
7300 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7302 EMIT_NEW_RETLOADA (cfg, ret_addr);
7304 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7305 ins->klass = mono_class_from_mono_type (ret_type);
7308 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7309 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7310 MonoInst *iargs [1];
7314 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7315 mono_arch_emit_setret (cfg, cfg->method, conv);
7317 mono_arch_emit_setret (cfg, cfg->method, val);
7320 mono_arch_emit_setret (cfg, cfg->method, val);
7326 * mono_method_to_ir:
7328 * Translate the .net IL into linear IR.
7330 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7331 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7332 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7333 * @inline_args: if not NULL, contains the arguments to the inline call
7334 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7335 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7337 * This method is used to turn ECMA IL into Mono's internal Linear IR
7338 * reprensetation. It is used both for entire methods, as well as
7339 * inlining existing methods. In the former case, the @start_bblock,
7340 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7341 * inline_offset is set to zero.
7343 * Returns: the inline cost, or -1 if there was an error processing this method.
7346 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7347 MonoInst *return_var, MonoInst **inline_args,
7348 guint inline_offset, gboolean is_virtual_call)
7351 MonoInst *ins, **sp, **stack_start;
7352 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7353 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7354 MonoMethod *cmethod, *method_definition;
7355 MonoInst **arg_array;
7356 MonoMethodHeader *header;
7358 guint32 token, ins_flag;
7360 MonoClass *constrained_class = NULL;
7361 unsigned char *ip, *end, *target, *err_pos;
7362 MonoMethodSignature *sig;
7363 MonoGenericContext *generic_context = NULL;
7364 MonoGenericContainer *generic_container = NULL;
7365 MonoType **param_types;
7366 int i, n, start_new_bblock, dreg;
7367 int num_calls = 0, inline_costs = 0;
7368 int breakpoint_id = 0;
7370 GSList *class_inits = NULL;
7371 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7373 gboolean init_locals, seq_points, skip_dead_blocks;
7374 gboolean sym_seq_points = FALSE;
7375 MonoDebugMethodInfo *minfo;
7376 MonoBitSet *seq_point_locs = NULL;
7377 MonoBitSet *seq_point_set_locs = NULL;
7379 cfg->disable_inline = is_jit_optimizer_disabled (method);
7381 /* serialization and xdomain stuff may need access to private fields and methods */
7382 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7383 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7384 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7385 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7386 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7387 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7389 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7390 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7391 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7392 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7393 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7395 image = method->klass->image;
7396 header = mono_method_get_header_checked (method, &cfg->error);
7398 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7399 goto exception_exit;
7401 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7404 generic_container = mono_method_get_generic_container (method);
7405 sig = mono_method_signature (method);
7406 num_args = sig->hasthis + sig->param_count;
7407 ip = (unsigned char*)header->code;
7408 cfg->cil_start = ip;
7409 end = ip + header->code_size;
7410 cfg->stat_cil_code_size += header->code_size;
7412 seq_points = cfg->gen_seq_points && cfg->method == method;
7414 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7415 /* We could hit a seq point before attaching to the JIT (#8338) */
7419 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7420 minfo = mono_debug_lookup_method (method);
7422 MonoSymSeqPoint *sps;
7423 int i, n_il_offsets;
7425 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7426 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7427 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7428 sym_seq_points = TRUE;
7429 for (i = 0; i < n_il_offsets; ++i) {
7430 if (sps [i].il_offset < header->code_size)
7431 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7435 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7437 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7439 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7440 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7442 mono_debug_free_method_async_debug_info (asyncMethod);
7444 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7445 /* Methods without line number info like auto-generated property accessors */
7446 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7447 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7448 sym_seq_points = TRUE;
7453 * Methods without init_locals set could cause asserts in various passes
7454 * (#497220). To work around this, we emit dummy initialization opcodes
7455 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7456 * on some platforms.
7458 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7459 init_locals = header->init_locals;
7463 method_definition = method;
7464 while (method_definition->is_inflated) {
7465 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7466 method_definition = imethod->declaring;
7469 /* SkipVerification is not allowed if core-clr is enabled */
7470 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7472 dont_verify_stloc = TRUE;
7475 if (sig->is_inflated)
7476 generic_context = mono_method_get_context (method);
7477 else if (generic_container)
7478 generic_context = &generic_container->context;
7479 cfg->generic_context = generic_context;
7482 g_assert (!sig->has_type_parameters);
7484 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7485 g_assert (method->is_inflated);
7486 g_assert (mono_method_get_context (method)->method_inst);
7488 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7489 g_assert (sig->generic_param_count);
7491 if (cfg->method == method) {
7492 cfg->real_offset = 0;
7494 cfg->real_offset = inline_offset;
7497 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7498 cfg->cil_offset_to_bb_len = header->code_size;
7500 cfg->current_method = method;
7502 if (cfg->verbose_level > 2)
7503 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7505 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7507 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7508 for (n = 0; n < sig->param_count; ++n)
7509 param_types [n + sig->hasthis] = sig->params [n];
7510 cfg->arg_types = param_types;
7512 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7513 if (cfg->method == method) {
7515 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7516 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7519 NEW_BBLOCK (cfg, start_bblock);
7520 cfg->bb_entry = start_bblock;
7521 start_bblock->cil_code = NULL;
7522 start_bblock->cil_length = 0;
7525 NEW_BBLOCK (cfg, end_bblock);
7526 cfg->bb_exit = end_bblock;
7527 end_bblock->cil_code = NULL;
7528 end_bblock->cil_length = 0;
7529 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7530 g_assert (cfg->num_bblocks == 2);
7532 arg_array = cfg->args;
7534 if (header->num_clauses) {
7535 cfg->spvars = g_hash_table_new (NULL, NULL);
7536 cfg->exvars = g_hash_table_new (NULL, NULL);
7538 /* handle exception clauses */
7539 for (i = 0; i < header->num_clauses; ++i) {
7540 MonoBasicBlock *try_bb;
7541 MonoExceptionClause *clause = &header->clauses [i];
7542 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7544 try_bb->real_offset = clause->try_offset;
7545 try_bb->try_start = TRUE;
7546 try_bb->region = ((i + 1) << 8) | clause->flags;
7547 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7548 tblock->real_offset = clause->handler_offset;
7549 tblock->flags |= BB_EXCEPTION_HANDLER;
7552 * Linking the try block with the EH block hinders inlining as we won't be able to
7553 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7555 if (COMPILE_LLVM (cfg))
7556 link_bblock (cfg, try_bb, tblock);
7558 if (*(ip + clause->handler_offset) == CEE_POP)
7559 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7561 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7562 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7563 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7564 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7565 MONO_ADD_INS (tblock, ins);
7567 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7568 /* finally clauses already have a seq point */
7569 /* seq points for filter clauses are emitted below */
7570 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7571 MONO_ADD_INS (tblock, ins);
7574 /* todo: is a fault block unsafe to optimize? */
7575 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7576 tblock->flags |= BB_EXCEPTION_UNSAFE;
7579 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7581 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7583 /* catch and filter blocks get the exception object on the stack */
7584 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7585 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7587 /* mostly like handle_stack_args (), but just sets the input args */
7588 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7589 tblock->in_scount = 1;
7590 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7591 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7595 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7596 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7597 if (!cfg->compile_llvm) {
7598 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7599 ins->dreg = tblock->in_stack [0]->dreg;
7600 MONO_ADD_INS (tblock, ins);
7603 MonoInst *dummy_use;
7606 * Add a dummy use for the exvar so its liveness info will be
7609 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7612 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7613 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7614 MONO_ADD_INS (tblock, ins);
7617 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7618 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7619 tblock->flags |= BB_EXCEPTION_HANDLER;
7620 tblock->real_offset = clause->data.filter_offset;
7621 tblock->in_scount = 1;
7622 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7623 /* The filter block shares the exvar with the handler block */
7624 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7625 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7626 MONO_ADD_INS (tblock, ins);
7630 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7631 clause->data.catch_class &&
7633 mono_class_check_context_used (clause->data.catch_class)) {
7635 * In shared generic code with catch
7636 * clauses containing type variables
7637 * the exception handling code has to
7638 * be able to get to the rgctx.
7639 * Therefore we have to make sure that
7640 * the vtable/mrgctx argument (for
7641 * static or generic methods) or the
7642 * "this" argument (for non-static
7643 * methods) are live.
7645 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7646 mini_method_get_context (method)->method_inst ||
7647 method->klass->valuetype) {
7648 mono_get_vtable_var (cfg);
7650 MonoInst *dummy_use;
7652 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7657 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7658 cfg->cbb = start_bblock;
7659 cfg->args = arg_array;
7660 mono_save_args (cfg, sig, inline_args);
7663 /* FIRST CODE BLOCK */
7664 NEW_BBLOCK (cfg, tblock);
7665 tblock->cil_code = ip;
7669 ADD_BBLOCK (cfg, tblock);
7671 if (cfg->method == method) {
7672 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7673 if (breakpoint_id) {
7674 MONO_INST_NEW (cfg, ins, OP_BREAK);
7675 MONO_ADD_INS (cfg->cbb, ins);
7679 /* we use a separate basic block for the initialization code */
7680 NEW_BBLOCK (cfg, init_localsbb);
7681 if (cfg->method == method)
7682 cfg->bb_init = init_localsbb;
7683 init_localsbb->real_offset = cfg->real_offset;
7684 start_bblock->next_bb = init_localsbb;
7685 init_localsbb->next_bb = cfg->cbb;
7686 link_bblock (cfg, start_bblock, init_localsbb);
7687 link_bblock (cfg, init_localsbb, cfg->cbb);
7689 cfg->cbb = init_localsbb;
7691 if (cfg->gsharedvt && cfg->method == method) {
7692 MonoGSharedVtMethodInfo *info;
7693 MonoInst *var, *locals_var;
7696 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7697 info->method = cfg->method;
7698 info->count_entries = 16;
7699 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7700 cfg->gsharedvt_info = info;
7702 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7703 /* prevent it from being register allocated */
7704 //var->flags |= MONO_INST_VOLATILE;
7705 cfg->gsharedvt_info_var = var;
7707 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7708 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7710 /* Allocate locals */
7711 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7712 /* prevent it from being register allocated */
7713 //locals_var->flags |= MONO_INST_VOLATILE;
7714 cfg->gsharedvt_locals_var = locals_var;
7716 dreg = alloc_ireg (cfg);
7717 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7719 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7720 ins->dreg = locals_var->dreg;
7722 MONO_ADD_INS (cfg->cbb, ins);
7723 cfg->gsharedvt_locals_var_ins = ins;
7725 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7728 ins->flags |= MONO_INST_INIT;
7732 if (mono_security_core_clr_enabled ()) {
7733 /* check if this is native code, e.g. an icall or a p/invoke */
7734 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7735 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7737 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7738 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7740 /* if this is a native call then it can only be JITted from platform code */
7741 if ((icall || pinvk) && method->klass && method->klass->image) {
7742 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7743 MonoException *ex = icall ? mono_get_exception_security () :
7744 mono_get_exception_method_access ();
7745 emit_throw_exception (cfg, ex);
7752 CHECK_CFG_EXCEPTION;
7754 if (header->code_size == 0)
7757 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7762 if (cfg->method == method)
7763 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7765 for (n = 0; n < header->num_locals; ++n) {
7766 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7771 /* We force the vtable variable here for all shared methods
7772 for the possibility that they might show up in a stack
7773 trace where their exact instantiation is needed. */
7774 if (cfg->gshared && method == cfg->method) {
7775 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7776 mini_method_get_context (method)->method_inst ||
7777 method->klass->valuetype) {
7778 mono_get_vtable_var (cfg);
7780 /* FIXME: Is there a better way to do this?
7781 We need the variable live for the duration
7782 of the whole method. */
7783 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7787 /* add a check for this != NULL to inlined methods */
7788 if (is_virtual_call) {
7791 NEW_ARGLOAD (cfg, arg_ins, 0);
7792 MONO_ADD_INS (cfg->cbb, arg_ins);
7793 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7796 skip_dead_blocks = !dont_verify;
7797 if (skip_dead_blocks) {
7798 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7803 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7804 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7807 start_new_bblock = 0;
7809 if (cfg->method == method)
7810 cfg->real_offset = ip - header->code;
7812 cfg->real_offset = inline_offset;
7817 if (start_new_bblock) {
7818 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7819 if (start_new_bblock == 2) {
7820 g_assert (ip == tblock->cil_code);
7822 GET_BBLOCK (cfg, tblock, ip);
7824 cfg->cbb->next_bb = tblock;
7826 start_new_bblock = 0;
7827 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7828 if (cfg->verbose_level > 3)
7829 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7830 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7834 g_slist_free (class_inits);
7837 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7838 link_bblock (cfg, cfg->cbb, tblock);
7839 if (sp != stack_start) {
7840 handle_stack_args (cfg, stack_start, sp - stack_start);
7842 CHECK_UNVERIFIABLE (cfg);
7844 cfg->cbb->next_bb = tblock;
7846 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7847 if (cfg->verbose_level > 3)
7848 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7849 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7852 g_slist_free (class_inits);
7857 if (skip_dead_blocks) {
7858 int ip_offset = ip - header->code;
7860 if (ip_offset == bb->end)
7864 int op_size = mono_opcode_size (ip, end);
7865 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7867 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7869 if (ip_offset + op_size == bb->end) {
7870 MONO_INST_NEW (cfg, ins, OP_NOP);
7871 MONO_ADD_INS (cfg->cbb, ins);
7872 start_new_bblock = 1;
7880 * Sequence points are points where the debugger can place a breakpoint.
7881 * Currently, we generate these automatically at points where the IL
7884 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7886 * Make methods interruptable at the beginning, and at the targets of
7887 * backward branches.
7888 * Also, do this at the start of every bblock in methods with clauses too,
7889 * to be able to handle instructions with imprecise control flow like
7891 * Backward branches are handled at the end of method-to-ir ().
7893 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7894 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7896 /* Avoid sequence points on empty IL like .volatile */
7897 // FIXME: Enable this
7898 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7899 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7900 if ((sp != stack_start) && !sym_seq_point)
7901 ins->flags |= MONO_INST_NONEMPTY_STACK;
7902 MONO_ADD_INS (cfg->cbb, ins);
7905 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7908 cfg->cbb->real_offset = cfg->real_offset;
7910 if ((cfg->method == method) && cfg->coverage_info) {
7911 guint32 cil_offset = ip - header->code;
7912 cfg->coverage_info->data [cil_offset].cil_code = ip;
7914 /* TODO: Use an increment here */
7915 #if defined(TARGET_X86)
7916 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7917 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7919 MONO_ADD_INS (cfg->cbb, ins);
7921 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7922 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7926 if (cfg->verbose_level > 3)
7927 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7931 if (seq_points && !sym_seq_points && sp != stack_start) {
7933 * The C# compiler uses these nops to notify the JIT that it should
7934 * insert seq points.
7936 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7937 MONO_ADD_INS (cfg->cbb, ins);
7939 if (cfg->keep_cil_nops)
7940 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7942 MONO_INST_NEW (cfg, ins, OP_NOP);
7944 MONO_ADD_INS (cfg->cbb, ins);
7947 if (should_insert_brekpoint (cfg->method)) {
7948 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7950 MONO_INST_NEW (cfg, ins, OP_NOP);
7953 MONO_ADD_INS (cfg->cbb, ins);
7959 CHECK_STACK_OVF (1);
7960 n = (*ip)-CEE_LDARG_0;
7962 EMIT_NEW_ARGLOAD (cfg, ins, n);
7970 CHECK_STACK_OVF (1);
7971 n = (*ip)-CEE_LDLOC_0;
7973 EMIT_NEW_LOCLOAD (cfg, ins, n);
7982 n = (*ip)-CEE_STLOC_0;
7985 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7987 emit_stloc_ir (cfg, sp, header, n);
7994 CHECK_STACK_OVF (1);
7997 EMIT_NEW_ARGLOAD (cfg, ins, n);
8003 CHECK_STACK_OVF (1);
8006 NEW_ARGLOADA (cfg, ins, n);
8007 MONO_ADD_INS (cfg->cbb, ins);
8017 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8019 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8024 CHECK_STACK_OVF (1);
8027 EMIT_NEW_LOCLOAD (cfg, ins, n);
8031 case CEE_LDLOCA_S: {
8032 unsigned char *tmp_ip;
8034 CHECK_STACK_OVF (1);
8035 CHECK_LOCAL (ip [1]);
8037 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8043 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8052 CHECK_LOCAL (ip [1]);
8053 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8055 emit_stloc_ir (cfg, sp, header, ip [1]);
8060 CHECK_STACK_OVF (1);
8061 EMIT_NEW_PCONST (cfg, ins, NULL);
8062 ins->type = STACK_OBJ;
8067 CHECK_STACK_OVF (1);
8068 EMIT_NEW_ICONST (cfg, ins, -1);
8081 CHECK_STACK_OVF (1);
8082 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8088 CHECK_STACK_OVF (1);
8090 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8096 CHECK_STACK_OVF (1);
8097 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8103 CHECK_STACK_OVF (1);
8104 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8105 ins->type = STACK_I8;
8106 ins->dreg = alloc_dreg (cfg, STACK_I8);
8108 ins->inst_l = (gint64)read64 (ip);
8109 MONO_ADD_INS (cfg->cbb, ins);
8115 gboolean use_aotconst = FALSE;
8117 #ifdef TARGET_POWERPC
8118 /* FIXME: Clean this up */
8119 if (cfg->compile_aot)
8120 use_aotconst = TRUE;
8123 /* FIXME: we should really allocate this only late in the compilation process */
8124 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8126 CHECK_STACK_OVF (1);
8132 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8134 dreg = alloc_freg (cfg);
8135 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8136 ins->type = cfg->r4_stack_type;
8138 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8139 ins->type = cfg->r4_stack_type;
8140 ins->dreg = alloc_dreg (cfg, STACK_R8);
8142 MONO_ADD_INS (cfg->cbb, ins);
8152 gboolean use_aotconst = FALSE;
8154 #ifdef TARGET_POWERPC
8155 /* FIXME: Clean this up */
8156 if (cfg->compile_aot)
8157 use_aotconst = TRUE;
8160 /* FIXME: we should really allocate this only late in the compilation process */
8161 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8163 CHECK_STACK_OVF (1);
8169 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8171 dreg = alloc_freg (cfg);
8172 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8173 ins->type = STACK_R8;
8175 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8176 ins->type = STACK_R8;
8177 ins->dreg = alloc_dreg (cfg, STACK_R8);
8179 MONO_ADD_INS (cfg->cbb, ins);
8188 MonoInst *temp, *store;
8190 CHECK_STACK_OVF (1);
8194 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8195 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8197 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8200 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8213 if (sp [0]->type == STACK_R8)
8214 /* we need to pop the value from the x86 FP stack */
8215 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8220 MonoMethodSignature *fsig;
8223 INLINE_FAILURE ("jmp");
8224 GSHAREDVT_FAILURE (*ip);
8227 if (stack_start != sp)
8229 token = read32 (ip + 1);
8230 /* FIXME: check the signature matches */
8231 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8234 if (cfg->gshared && mono_method_check_context_used (cmethod))
8235 GENERIC_SHARING_FAILURE (CEE_JMP);
8237 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8239 fsig = mono_method_signature (cmethod);
8240 n = fsig->param_count + fsig->hasthis;
8241 if (cfg->llvm_only) {
8244 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8245 for (i = 0; i < n; ++i)
8246 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8247 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8249 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8250 * have to emit a normal return since llvm expects it.
8253 emit_setret (cfg, ins);
8254 MONO_INST_NEW (cfg, ins, OP_BR);
8255 ins->inst_target_bb = end_bblock;
8256 MONO_ADD_INS (cfg->cbb, ins);
8257 link_bblock (cfg, cfg->cbb, end_bblock);
8260 } else if (cfg->backend->have_op_tail_call) {
8261 /* Handle tail calls similarly to calls */
8264 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8265 call->method = cmethod;
8266 call->tail_call = TRUE;
8267 call->signature = mono_method_signature (cmethod);
8268 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8269 call->inst.inst_p0 = cmethod;
8270 for (i = 0; i < n; ++i)
8271 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8273 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8274 call->vret_var = cfg->vret_addr;
8276 mono_arch_emit_call (cfg, call);
8277 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8278 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8280 for (i = 0; i < num_args; ++i)
8281 /* Prevent arguments from being optimized away */
8282 arg_array [i]->flags |= MONO_INST_VOLATILE;
8284 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8285 ins = (MonoInst*)call;
8286 ins->inst_p0 = cmethod;
8287 MONO_ADD_INS (cfg->cbb, ins);
8291 start_new_bblock = 1;
8296 MonoMethodSignature *fsig;
8299 token = read32 (ip + 1);
8303 //GSHAREDVT_FAILURE (*ip);
8308 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8311 if (method->dynamic && fsig->pinvoke) {
8315 * This is a call through a function pointer using a pinvoke
8316 * signature. Have to create a wrapper and call that instead.
8317 * FIXME: This is very slow, need to create a wrapper at JIT time
8318 * instead based on the signature.
8320 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8321 EMIT_NEW_PCONST (cfg, args [1], fsig);
8323 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8326 n = fsig->param_count + fsig->hasthis;
8330 //g_assert (!virtual_ || fsig->hasthis);
8334 inline_costs += 10 * num_calls++;
8337 * Making generic calls out of gsharedvt methods.
8338 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8339 * patching gshared method addresses into a gsharedvt method.
8341 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8343 * We pass the address to the gsharedvt trampoline in the rgctx reg
8345 MonoInst *callee = addr;
8347 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8349 GSHAREDVT_FAILURE (*ip);
8353 GSHAREDVT_FAILURE (*ip);
8355 addr = emit_get_rgctx_sig (cfg, context_used,
8356 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8357 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8361 /* Prevent inlining of methods with indirect calls */
8362 INLINE_FAILURE ("indirect call");
8364 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8365 MonoJumpInfoType info_type;
8369 * Instead of emitting an indirect call, emit a direct call
8370 * with the contents of the aotconst as the patch info.
8372 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8373 info_type = (MonoJumpInfoType)addr->inst_c1;
8374 info_data = addr->inst_p0;
8376 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8377 info_data = addr->inst_right->inst_left;
8380 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8381 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8384 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8385 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8390 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8394 /* End of call, INS should contain the result of the call, if any */
8396 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8398 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8401 CHECK_CFG_EXCEPTION;
8405 constrained_class = NULL;
8409 case CEE_CALLVIRT: {
8410 MonoInst *addr = NULL;
8411 MonoMethodSignature *fsig = NULL;
8413 int virtual_ = *ip == CEE_CALLVIRT;
8414 gboolean pass_imt_from_rgctx = FALSE;
8415 MonoInst *imt_arg = NULL;
8416 MonoInst *keep_this_alive = NULL;
8417 gboolean pass_vtable = FALSE;
8418 gboolean pass_mrgctx = FALSE;
8419 MonoInst *vtable_arg = NULL;
8420 gboolean check_this = FALSE;
8421 gboolean supported_tail_call = FALSE;
8422 gboolean tail_call = FALSE;
8423 gboolean need_seq_point = FALSE;
8424 guint32 call_opcode = *ip;
8425 gboolean emit_widen = TRUE;
8426 gboolean push_res = TRUE;
8427 gboolean skip_ret = FALSE;
8428 gboolean delegate_invoke = FALSE;
8429 gboolean direct_icall = FALSE;
8430 gboolean constrained_partial_call = FALSE;
8431 MonoMethod *cil_method;
8434 token = read32 (ip + 1);
8438 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8441 cil_method = cmethod;
8443 if (constrained_class) {
8444 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8445 if (!mini_is_gsharedvt_klass (constrained_class)) {
8446 g_assert (!cmethod->klass->valuetype);
8447 if (!mini_type_is_reference (&constrained_class->byval_arg))
8448 constrained_partial_call = TRUE;
8452 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8453 if (cfg->verbose_level > 2)
8454 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8455 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8456 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8458 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8462 if (cfg->verbose_level > 2)
8463 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8465 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8467 * This is needed since get_method_constrained can't find
8468 * the method in klass representing a type var.
8469 * The type var is guaranteed to be a reference type in this
8472 if (!mini_is_gsharedvt_klass (constrained_class))
8473 g_assert (!cmethod->klass->valuetype);
8475 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8480 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8481 /* Use the corresponding method from the base type to avoid boxing */
8482 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8483 g_assert (base_type);
8484 constrained_class = mono_class_from_mono_type (base_type);
8485 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8490 if (!dont_verify && !cfg->skip_visibility) {
8491 MonoMethod *target_method = cil_method;
8492 if (method->is_inflated) {
8493 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8496 if (!mono_method_can_access_method (method_definition, target_method) &&
8497 !mono_method_can_access_method (method, cil_method))
8498 emit_method_access_failure (cfg, method, cil_method);
8501 if (mono_security_core_clr_enabled ())
8502 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8504 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8505 /* MS.NET seems to silently convert this to a callvirt */
8510 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8511 * converts to a callvirt.
8513 * tests/bug-515884.il is an example of this behavior
8515 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8516 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8517 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8521 if (!cmethod->klass->inited)
8522 if (!mono_class_init (cmethod->klass))
8523 TYPE_LOAD_ERROR (cmethod->klass);
8525 fsig = mono_method_signature (cmethod);
8528 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8529 mini_class_is_system_array (cmethod->klass)) {
8530 array_rank = cmethod->klass->rank;
8531 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8532 direct_icall = TRUE;
8533 } else if (fsig->pinvoke) {
8534 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8535 fsig = mono_method_signature (wrapper);
8536 } else if (constrained_class) {
8538 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8542 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8543 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8545 /* See code below */
8546 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8547 MonoBasicBlock *tbb;
8549 GET_BBLOCK (cfg, tbb, ip + 5);
8550 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8552 * We want to extend the try block to cover the call, but we can't do it if the
8553 * call is made directly since its followed by an exception check.
8555 direct_icall = FALSE;
8559 mono_save_token_info (cfg, image, token, cil_method);
8561 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8562 need_seq_point = TRUE;
8564 /* Don't support calls made using type arguments for now */
8566 if (cfg->gsharedvt) {
8567 if (mini_is_gsharedvt_signature (fsig))
8568 GSHAREDVT_FAILURE (*ip);
8572 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8573 g_assert_not_reached ();
8575 n = fsig->param_count + fsig->hasthis;
8577 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8581 g_assert (!mono_method_check_context_used (cmethod));
8585 //g_assert (!virtual_ || fsig->hasthis);
8590 * We have the `constrained.' prefix opcode.
8592 if (constrained_class) {
8593 if (mini_is_gsharedvt_klass (constrained_class)) {
8594 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8595 /* The 'Own method' case below */
8596 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8597 /* 'The type parameter is instantiated as a reference type' case below. */
8599 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8600 CHECK_CFG_EXCEPTION;
8606 if (constrained_partial_call) {
8607 gboolean need_box = TRUE;
8610 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8611 * called method is not known at compile time either. The called method could end up being
8612 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8613 * to box the receiver.
8614 * A simple solution would be to box always and make a normal virtual call, but that would
8615 * be bad performance wise.
8617 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8619 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8624 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8625 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8626 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8627 ins->klass = constrained_class;
8628 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8629 CHECK_CFG_EXCEPTION;
8630 } else if (need_box) {
8632 MonoBasicBlock *is_ref_bb, *end_bb;
8633 MonoInst *nonbox_call;
8636 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8638 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8639 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8641 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8643 NEW_BBLOCK (cfg, is_ref_bb);
8644 NEW_BBLOCK (cfg, end_bb);
8646 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8651 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8656 MONO_START_BB (cfg, is_ref_bb);
8657 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8658 ins->klass = constrained_class;
8659 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8660 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8664 MONO_START_BB (cfg, end_bb);
8667 nonbox_call->dreg = ins->dreg;
8670 g_assert (mono_class_is_interface (cmethod->klass));
8671 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8672 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8675 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8677 * The type parameter is instantiated as a valuetype,
8678 * but that type doesn't override the method we're
8679 * calling, so we need to box `this'.
8681 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8682 ins->klass = constrained_class;
8683 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8684 CHECK_CFG_EXCEPTION;
8685 } else if (!constrained_class->valuetype) {
8686 int dreg = alloc_ireg_ref (cfg);
8689 * The type parameter is instantiated as a reference
8690 * type. We have a managed pointer on the stack, so
8691 * we need to dereference it here.
8693 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8694 ins->type = STACK_OBJ;
8697 if (cmethod->klass->valuetype) {
8700 /* Interface method */
8703 mono_class_setup_vtable (constrained_class);
8704 CHECK_TYPELOAD (constrained_class);
8705 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8707 TYPE_LOAD_ERROR (constrained_class);
8708 slot = mono_method_get_vtable_slot (cmethod);
8710 TYPE_LOAD_ERROR (cmethod->klass);
8711 cmethod = constrained_class->vtable [ioffset + slot];
8713 if (cmethod->klass == mono_defaults.enum_class) {
8714 /* Enum implements some interfaces, so treat this as the first case */
8715 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8716 ins->klass = constrained_class;
8717 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8718 CHECK_CFG_EXCEPTION;
8723 constrained_class = NULL;
8726 if (check_call_signature (cfg, fsig, sp))
8729 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8730 delegate_invoke = TRUE;
8732 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8733 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8734 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8742 * If the callee is a shared method, then its static cctor
8743 * might not get called after the call was patched.
8745 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8746 emit_class_init (cfg, cmethod->klass);
8747 CHECK_TYPELOAD (cmethod->klass);
8750 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8753 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8755 context_used = mini_method_check_context_used (cfg, cmethod);
8757 if (context_used && mono_class_is_interface (cmethod->klass)) {
8758 /* Generic method interface
8759 calls are resolved via a
8760 helper function and don't
8762 if (!cmethod_context || !cmethod_context->method_inst)
8763 pass_imt_from_rgctx = TRUE;
8767 * If a shared method calls another
8768 * shared method then the caller must
8769 * have a generic sharing context
8770 * because the magic trampoline
8771 * requires it. FIXME: We shouldn't
8772 * have to force the vtable/mrgctx
8773 * variable here. Instead there
8774 * should be a flag in the cfg to
8775 * request a generic sharing context.
8778 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8779 mono_get_vtable_var (cfg);
8784 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8786 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8788 CHECK_TYPELOAD (cmethod->klass);
8789 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8794 g_assert (!vtable_arg);
8796 if (!cfg->compile_aot) {
8798 * emit_get_rgctx_method () calls mono_class_vtable () so check
8799 * for type load errors before.
8801 mono_class_setup_vtable (cmethod->klass);
8802 CHECK_TYPELOAD (cmethod->klass);
8805 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8807 /* !marshalbyref is needed to properly handle generic methods + remoting */
8808 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8809 MONO_METHOD_IS_FINAL (cmethod)) &&
8810 !mono_class_is_marshalbyref (cmethod->klass)) {
8817 if (pass_imt_from_rgctx) {
8818 g_assert (!pass_vtable);
8820 imt_arg = emit_get_rgctx_method (cfg, context_used,
8821 cmethod, MONO_RGCTX_INFO_METHOD);
8825 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8827 /* Calling virtual generic methods */
8828 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8829 !(MONO_METHOD_IS_FINAL (cmethod) &&
8830 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8831 fsig->generic_param_count &&
8832 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8834 MonoInst *this_temp, *this_arg_temp, *store;
8835 MonoInst *iargs [4];
8837 g_assert (fsig->is_inflated);
8839 /* Prevent inlining of methods that contain indirect calls */
8840 INLINE_FAILURE ("virtual generic call");
8842 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8843 GSHAREDVT_FAILURE (*ip);
8845 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8846 g_assert (!imt_arg);
8848 g_assert (cmethod->is_inflated);
8849 imt_arg = emit_get_rgctx_method (cfg, context_used,
8850 cmethod, MONO_RGCTX_INFO_METHOD);
8851 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8853 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8854 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8855 MONO_ADD_INS (cfg->cbb, store);
8857 /* FIXME: This should be a managed pointer */
8858 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8860 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8861 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8862 cmethod, MONO_RGCTX_INFO_METHOD);
8863 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8864 addr = mono_emit_jit_icall (cfg,
8865 mono_helper_compile_generic_method, iargs);
8867 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8869 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8876 * Implement a workaround for the inherent races involved in locking:
8882 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8883 * try block, the Exit () won't be executed, see:
8884 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8885 * To work around this, we extend such try blocks to include the last x bytes
8886 * of the Monitor.Enter () call.
8888 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8889 MonoBasicBlock *tbb;
8891 GET_BBLOCK (cfg, tbb, ip + 5);
8893 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8894 * from Monitor.Enter like ArgumentNullException.
8896 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8897 /* Mark this bblock as needing to be extended */
8898 tbb->extend_try_block = TRUE;
8902 /* Conversion to a JIT intrinsic */
8903 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8904 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8905 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8913 if ((cfg->opt & MONO_OPT_INLINE) &&
8914 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8915 mono_method_check_inlining (cfg, cmethod)) {
8917 gboolean always = FALSE;
8919 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8920 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8921 /* Prevent inlining of methods that call wrappers */
8922 INLINE_FAILURE ("wrapper call");
8923 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8927 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8929 cfg->real_offset += 5;
8931 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8932 /* *sp is already set by inline_method */
8937 inline_costs += costs;
8943 /* Tail recursion elimination */
8944 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8945 gboolean has_vtargs = FALSE;
8948 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8949 INLINE_FAILURE ("tail call");
8951 /* keep it simple */
8952 for (i = fsig->param_count - 1; i >= 0; i--) {
8953 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8958 if (need_seq_point) {
8959 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8960 need_seq_point = FALSE;
8962 for (i = 0; i < n; ++i)
8963 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8964 MONO_INST_NEW (cfg, ins, OP_BR);
8965 MONO_ADD_INS (cfg->cbb, ins);
8966 tblock = start_bblock->out_bb [0];
8967 link_bblock (cfg, cfg->cbb, tblock);
8968 ins->inst_target_bb = tblock;
8969 start_new_bblock = 1;
8971 /* skip the CEE_RET, too */
8972 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
8979 inline_costs += 10 * num_calls++;
8982 * Synchronized wrappers.
8983 * It's hard to determine where to replace a method with its synchronized
8984 * wrapper without causing an infinite recursion. The current solution is
8985 * to add the synchronized wrapper in the trampolines, and to
8986 * change the called method to a dummy wrapper, and resolve that wrapper
8987 * to the real method in mono_jit_compile_method ().
8989 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8990 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8991 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8992 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8996 * Making generic calls out of gsharedvt methods.
8997 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8998 * patching gshared method addresses into a gsharedvt method.
9000 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9001 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9002 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9003 MonoRgctxInfoType info_type;
9006 //if (mono_class_is_interface (cmethod->klass))
9007 //GSHAREDVT_FAILURE (*ip);
9008 // disable for possible remoting calls
9009 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9010 GSHAREDVT_FAILURE (*ip);
9011 if (fsig->generic_param_count) {
9012 /* virtual generic call */
9013 g_assert (!imt_arg);
9014 /* Same as the virtual generic case above */
9015 imt_arg = emit_get_rgctx_method (cfg, context_used,
9016 cmethod, MONO_RGCTX_INFO_METHOD);
9017 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9019 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9020 /* This can happen when we call a fully instantiated iface method */
9021 imt_arg = emit_get_rgctx_method (cfg, context_used,
9022 cmethod, MONO_RGCTX_INFO_METHOD);
9027 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9028 keep_this_alive = sp [0];
9030 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9031 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9033 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9034 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9036 if (cfg->llvm_only) {
9037 // FIXME: Avoid initializing vtable_arg
9038 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9040 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9045 /* Generic sharing */
9048 * Use this if the callee is gsharedvt sharable too, since
9049 * at runtime we might find an instantiation so the call cannot
9050 * be patched (the 'no_patch' code path in mini-trampolines.c).
9052 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9053 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9054 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9055 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9056 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9057 INLINE_FAILURE ("gshared");
9059 g_assert (cfg->gshared && cmethod);
9063 * We are compiling a call to a
9064 * generic method from shared code,
9065 * which means that we have to look up
9066 * the method in the rgctx and do an
9070 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9072 if (cfg->llvm_only) {
9073 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9074 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9076 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9077 // FIXME: Avoid initializing imt_arg/vtable_arg
9078 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9080 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9081 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9086 /* Direct calls to icalls */
9088 MonoMethod *wrapper;
9091 /* Inline the wrapper */
9092 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9094 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9095 g_assert (costs > 0);
9096 cfg->real_offset += 5;
9098 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9099 /* *sp is already set by inline_method */
9104 inline_costs += costs;
9113 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9114 MonoInst *val = sp [fsig->param_count];
9116 if (val->type == STACK_OBJ) {
9117 MonoInst *iargs [2];
9122 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9125 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9126 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9127 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9128 mini_emit_write_barrier (cfg, addr, val);
9129 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9130 GSHAREDVT_FAILURE (*ip);
9131 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9132 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9134 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9135 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9136 if (!cmethod->klass->element_class->valuetype && !readonly)
9137 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9138 CHECK_TYPELOAD (cmethod->klass);
9141 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9144 g_assert_not_reached ();
9151 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9155 /* Tail prefix / tail call optimization */
9157 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9158 /* FIXME: runtime generic context pointer for jumps? */
9159 /* FIXME: handle this for generic sharing eventually */
9160 if ((ins_flag & MONO_INST_TAILCALL) &&
9161 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9162 supported_tail_call = TRUE;
9164 if (supported_tail_call) {
9167 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9168 INLINE_FAILURE ("tail call");
9170 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9172 if (cfg->backend->have_op_tail_call) {
9173 /* Handle tail calls similarly to normal calls */
9176 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9178 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9179 call->tail_call = TRUE;
9180 call->method = cmethod;
9181 call->signature = mono_method_signature (cmethod);
9184 * We implement tail calls by storing the actual arguments into the
9185 * argument variables, then emitting a CEE_JMP.
9187 for (i = 0; i < n; ++i) {
9188 /* Prevent argument from being register allocated */
9189 arg_array [i]->flags |= MONO_INST_VOLATILE;
9190 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9192 ins = (MonoInst*)call;
9193 ins->inst_p0 = cmethod;
9194 ins->inst_p1 = arg_array [0];
9195 MONO_ADD_INS (cfg->cbb, ins);
9196 link_bblock (cfg, cfg->cbb, end_bblock);
9197 start_new_bblock = 1;
9199 // FIXME: Eliminate unreachable epilogs
9202 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9203 * only reachable from this call.
9205 GET_BBLOCK (cfg, tblock, ip + 5);
9206 if (tblock == cfg->cbb || tblock->in_count == 0)
9215 * Virtual calls in llvm-only mode.
9217 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9218 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9223 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9224 INLINE_FAILURE ("call");
9225 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9226 imt_arg, vtable_arg);
9228 if (tail_call && !cfg->llvm_only) {
9229 link_bblock (cfg, cfg->cbb, end_bblock);
9230 start_new_bblock = 1;
9232 // FIXME: Eliminate unreachable epilogs
9235 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9236 * only reachable from this call.
9238 GET_BBLOCK (cfg, tblock, ip + 5);
9239 if (tblock == cfg->cbb || tblock->in_count == 0)
9246 /* End of call, INS should contain the result of the call, if any */
9248 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9251 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9256 if (keep_this_alive) {
9257 MonoInst *dummy_use;
9259 /* See mono_emit_method_call_full () */
9260 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9263 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9265 * Clang can convert these calls to tail calls which screw up the stack
9266 * walk. This happens even when the -fno-optimize-sibling-calls
9267 * option is passed to clang.
9268 * Work around this by emitting a dummy call.
9270 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9273 CHECK_CFG_EXCEPTION;
9277 g_assert (*ip == CEE_RET);
9281 constrained_class = NULL;
9283 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9287 if (cfg->method != method) {
9288 /* return from inlined method */
9290 * If in_count == 0, that means the ret is unreachable due to
9291 * being preceded by a throw. In that case, inline_method () will
9292 * handle setting the return value
9293 * (test case: test_0_inline_throw ()).
9295 if (return_var && cfg->cbb->in_count) {
9296 MonoType *ret_type = mono_method_signature (method)->ret;
9302 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9305 //g_assert (returnvar != -1);
9306 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9307 cfg->ret_var_set = TRUE;
9310 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9312 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9316 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9318 if (seq_points && !sym_seq_points) {
9320 * Place a seq point here too even though the IL stack is not
9321 * empty, so a step over on
9324 * will work correctly.
9326 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9327 MONO_ADD_INS (cfg->cbb, ins);
9330 g_assert (!return_var);
9334 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9337 emit_setret (cfg, *sp);
9340 if (sp != stack_start)
9342 MONO_INST_NEW (cfg, ins, OP_BR);
9344 ins->inst_target_bb = end_bblock;
9345 MONO_ADD_INS (cfg->cbb, ins);
9346 link_bblock (cfg, cfg->cbb, end_bblock);
9347 start_new_bblock = 1;
9351 MONO_INST_NEW (cfg, ins, OP_BR);
9353 target = ip + 1 + (signed char)(*ip);
9355 GET_BBLOCK (cfg, tblock, target);
9356 link_bblock (cfg, cfg->cbb, tblock);
9357 ins->inst_target_bb = tblock;
9358 if (sp != stack_start) {
9359 handle_stack_args (cfg, stack_start, sp - stack_start);
9361 CHECK_UNVERIFIABLE (cfg);
9363 MONO_ADD_INS (cfg->cbb, ins);
9364 start_new_bblock = 1;
9365 inline_costs += BRANCH_COST;
9379 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9381 target = ip + 1 + *(signed char*)ip;
9387 inline_costs += BRANCH_COST;
9391 MONO_INST_NEW (cfg, ins, OP_BR);
9394 target = ip + 4 + (gint32)read32(ip);
9396 GET_BBLOCK (cfg, tblock, target);
9397 link_bblock (cfg, cfg->cbb, tblock);
9398 ins->inst_target_bb = tblock;
9399 if (sp != stack_start) {
9400 handle_stack_args (cfg, stack_start, sp - stack_start);
9402 CHECK_UNVERIFIABLE (cfg);
9405 MONO_ADD_INS (cfg->cbb, ins);
9407 start_new_bblock = 1;
9408 inline_costs += BRANCH_COST;
9415 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9416 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9417 guint32 opsize = is_short ? 1 : 4;
9419 CHECK_OPSIZE (opsize);
9421 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9424 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9429 GET_BBLOCK (cfg, tblock, target);
9430 link_bblock (cfg, cfg->cbb, tblock);
9431 GET_BBLOCK (cfg, tblock, ip);
9432 link_bblock (cfg, cfg->cbb, tblock);
9434 if (sp != stack_start) {
9435 handle_stack_args (cfg, stack_start, sp - stack_start);
9436 CHECK_UNVERIFIABLE (cfg);
9439 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9440 cmp->sreg1 = sp [0]->dreg;
9441 type_from_op (cfg, cmp, sp [0], NULL);
9444 #if SIZEOF_REGISTER == 4
9445 if (cmp->opcode == OP_LCOMPARE_IMM) {
9446 /* Convert it to OP_LCOMPARE */
9447 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9448 ins->type = STACK_I8;
9449 ins->dreg = alloc_dreg (cfg, STACK_I8);
9451 MONO_ADD_INS (cfg->cbb, ins);
9452 cmp->opcode = OP_LCOMPARE;
9453 cmp->sreg2 = ins->dreg;
9456 MONO_ADD_INS (cfg->cbb, cmp);
9458 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9459 type_from_op (cfg, ins, sp [0], NULL);
9460 MONO_ADD_INS (cfg->cbb, ins);
9461 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9462 GET_BBLOCK (cfg, tblock, target);
9463 ins->inst_true_bb = tblock;
9464 GET_BBLOCK (cfg, tblock, ip);
9465 ins->inst_false_bb = tblock;
9466 start_new_bblock = 2;
9469 inline_costs += BRANCH_COST;
9484 MONO_INST_NEW (cfg, ins, *ip);
9486 target = ip + 4 + (gint32)read32(ip);
9492 inline_costs += BRANCH_COST;
9496 MonoBasicBlock **targets;
9497 MonoBasicBlock *default_bblock;
9498 MonoJumpInfoBBTable *table;
9499 int offset_reg = alloc_preg (cfg);
9500 int target_reg = alloc_preg (cfg);
9501 int table_reg = alloc_preg (cfg);
9502 int sum_reg = alloc_preg (cfg);
9503 gboolean use_op_switch;
9507 n = read32 (ip + 1);
9510 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9514 CHECK_OPSIZE (n * sizeof (guint32));
9515 target = ip + n * sizeof (guint32);
9517 GET_BBLOCK (cfg, default_bblock, target);
9518 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9520 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9521 for (i = 0; i < n; ++i) {
9522 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9523 targets [i] = tblock;
9524 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9528 if (sp != stack_start) {
9530 * Link the current bb with the targets as well, so handle_stack_args
9531 * will set their in_stack correctly.
9533 link_bblock (cfg, cfg->cbb, default_bblock);
9534 for (i = 0; i < n; ++i)
9535 link_bblock (cfg, cfg->cbb, targets [i]);
9537 handle_stack_args (cfg, stack_start, sp - stack_start);
9539 CHECK_UNVERIFIABLE (cfg);
9541 /* Undo the links */
9542 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9543 for (i = 0; i < n; ++i)
9544 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9550 for (i = 0; i < n; ++i)
9551 link_bblock (cfg, cfg->cbb, targets [i]);
9553 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9554 table->table = targets;
9555 table->table_size = n;
9557 use_op_switch = FALSE;
9559 /* ARM implements SWITCH statements differently */
9560 /* FIXME: Make it use the generic implementation */
9561 if (!cfg->compile_aot)
9562 use_op_switch = TRUE;
9565 if (COMPILE_LLVM (cfg))
9566 use_op_switch = TRUE;
9568 cfg->cbb->has_jump_table = 1;
9570 if (use_op_switch) {
9571 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9572 ins->sreg1 = src1->dreg;
9573 ins->inst_p0 = table;
9574 ins->inst_many_bb = targets;
9575 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9576 MONO_ADD_INS (cfg->cbb, ins);
9578 if (sizeof (gpointer) == 8)
9579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9583 #if SIZEOF_REGISTER == 8
9584 /* The upper word might not be zero, and we add it to a 64 bit address later */
9585 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9588 if (cfg->compile_aot) {
9589 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9591 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9592 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9593 ins->inst_p0 = table;
9594 ins->dreg = table_reg;
9595 MONO_ADD_INS (cfg->cbb, ins);
9598 /* FIXME: Use load_memindex */
9599 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9601 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9603 start_new_bblock = 1;
9604 inline_costs += (BRANCH_COST * 2);
9624 dreg = alloc_freg (cfg);
9627 dreg = alloc_lreg (cfg);
9630 dreg = alloc_ireg_ref (cfg);
9633 dreg = alloc_preg (cfg);
9636 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9637 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9638 if (*ip == CEE_LDIND_R4)
9639 ins->type = cfg->r4_stack_type;
9640 ins->flags |= ins_flag;
9641 MONO_ADD_INS (cfg->cbb, ins);
9643 if (ins_flag & MONO_INST_VOLATILE) {
9644 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9645 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9661 if (ins_flag & MONO_INST_VOLATILE) {
9662 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9663 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9666 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9667 ins->flags |= ins_flag;
9670 MONO_ADD_INS (cfg->cbb, ins);
9672 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9673 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9682 MONO_INST_NEW (cfg, ins, (*ip));
9684 ins->sreg1 = sp [0]->dreg;
9685 ins->sreg2 = sp [1]->dreg;
9686 type_from_op (cfg, ins, sp [0], sp [1]);
9688 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9690 /* Use the immediate opcodes if possible */
9691 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9692 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9693 if (imm_opcode != -1) {
9694 ins->opcode = imm_opcode;
9695 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9698 NULLIFY_INS (sp [1]);
9702 MONO_ADD_INS ((cfg)->cbb, (ins));
9704 *sp++ = mono_decompose_opcode (cfg, ins);
9721 MONO_INST_NEW (cfg, ins, (*ip));
9723 ins->sreg1 = sp [0]->dreg;
9724 ins->sreg2 = sp [1]->dreg;
9725 type_from_op (cfg, ins, sp [0], sp [1]);
9727 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9728 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9730 /* FIXME: Pass opcode to is_inst_imm */
9732 /* Use the immediate opcodes if possible */
9733 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9734 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9735 if (imm_opcode != -1) {
9736 ins->opcode = imm_opcode;
9737 if (sp [1]->opcode == OP_I8CONST) {
9738 #if SIZEOF_REGISTER == 8
9739 ins->inst_imm = sp [1]->inst_l;
9741 ins->inst_ls_word = sp [1]->inst_ls_word;
9742 ins->inst_ms_word = sp [1]->inst_ms_word;
9746 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9749 /* Might be followed by an instruction added by add_widen_op */
9750 if (sp [1]->next == NULL)
9751 NULLIFY_INS (sp [1]);
9754 MONO_ADD_INS ((cfg)->cbb, (ins));
9756 *sp++ = mono_decompose_opcode (cfg, ins);
9769 case CEE_CONV_OVF_I8:
9770 case CEE_CONV_OVF_U8:
9774 /* Special case this earlier so we have long constants in the IR */
9775 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9776 int data = sp [-1]->inst_c0;
9777 sp [-1]->opcode = OP_I8CONST;
9778 sp [-1]->type = STACK_I8;
9779 #if SIZEOF_REGISTER == 8
9780 if ((*ip) == CEE_CONV_U8)
9781 sp [-1]->inst_c0 = (guint32)data;
9783 sp [-1]->inst_c0 = data;
9785 sp [-1]->inst_ls_word = data;
9786 if ((*ip) == CEE_CONV_U8)
9787 sp [-1]->inst_ms_word = 0;
9789 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9791 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9798 case CEE_CONV_OVF_I4:
9799 case CEE_CONV_OVF_I1:
9800 case CEE_CONV_OVF_I2:
9801 case CEE_CONV_OVF_I:
9802 case CEE_CONV_OVF_U:
9805 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9806 ADD_UNOP (CEE_CONV_OVF_I8);
9813 case CEE_CONV_OVF_U1:
9814 case CEE_CONV_OVF_U2:
9815 case CEE_CONV_OVF_U4:
9818 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9819 ADD_UNOP (CEE_CONV_OVF_U8);
9826 case CEE_CONV_OVF_I1_UN:
9827 case CEE_CONV_OVF_I2_UN:
9828 case CEE_CONV_OVF_I4_UN:
9829 case CEE_CONV_OVF_I8_UN:
9830 case CEE_CONV_OVF_U1_UN:
9831 case CEE_CONV_OVF_U2_UN:
9832 case CEE_CONV_OVF_U4_UN:
9833 case CEE_CONV_OVF_U8_UN:
9834 case CEE_CONV_OVF_I_UN:
9835 case CEE_CONV_OVF_U_UN:
9842 CHECK_CFG_EXCEPTION;
9846 case CEE_ADD_OVF_UN:
9848 case CEE_MUL_OVF_UN:
9850 case CEE_SUB_OVF_UN:
9856 GSHAREDVT_FAILURE (*ip);
9859 token = read32 (ip + 1);
9860 klass = mini_get_class (method, token, generic_context);
9861 CHECK_TYPELOAD (klass);
9863 if (generic_class_is_reference_type (cfg, klass)) {
9864 MonoInst *store, *load;
9865 int dreg = alloc_ireg_ref (cfg);
9867 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9868 load->flags |= ins_flag;
9869 MONO_ADD_INS (cfg->cbb, load);
9871 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9872 store->flags |= ins_flag;
9873 MONO_ADD_INS (cfg->cbb, store);
9875 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9876 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9878 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9890 token = read32 (ip + 1);
9891 klass = mini_get_class (method, token, generic_context);
9892 CHECK_TYPELOAD (klass);
9894 /* Optimize the common ldobj+stloc combination */
9904 loc_index = ip [5] - CEE_STLOC_0;
9911 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9912 CHECK_LOCAL (loc_index);
9914 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9915 ins->dreg = cfg->locals [loc_index]->dreg;
9916 ins->flags |= ins_flag;
9919 if (ins_flag & MONO_INST_VOLATILE) {
9920 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9921 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9927 /* Optimize the ldobj+stobj combination */
9928 /* The reference case ends up being a load+store anyway */
9929 /* Skip this if the operation is volatile. */
9930 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9935 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9943 ins->flags |= ins_flag;
9946 if (ins_flag & MONO_INST_VOLATILE) {
9947 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9948 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9957 CHECK_STACK_OVF (1);
9959 n = read32 (ip + 1);
9961 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9962 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9963 ins->type = STACK_OBJ;
9966 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9967 MonoInst *iargs [1];
9968 char *str = (char *)mono_method_get_wrapper_data (method, n);
9970 if (cfg->compile_aot)
9971 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9973 EMIT_NEW_PCONST (cfg, iargs [0], str);
9974 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9976 if (cfg->opt & MONO_OPT_SHARED) {
9977 MonoInst *iargs [3];
9979 if (cfg->compile_aot) {
9980 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9982 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9983 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9984 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9985 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
9986 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9989 if (cfg->cbb->out_of_line) {
9990 MonoInst *iargs [2];
9992 if (image == mono_defaults.corlib) {
9994 * Avoid relocations in AOT and save some space by using a
9995 * version of helper_ldstr specialized to mscorlib.
9997 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9998 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10000 /* Avoid creating the string object */
10001 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10002 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10003 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10007 if (cfg->compile_aot) {
10008 NEW_LDSTRCONST (cfg, ins, image, n);
10010 MONO_ADD_INS (cfg->cbb, ins);
10013 NEW_PCONST (cfg, ins, NULL);
10014 ins->type = STACK_OBJ;
10015 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10019 OUT_OF_MEMORY_FAILURE;
10022 MONO_ADD_INS (cfg->cbb, ins);
10031 MonoInst *iargs [2];
10032 MonoMethodSignature *fsig;
10035 MonoInst *vtable_arg = NULL;
10038 token = read32 (ip + 1);
10039 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10042 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10045 mono_save_token_info (cfg, image, token, cmethod);
10047 if (!mono_class_init (cmethod->klass))
10048 TYPE_LOAD_ERROR (cmethod->klass);
10050 context_used = mini_method_check_context_used (cfg, cmethod);
10052 if (mono_security_core_clr_enabled ())
10053 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10055 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10056 emit_class_init (cfg, cmethod->klass);
10057 CHECK_TYPELOAD (cmethod->klass);
10061 if (cfg->gsharedvt) {
10062 if (mini_is_gsharedvt_variable_signature (sig))
10063 GSHAREDVT_FAILURE (*ip);
10067 n = fsig->param_count;
10071 * Generate smaller code for the common newobj <exception> instruction in
10072 * argument checking code.
10074 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10075 is_exception_class (cmethod->klass) && n <= 2 &&
10076 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10077 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10078 MonoInst *iargs [3];
10082 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10085 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10088 iargs [1] = sp [0];
10089 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10092 iargs [1] = sp [0];
10093 iargs [2] = sp [1];
10094 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10097 g_assert_not_reached ();
10105 /* move the args to allow room for 'this' in the first position */
10111 /* check_call_signature () requires sp[0] to be set */
10112 this_ins.type = STACK_OBJ;
10113 sp [0] = &this_ins;
10114 if (check_call_signature (cfg, fsig, sp))
10119 if (mini_class_is_system_array (cmethod->klass)) {
10120 *sp = emit_get_rgctx_method (cfg, context_used,
10121 cmethod, MONO_RGCTX_INFO_METHOD);
10123 /* Avoid varargs in the common case */
10124 if (fsig->param_count == 1)
10125 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10126 else if (fsig->param_count == 2)
10127 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10128 else if (fsig->param_count == 3)
10129 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10130 else if (fsig->param_count == 4)
10131 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10133 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10134 } else if (cmethod->string_ctor) {
10135 g_assert (!context_used);
10136 g_assert (!vtable_arg);
10137 /* we simply pass a null pointer */
10138 EMIT_NEW_PCONST (cfg, *sp, NULL);
10139 /* now call the string ctor */
10140 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10142 if (cmethod->klass->valuetype) {
10143 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10144 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10145 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10150 * The code generated by mini_emit_virtual_call () expects
10151 * iargs [0] to be a boxed instance, but luckily the vcall
10152 * will be transformed into a normal call there.
10154 } else if (context_used) {
10155 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10158 MonoVTable *vtable = NULL;
10160 if (!cfg->compile_aot)
10161 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10162 CHECK_TYPELOAD (cmethod->klass);
10165 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10166 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10167 * As a workaround, we call class cctors before allocating objects.
10169 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10170 emit_class_init (cfg, cmethod->klass);
10171 if (cfg->verbose_level > 2)
10172 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10173 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10176 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10179 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10182 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10184 /* Now call the actual ctor */
10185 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10186 CHECK_CFG_EXCEPTION;
10189 if (alloc == NULL) {
10191 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10192 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10200 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10201 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10204 case CEE_CASTCLASS:
10209 token = read32 (ip + 1);
10210 klass = mini_get_class (method, token, generic_context);
10211 CHECK_TYPELOAD (klass);
10212 if (sp [0]->type != STACK_OBJ)
10215 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10216 ins->dreg = alloc_preg (cfg);
10217 ins->sreg1 = (*sp)->dreg;
10218 ins->klass = klass;
10219 ins->type = STACK_OBJ;
10220 MONO_ADD_INS (cfg->cbb, ins);
10222 CHECK_CFG_EXCEPTION;
10226 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10229 case CEE_UNBOX_ANY: {
10230 MonoInst *res, *addr;
10235 token = read32 (ip + 1);
10236 klass = mini_get_class (method, token, generic_context);
10237 CHECK_TYPELOAD (klass);
10239 mono_save_token_info (cfg, image, token, klass);
10241 context_used = mini_class_check_context_used (cfg, klass);
10243 if (mini_is_gsharedvt_klass (klass)) {
10244 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10246 } else if (generic_class_is_reference_type (cfg, klass)) {
10247 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10248 EMIT_NEW_PCONST (cfg, res, NULL);
10249 res->type = STACK_OBJ;
10251 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10252 res->dreg = alloc_preg (cfg);
10253 res->sreg1 = (*sp)->dreg;
10254 res->klass = klass;
10255 res->type = STACK_OBJ;
10256 MONO_ADD_INS (cfg->cbb, res);
10257 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10259 } else if (mono_class_is_nullable (klass)) {
10260 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10262 addr = handle_unbox (cfg, klass, sp, context_used);
10264 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10275 MonoClass *enum_class;
10276 MonoMethod *has_flag;
10282 token = read32 (ip + 1);
10283 klass = mini_get_class (method, token, generic_context);
10284 CHECK_TYPELOAD (klass);
10286 mono_save_token_info (cfg, image, token, klass);
10288 context_used = mini_class_check_context_used (cfg, klass);
10290 if (generic_class_is_reference_type (cfg, klass)) {
10296 if (klass == mono_defaults.void_class)
10298 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10300 /* frequent check in generic code: box (struct), brtrue */
10305 * <push int/long ptr>
10308 * constrained. MyFlags
10309 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10311 * If we find this sequence and the operand types on box and constrained
10312 * are equal, we can emit a specialized instruction sequence instead of
10313 * the very slow HasFlag () call.
10315 if ((cfg->opt & MONO_OPT_INTRINS) &&
10316 /* Cheap checks first. */
10317 ip + 5 + 6 + 5 < end &&
10318 ip [5] == CEE_PREFIX1 &&
10319 ip [6] == CEE_CONSTRAINED_ &&
10320 ip [11] == CEE_CALLVIRT &&
10321 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10322 mono_class_is_enum (klass) &&
10323 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10324 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10325 has_flag->klass == mono_defaults.enum_class &&
10326 !strcmp (has_flag->name, "HasFlag") &&
10327 has_flag->signature->hasthis &&
10328 has_flag->signature->param_count == 1) {
10329 CHECK_TYPELOAD (enum_class);
10331 if (enum_class == klass) {
10332 MonoInst *enum_this, *enum_flag;
10337 enum_this = sp [0];
10338 enum_flag = sp [1];
10340 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10345 // FIXME: LLVM can't handle the inconsistent bb linking
10346 if (!mono_class_is_nullable (klass) &&
10347 !mini_is_gsharedvt_klass (klass) &&
10348 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10349 (ip [5] == CEE_BRTRUE ||
10350 ip [5] == CEE_BRTRUE_S ||
10351 ip [5] == CEE_BRFALSE ||
10352 ip [5] == CEE_BRFALSE_S)) {
10353 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10355 MonoBasicBlock *true_bb, *false_bb;
10359 if (cfg->verbose_level > 3) {
10360 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10361 printf ("<box+brtrue opt>\n");
10366 case CEE_BRFALSE_S:
10369 target = ip + 1 + (signed char)(*ip);
10376 target = ip + 4 + (gint)(read32 (ip));
10380 g_assert_not_reached ();
10384 * We need to link both bblocks, since it is needed for handling stack
10385 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10386 * Branching to only one of them would lead to inconsistencies, so
10387 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10389 GET_BBLOCK (cfg, true_bb, target);
10390 GET_BBLOCK (cfg, false_bb, ip);
10392 mono_link_bblock (cfg, cfg->cbb, true_bb);
10393 mono_link_bblock (cfg, cfg->cbb, false_bb);
10395 if (sp != stack_start) {
10396 handle_stack_args (cfg, stack_start, sp - stack_start);
10398 CHECK_UNVERIFIABLE (cfg);
10401 if (COMPILE_LLVM (cfg)) {
10402 dreg = alloc_ireg (cfg);
10403 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10404 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10406 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10408 /* The JIT can't eliminate the iconst+compare */
10409 MONO_INST_NEW (cfg, ins, OP_BR);
10410 ins->inst_target_bb = is_true ? true_bb : false_bb;
10411 MONO_ADD_INS (cfg->cbb, ins);
10414 start_new_bblock = 1;
10418 *sp++ = handle_box (cfg, val, klass, context_used);
10420 CHECK_CFG_EXCEPTION;
10429 token = read32 (ip + 1);
10430 klass = mini_get_class (method, token, generic_context);
10431 CHECK_TYPELOAD (klass);
10433 mono_save_token_info (cfg, image, token, klass);
10435 context_used = mini_class_check_context_used (cfg, klass);
10437 if (mono_class_is_nullable (klass)) {
10440 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10441 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10445 ins = handle_unbox (cfg, klass, sp, context_used);
10458 MonoClassField *field;
10459 #ifndef DISABLE_REMOTING
10463 gboolean is_instance;
10465 gpointer addr = NULL;
10466 gboolean is_special_static;
10468 MonoInst *store_val = NULL;
10469 MonoInst *thread_ins;
10472 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10474 if (op == CEE_STFLD) {
10477 store_val = sp [1];
10482 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10484 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10487 if (op == CEE_STSFLD) {
10490 store_val = sp [0];
10495 token = read32 (ip + 1);
10496 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10497 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10498 klass = field->parent;
10501 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10504 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10505 FIELD_ACCESS_FAILURE (method, field);
10506 mono_class_init (klass);
10508 /* if the class is Critical then transparent code cannot access its fields */
10509 if (!is_instance && mono_security_core_clr_enabled ())
10510 ensure_method_is_allowed_to_access_field (cfg, method, field);
10512 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10513 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10514 if (mono_security_core_clr_enabled ())
10515 ensure_method_is_allowed_to_access_field (cfg, method, field);
10518 ftype = mono_field_get_type (field);
10521 * LDFLD etc. is usable on static fields as well, so convert those cases to
10524 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10536 g_assert_not_reached ();
10538 is_instance = FALSE;
10541 context_used = mini_class_check_context_used (cfg, klass);
10543 /* INSTANCE CASE */
10545 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10546 if (op == CEE_STFLD) {
10547 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10549 #ifndef DISABLE_REMOTING
10550 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10551 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10552 MonoInst *iargs [5];
10554 GSHAREDVT_FAILURE (op);
10556 iargs [0] = sp [0];
10557 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10558 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10559 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10561 iargs [4] = sp [1];
10563 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10564 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10565 iargs, ip, cfg->real_offset, TRUE);
10566 CHECK_CFG_EXCEPTION;
10567 g_assert (costs > 0);
10569 cfg->real_offset += 5;
10571 inline_costs += costs;
10573 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10578 MonoInst *store, *wbarrier_ptr_ins = NULL;
10580 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10582 if (ins_flag & MONO_INST_VOLATILE) {
10583 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10584 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10587 if (mini_is_gsharedvt_klass (klass)) {
10588 MonoInst *offset_ins;
10590 context_used = mini_class_check_context_used (cfg, klass);
10592 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10593 /* The value is offset by 1 */
10594 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10595 dreg = alloc_ireg_mp (cfg);
10596 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10597 wbarrier_ptr_ins = ins;
10598 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10599 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10601 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10603 if (sp [0]->opcode != OP_LDADDR)
10604 store->flags |= MONO_INST_FAULT;
10606 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10607 if (mini_is_gsharedvt_klass (klass)) {
10608 g_assert (wbarrier_ptr_ins);
10609 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10611 /* insert call to write barrier */
10615 dreg = alloc_ireg_mp (cfg);
10616 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10617 mini_emit_write_barrier (cfg, ptr, sp [1]);
10621 store->flags |= ins_flag;
10628 #ifndef DISABLE_REMOTING
10629 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10630 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10631 MonoInst *iargs [4];
10633 GSHAREDVT_FAILURE (op);
10635 iargs [0] = sp [0];
10636 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10637 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10638 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10639 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10640 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10641 iargs, ip, cfg->real_offset, TRUE);
10642 CHECK_CFG_EXCEPTION;
10643 g_assert (costs > 0);
10645 cfg->real_offset += 5;
10649 inline_costs += costs;
10651 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10657 if (sp [0]->type == STACK_VTYPE) {
10660 /* Have to compute the address of the variable */
10662 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10664 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10666 g_assert (var->klass == klass);
10668 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10672 if (op == CEE_LDFLDA) {
10673 if (sp [0]->type == STACK_OBJ) {
10674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10675 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10678 dreg = alloc_ireg_mp (cfg);
10680 if (mini_is_gsharedvt_klass (klass)) {
10681 MonoInst *offset_ins;
10683 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10684 /* The value is offset by 1 */
10685 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10686 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10688 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10690 ins->klass = mono_class_from_mono_type (field->type);
10691 ins->type = STACK_MP;
10696 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10698 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10699 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10708 if (mini_is_gsharedvt_klass (klass)) {
10709 MonoInst *offset_ins;
10711 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10712 /* The value is offset by 1 */
10713 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10714 dreg = alloc_ireg_mp (cfg);
10715 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10716 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10718 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10720 load->flags |= ins_flag;
10721 if (sp [0]->opcode != OP_LDADDR)
10722 load->flags |= MONO_INST_FAULT;
10734 context_used = mini_class_check_context_used (cfg, klass);
10736 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10737 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10741 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10742 * to be called here.
10744 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10745 mono_class_vtable (cfg->domain, klass);
10746 CHECK_TYPELOAD (klass);
10748 mono_domain_lock (cfg->domain);
10749 if (cfg->domain->special_static_fields)
10750 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10751 mono_domain_unlock (cfg->domain);
10753 is_special_static = mono_class_field_is_special_static (field);
10755 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10756 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10760 /* Generate IR to compute the field address */
10761 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10763 * Fast access to TLS data
10764 * Inline version of get_thread_static_data () in
10768 int idx, static_data_reg, array_reg, dreg;
10770 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10771 GSHAREDVT_FAILURE (op);
10773 static_data_reg = alloc_ireg (cfg);
10774 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10776 if (cfg->compile_aot) {
10777 int offset_reg, offset2_reg, idx_reg;
10779 /* For TLS variables, this will return the TLS offset */
10780 EMIT_NEW_SFLDACONST (cfg, ins, field);
10781 offset_reg = ins->dreg;
10782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10783 idx_reg = alloc_ireg (cfg);
10784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10786 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10787 array_reg = alloc_ireg (cfg);
10788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10789 offset2_reg = alloc_ireg (cfg);
10790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10791 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10792 dreg = alloc_ireg (cfg);
10793 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10795 offset = (gsize)addr & 0x7fffffff;
10796 idx = offset & 0x3f;
10798 array_reg = alloc_ireg (cfg);
10799 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10800 dreg = alloc_ireg (cfg);
10801 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10803 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10804 (cfg->compile_aot && is_special_static) ||
10805 (context_used && is_special_static)) {
10806 MonoInst *iargs [2];
10808 g_assert (field->parent);
10809 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10810 if (context_used) {
10811 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10812 field, MONO_RGCTX_INFO_CLASS_FIELD);
10814 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10816 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10817 } else if (context_used) {
10818 MonoInst *static_data;
10821 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10822 method->klass->name_space, method->klass->name, method->name,
10823 depth, field->offset);
10826 if (mono_class_needs_cctor_run (klass, method))
10827 emit_class_init (cfg, klass);
10830 * The pointer we're computing here is
10832 * super_info.static_data + field->offset
10834 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10835 klass, MONO_RGCTX_INFO_STATIC_DATA);
10837 if (mini_is_gsharedvt_klass (klass)) {
10838 MonoInst *offset_ins;
10840 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10841 /* The value is offset by 1 */
10842 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10843 dreg = alloc_ireg_mp (cfg);
10844 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10845 } else if (field->offset == 0) {
10848 int addr_reg = mono_alloc_preg (cfg);
10849 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10851 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10852 MonoInst *iargs [2];
10854 g_assert (field->parent);
10855 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10856 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10857 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10859 MonoVTable *vtable = NULL;
10861 if (!cfg->compile_aot)
10862 vtable = mono_class_vtable (cfg->domain, klass);
10863 CHECK_TYPELOAD (klass);
10866 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10867 if (!(g_slist_find (class_inits, klass))) {
10868 emit_class_init (cfg, klass);
10869 if (cfg->verbose_level > 2)
10870 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10871 class_inits = g_slist_prepend (class_inits, klass);
10874 if (cfg->run_cctors) {
10875 /* This makes it so that inlining cannot trigger */
10876 /* .cctors: too many apps depend on them */
10877 /* running with a specific order... */
10879 if (! vtable->initialized)
10880 INLINE_FAILURE ("class init");
10881 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10882 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10883 goto exception_exit;
10887 if (cfg->compile_aot)
10888 EMIT_NEW_SFLDACONST (cfg, ins, field);
10891 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10893 EMIT_NEW_PCONST (cfg, ins, addr);
10896 MonoInst *iargs [1];
10897 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10898 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10902 /* Generate IR to do the actual load/store operation */
10904 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10905 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10906 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10909 if (op == CEE_LDSFLDA) {
10910 ins->klass = mono_class_from_mono_type (ftype);
10911 ins->type = STACK_PTR;
10913 } else if (op == CEE_STSFLD) {
10916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10917 store->flags |= ins_flag;
10919 gboolean is_const = FALSE;
10920 MonoVTable *vtable = NULL;
10921 gpointer addr = NULL;
10923 if (!context_used) {
10924 vtable = mono_class_vtable (cfg->domain, klass);
10925 CHECK_TYPELOAD (klass);
10927 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10928 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10929 int ro_type = ftype->type;
10931 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10932 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10933 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10936 GSHAREDVT_FAILURE (op);
10938 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10941 case MONO_TYPE_BOOLEAN:
10943 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10947 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10950 case MONO_TYPE_CHAR:
10952 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10956 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10961 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10965 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10970 case MONO_TYPE_PTR:
10971 case MONO_TYPE_FNPTR:
10972 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10973 type_to_eval_stack_type ((cfg), field->type, *sp);
10976 case MONO_TYPE_STRING:
10977 case MONO_TYPE_OBJECT:
10978 case MONO_TYPE_CLASS:
10979 case MONO_TYPE_SZARRAY:
10980 case MONO_TYPE_ARRAY:
10981 if (!mono_gc_is_moving ()) {
10982 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10983 type_to_eval_stack_type ((cfg), field->type, *sp);
10991 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10996 case MONO_TYPE_VALUETYPE:
11006 CHECK_STACK_OVF (1);
11008 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11009 load->flags |= ins_flag;
11015 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11016 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11017 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11028 token = read32 (ip + 1);
11029 klass = mini_get_class (method, token, generic_context);
11030 CHECK_TYPELOAD (klass);
11031 if (ins_flag & MONO_INST_VOLATILE) {
11032 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11033 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11035 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11036 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11037 ins->flags |= ins_flag;
11038 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11039 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11040 /* insert call to write barrier */
11041 mini_emit_write_barrier (cfg, sp [0], sp [1]);
11053 const char *data_ptr;
11055 guint32 field_token;
11061 token = read32 (ip + 1);
11063 klass = mini_get_class (method, token, generic_context);
11064 CHECK_TYPELOAD (klass);
11065 if (klass->byval_arg.type == MONO_TYPE_VOID)
11068 context_used = mini_class_check_context_used (cfg, klass);
11070 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11071 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11072 ins->sreg1 = sp [0]->dreg;
11073 ins->type = STACK_I4;
11074 ins->dreg = alloc_ireg (cfg);
11075 MONO_ADD_INS (cfg->cbb, ins);
11076 *sp = mono_decompose_opcode (cfg, ins);
11079 if (context_used) {
11080 MonoInst *args [3];
11081 MonoClass *array_class = mono_array_class_get (klass, 1);
11082 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11084 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11087 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11088 array_class, MONO_RGCTX_INFO_VTABLE);
11093 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11095 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11097 if (cfg->opt & MONO_OPT_SHARED) {
11098 /* Decompose now to avoid problems with references to the domainvar */
11099 MonoInst *iargs [3];
11101 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11102 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11103 iargs [2] = sp [0];
11105 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11107 /* Decompose later since it is needed by abcrem */
11108 MonoClass *array_type = mono_array_class_get (klass, 1);
11109 mono_class_vtable (cfg->domain, array_type);
11110 CHECK_TYPELOAD (array_type);
11112 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11113 ins->dreg = alloc_ireg_ref (cfg);
11114 ins->sreg1 = sp [0]->dreg;
11115 ins->inst_newa_class = klass;
11116 ins->type = STACK_OBJ;
11117 ins->klass = array_type;
11118 MONO_ADD_INS (cfg->cbb, ins);
11119 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11120 cfg->cbb->has_array_access = TRUE;
11122 /* Needed so mono_emit_load_get_addr () gets called */
11123 mono_get_got_var (cfg);
11133 * we inline/optimize the initialization sequence if possible.
11134 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11135 * for small sizes open code the memcpy
11136 * ensure the rva field is big enough
11138 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11139 MonoMethod *memcpy_method = mini_get_memcpy_method ();
11140 MonoInst *iargs [3];
11141 int add_reg = alloc_ireg_mp (cfg);
11143 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11144 if (cfg->compile_aot) {
11145 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11147 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11149 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11150 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11159 if (sp [0]->type != STACK_OBJ)
11162 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11163 ins->dreg = alloc_preg (cfg);
11164 ins->sreg1 = sp [0]->dreg;
11165 ins->type = STACK_I4;
11166 /* This flag will be inherited by the decomposition */
11167 ins->flags |= MONO_INST_FAULT;
11168 MONO_ADD_INS (cfg->cbb, ins);
11169 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11170 cfg->cbb->has_array_access = TRUE;
11178 if (sp [0]->type != STACK_OBJ)
11181 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11183 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11184 CHECK_TYPELOAD (klass);
11185 /* we need to make sure that this array is exactly the type it needs
11186 * to be for correctness. the wrappers are lax with their usage
11187 * so we need to ignore them here
11189 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11190 MonoClass *array_class = mono_array_class_get (klass, 1);
11191 mini_emit_check_array_type (cfg, sp [0], array_class);
11192 CHECK_TYPELOAD (array_class);
11196 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11201 case CEE_LDELEM_I1:
11202 case CEE_LDELEM_U1:
11203 case CEE_LDELEM_I2:
11204 case CEE_LDELEM_U2:
11205 case CEE_LDELEM_I4:
11206 case CEE_LDELEM_U4:
11207 case CEE_LDELEM_I8:
11209 case CEE_LDELEM_R4:
11210 case CEE_LDELEM_R8:
11211 case CEE_LDELEM_REF: {
11217 if (*ip == CEE_LDELEM) {
11219 token = read32 (ip + 1);
11220 klass = mini_get_class (method, token, generic_context);
11221 CHECK_TYPELOAD (klass);
11222 mono_class_init (klass);
11225 klass = array_access_to_klass (*ip);
11227 if (sp [0]->type != STACK_OBJ)
11230 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11232 if (mini_is_gsharedvt_variable_klass (klass)) {
11233 // FIXME-VT: OP_ICONST optimization
11234 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11235 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11236 ins->opcode = OP_LOADV_MEMBASE;
11237 } else if (sp [1]->opcode == OP_ICONST) {
11238 int array_reg = sp [0]->dreg;
11239 int index_reg = sp [1]->dreg;
11240 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11242 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11243 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11245 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11246 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11248 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11249 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11252 if (*ip == CEE_LDELEM)
11259 case CEE_STELEM_I1:
11260 case CEE_STELEM_I2:
11261 case CEE_STELEM_I4:
11262 case CEE_STELEM_I8:
11263 case CEE_STELEM_R4:
11264 case CEE_STELEM_R8:
11265 case CEE_STELEM_REF:
11270 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11272 if (*ip == CEE_STELEM) {
11274 token = read32 (ip + 1);
11275 klass = mini_get_class (method, token, generic_context);
11276 CHECK_TYPELOAD (klass);
11277 mono_class_init (klass);
11280 klass = array_access_to_klass (*ip);
11282 if (sp [0]->type != STACK_OBJ)
11285 emit_array_store (cfg, klass, sp, TRUE);
11287 if (*ip == CEE_STELEM)
11294 case CEE_CKFINITE: {
11298 if (cfg->llvm_only) {
11299 MonoInst *iargs [1];
11301 iargs [0] = sp [0];
11302 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11304 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11305 ins->sreg1 = sp [0]->dreg;
11306 ins->dreg = alloc_freg (cfg);
11307 ins->type = STACK_R8;
11308 MONO_ADD_INS (cfg->cbb, ins);
11310 *sp++ = mono_decompose_opcode (cfg, ins);
11316 case CEE_REFANYVAL: {
11317 MonoInst *src_var, *src;
11319 int klass_reg = alloc_preg (cfg);
11320 int dreg = alloc_preg (cfg);
11322 GSHAREDVT_FAILURE (*ip);
11325 MONO_INST_NEW (cfg, ins, *ip);
11328 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11329 CHECK_TYPELOAD (klass);
11331 context_used = mini_class_check_context_used (cfg, klass);
11334 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11336 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11337 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11340 if (context_used) {
11341 MonoInst *klass_ins;
11343 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11344 klass, MONO_RGCTX_INFO_KLASS);
11347 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11348 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11350 mini_emit_class_check (cfg, klass_reg, klass);
11352 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11353 ins->type = STACK_MP;
11354 ins->klass = klass;
11359 case CEE_MKREFANY: {
11360 MonoInst *loc, *addr;
11362 GSHAREDVT_FAILURE (*ip);
11365 MONO_INST_NEW (cfg, ins, *ip);
11368 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11369 CHECK_TYPELOAD (klass);
11371 context_used = mini_class_check_context_used (cfg, klass);
11373 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11374 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11376 if (context_used) {
11377 MonoInst *const_ins;
11378 int type_reg = alloc_preg (cfg);
11380 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11381 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11383 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11385 int const_reg = alloc_preg (cfg);
11386 int type_reg = alloc_preg (cfg);
11388 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11391 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11395 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11396 ins->type = STACK_VTYPE;
11397 ins->klass = mono_defaults.typed_reference_class;
11402 case CEE_LDTOKEN: {
11404 MonoClass *handle_class;
11406 CHECK_STACK_OVF (1);
11409 n = read32 (ip + 1);
11411 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11412 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11413 handle = mono_method_get_wrapper_data (method, n);
11414 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11415 if (handle_class == mono_defaults.typehandle_class)
11416 handle = &((MonoClass*)handle)->byval_arg;
11419 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11424 mono_class_init (handle_class);
11425 if (cfg->gshared) {
11426 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11427 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11428 /* This case handles ldtoken
11429 of an open type, like for
11432 } else if (handle_class == mono_defaults.typehandle_class) {
11433 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11434 } else if (handle_class == mono_defaults.fieldhandle_class)
11435 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11436 else if (handle_class == mono_defaults.methodhandle_class)
11437 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11439 g_assert_not_reached ();
11442 if ((cfg->opt & MONO_OPT_SHARED) &&
11443 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11444 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11445 MonoInst *addr, *vtvar, *iargs [3];
11446 int method_context_used;
11448 method_context_used = mini_method_check_context_used (cfg, method);
11450 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11452 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11453 EMIT_NEW_ICONST (cfg, iargs [1], n);
11454 if (method_context_used) {
11455 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11456 method, MONO_RGCTX_INFO_METHOD);
11457 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11459 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11460 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11462 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11464 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11466 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11468 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11469 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11470 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11471 (cmethod->klass == mono_defaults.systemtype_class) &&
11472 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11473 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11475 mono_class_init (tclass);
11476 if (context_used) {
11477 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11478 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11479 } else if (cfg->compile_aot) {
11480 if (method->wrapper_type) {
11481 error_init (&error); //got to do it since there are multiple conditionals below
11482 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11483 /* Special case for static synchronized wrappers */
11484 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11486 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11487 /* FIXME: n is not a normal token */
11489 EMIT_NEW_PCONST (cfg, ins, NULL);
11492 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11495 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11497 EMIT_NEW_PCONST (cfg, ins, rt);
11499 ins->type = STACK_OBJ;
11500 ins->klass = cmethod->klass;
11503 MonoInst *addr, *vtvar;
11505 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11507 if (context_used) {
11508 if (handle_class == mono_defaults.typehandle_class) {
11509 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11510 mono_class_from_mono_type ((MonoType *)handle),
11511 MONO_RGCTX_INFO_TYPE);
11512 } else if (handle_class == mono_defaults.methodhandle_class) {
11513 ins = emit_get_rgctx_method (cfg, context_used,
11514 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11515 } else if (handle_class == mono_defaults.fieldhandle_class) {
11516 ins = emit_get_rgctx_field (cfg, context_used,
11517 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11519 g_assert_not_reached ();
11521 } else if (cfg->compile_aot) {
11522 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11524 EMIT_NEW_PCONST (cfg, ins, handle);
11526 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11527 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11528 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11538 if (sp [-1]->type != STACK_OBJ)
11541 MONO_INST_NEW (cfg, ins, OP_THROW);
11543 ins->sreg1 = sp [0]->dreg;
11545 cfg->cbb->out_of_line = TRUE;
11546 MONO_ADD_INS (cfg->cbb, ins);
11547 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11548 MONO_ADD_INS (cfg->cbb, ins);
11551 link_bblock (cfg, cfg->cbb, end_bblock);
11552 start_new_bblock = 1;
11553 /* This can complicate code generation for llvm since the return value might not be defined */
11554 if (COMPILE_LLVM (cfg))
11555 INLINE_FAILURE ("throw");
11557 case CEE_ENDFINALLY:
11558 if (!ip_in_finally_clause (cfg, ip - header->code))
11560 /* mono_save_seq_point_info () depends on this */
11561 if (sp != stack_start)
11562 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11563 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11564 MONO_ADD_INS (cfg->cbb, ins);
11566 start_new_bblock = 1;
11569 * Control will leave the method so empty the stack, otherwise
11570 * the next basic block will start with a nonempty stack.
11572 while (sp != stack_start) {
11577 case CEE_LEAVE_S: {
11580 if (*ip == CEE_LEAVE) {
11582 target = ip + 5 + (gint32)read32(ip + 1);
11585 target = ip + 2 + (signed char)(ip [1]);
11588 /* empty the stack */
11589 while (sp != stack_start) {
11594 * If this leave statement is in a catch block, check for a
11595 * pending exception, and rethrow it if necessary.
11596 * We avoid doing this in runtime invoke wrappers, since those are called
11597 * by native code which expects the wrapper to catch all exceptions.
11599 for (i = 0; i < header->num_clauses; ++i) {
11600 MonoExceptionClause *clause = &header->clauses [i];
11603 * Use <= in the final comparison to handle clauses with multiple
11604 * leave statements, like in bug #78024.
11605 * The ordering of the exception clauses guarantees that we find the
11606 * innermost clause.
11608 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11610 MonoBasicBlock *dont_throw;
11615 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11618 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11620 NEW_BBLOCK (cfg, dont_throw);
11623 * Currently, we always rethrow the abort exception, despite the
11624 * fact that this is not correct. See thread6.cs for an example.
11625 * But propagating the abort exception is more important than
11626 * getting the semantics right.
11628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11629 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11630 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11632 MONO_START_BB (cfg, dont_throw);
11637 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11640 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11642 MonoExceptionClause *clause;
11644 for (tmp = handlers; tmp; tmp = tmp->next) {
11645 clause = (MonoExceptionClause *)tmp->data;
11646 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11648 link_bblock (cfg, cfg->cbb, tblock);
11649 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11650 ins->inst_target_bb = tblock;
11651 ins->inst_eh_block = clause;
11652 MONO_ADD_INS (cfg->cbb, ins);
11653 cfg->cbb->has_call_handler = 1;
11654 if (COMPILE_LLVM (cfg)) {
11655 MonoBasicBlock *target_bb;
11658 * Link the finally bblock with the target, since it will
11659 * conceptually branch there.
11661 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11662 GET_BBLOCK (cfg, target_bb, target);
11663 link_bblock (cfg, tblock, target_bb);
11666 g_list_free (handlers);
11669 MONO_INST_NEW (cfg, ins, OP_BR);
11670 MONO_ADD_INS (cfg->cbb, ins);
11671 GET_BBLOCK (cfg, tblock, target);
11672 link_bblock (cfg, cfg->cbb, tblock);
11673 ins->inst_target_bb = tblock;
11675 start_new_bblock = 1;
11677 if (*ip == CEE_LEAVE)
11686 * Mono specific opcodes
11688 case MONO_CUSTOM_PREFIX: {
11690 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11694 case CEE_MONO_ICALL: {
11696 MonoJitICallInfo *info;
11698 token = read32 (ip + 2);
11699 func = mono_method_get_wrapper_data (method, token);
11700 info = mono_find_jit_icall_by_addr (func);
11702 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11705 CHECK_STACK (info->sig->param_count);
11706 sp -= info->sig->param_count;
11708 ins = mono_emit_jit_icall (cfg, info->func, sp);
11709 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11713 inline_costs += 10 * num_calls++;
11717 case CEE_MONO_LDPTR_CARD_TABLE:
11718 case CEE_MONO_LDPTR_NURSERY_START:
11719 case CEE_MONO_LDPTR_NURSERY_BITS:
11720 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11721 CHECK_STACK_OVF (1);
11724 case CEE_MONO_LDPTR_CARD_TABLE:
11725 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11727 case CEE_MONO_LDPTR_NURSERY_START:
11728 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11730 case CEE_MONO_LDPTR_NURSERY_BITS:
11731 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11733 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11734 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11740 inline_costs += 10 * num_calls++;
11743 case CEE_MONO_LDPTR: {
11746 CHECK_STACK_OVF (1);
11748 token = read32 (ip + 2);
11750 ptr = mono_method_get_wrapper_data (method, token);
11751 EMIT_NEW_PCONST (cfg, ins, ptr);
11754 inline_costs += 10 * num_calls++;
11755 /* Can't embed random pointers into AOT code */
11759 case CEE_MONO_JIT_ICALL_ADDR: {
11760 MonoJitICallInfo *callinfo;
11763 CHECK_STACK_OVF (1);
11765 token = read32 (ip + 2);
11767 ptr = mono_method_get_wrapper_data (method, token);
11768 callinfo = mono_find_jit_icall_by_addr (ptr);
11769 g_assert (callinfo);
11770 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11773 inline_costs += 10 * num_calls++;
11776 case CEE_MONO_ICALL_ADDR: {
11777 MonoMethod *cmethod;
11780 CHECK_STACK_OVF (1);
11782 token = read32 (ip + 2);
11784 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11786 if (cfg->compile_aot) {
11787 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11789 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11790 * before the call, it's not needed when using direct pinvoke.
11791 * This is not an optimization, but it's used to avoid looking up pinvokes
11792 * on platforms which don't support dlopen ().
11794 EMIT_NEW_PCONST (cfg, ins, NULL);
11796 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11799 ptr = mono_lookup_internal_call (cmethod);
11801 EMIT_NEW_PCONST (cfg, ins, ptr);
11807 case CEE_MONO_VTADDR: {
11808 MonoInst *src_var, *src;
11814 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11815 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11820 case CEE_MONO_NEWOBJ: {
11821 MonoInst *iargs [2];
11823 CHECK_STACK_OVF (1);
11825 token = read32 (ip + 2);
11826 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11827 mono_class_init (klass);
11828 NEW_DOMAINCONST (cfg, iargs [0]);
11829 MONO_ADD_INS (cfg->cbb, iargs [0]);
11830 NEW_CLASSCONST (cfg, iargs [1], klass);
11831 MONO_ADD_INS (cfg->cbb, iargs [1]);
11832 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11834 inline_costs += 10 * num_calls++;
11837 case CEE_MONO_OBJADDR:
11840 MONO_INST_NEW (cfg, ins, OP_MOVE);
11841 ins->dreg = alloc_ireg_mp (cfg);
11842 ins->sreg1 = sp [0]->dreg;
11843 ins->type = STACK_MP;
11844 MONO_ADD_INS (cfg->cbb, ins);
11848 case CEE_MONO_LDNATIVEOBJ:
11850 * Similar to LDOBJ, but instead load the unmanaged
11851 * representation of the vtype to the stack.
11856 token = read32 (ip + 2);
11857 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11858 g_assert (klass->valuetype);
11859 mono_class_init (klass);
11862 MonoInst *src, *dest, *temp;
11865 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11866 temp->backend.is_pinvoke = 1;
11867 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11868 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11870 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11871 dest->type = STACK_VTYPE;
11872 dest->klass = klass;
11878 case CEE_MONO_RETOBJ: {
11880 * Same as RET, but return the native representation of a vtype
11883 g_assert (cfg->ret);
11884 g_assert (mono_method_signature (method)->pinvoke);
11889 token = read32 (ip + 2);
11890 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11892 if (!cfg->vret_addr) {
11893 g_assert (cfg->ret_var_is_local);
11895 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11897 EMIT_NEW_RETLOADA (cfg, ins);
11899 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11901 if (sp != stack_start)
11904 MONO_INST_NEW (cfg, ins, OP_BR);
11905 ins->inst_target_bb = end_bblock;
11906 MONO_ADD_INS (cfg->cbb, ins);
11907 link_bblock (cfg, cfg->cbb, end_bblock);
11908 start_new_bblock = 1;
11912 case CEE_MONO_SAVE_LMF:
11913 case CEE_MONO_RESTORE_LMF:
11916 case CEE_MONO_CLASSCONST:
11917 CHECK_STACK_OVF (1);
11919 token = read32 (ip + 2);
11920 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11923 inline_costs += 10 * num_calls++;
11925 case CEE_MONO_NOT_TAKEN:
11926 cfg->cbb->out_of_line = TRUE;
11929 case CEE_MONO_TLS: {
11932 CHECK_STACK_OVF (1);
11934 key = (MonoTlsKey)read32 (ip + 2);
11935 g_assert (key < TLS_KEY_NUM);
11937 ins = mono_create_tls_get (cfg, key);
11939 ins->type = STACK_PTR;
11944 case CEE_MONO_DYN_CALL: {
11945 MonoCallInst *call;
11947 /* It would be easier to call a trampoline, but that would put an
11948 * extra frame on the stack, confusing exception handling. So
11949 * implement it inline using an opcode for now.
11952 if (!cfg->dyn_call_var) {
11953 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11954 /* prevent it from being register allocated */
11955 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11958 /* Has to use a call inst since the local regalloc expects it */
11959 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11960 ins = (MonoInst*)call;
11962 ins->sreg1 = sp [0]->dreg;
11963 ins->sreg2 = sp [1]->dreg;
11964 MONO_ADD_INS (cfg->cbb, ins);
11966 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
11969 inline_costs += 10 * num_calls++;
11973 case CEE_MONO_MEMORY_BARRIER: {
11975 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
11979 case CEE_MONO_ATOMIC_STORE_I4: {
11980 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
11986 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
11987 ins->dreg = sp [0]->dreg;
11988 ins->sreg1 = sp [1]->dreg;
11989 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
11990 MONO_ADD_INS (cfg->cbb, ins);
11995 case CEE_MONO_JIT_ATTACH: {
11996 MonoInst *args [16], *domain_ins;
11997 MonoInst *ad_ins, *jit_tls_ins;
11998 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12000 g_assert (!mono_threads_is_coop_enabled ());
12002 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12004 EMIT_NEW_PCONST (cfg, ins, NULL);
12005 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12007 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12008 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12010 if (ad_ins && jit_tls_ins) {
12011 NEW_BBLOCK (cfg, next_bb);
12012 NEW_BBLOCK (cfg, call_bb);
12014 if (cfg->compile_aot) {
12015 /* AOT code is only used in the root domain */
12016 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12018 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12020 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12021 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12026 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12027 MONO_START_BB (cfg, call_bb);
12030 /* AOT code is only used in the root domain */
12031 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12032 if (cfg->compile_aot) {
12036 * This is called on unattached threads, so it cannot go through the trampoline
12037 * infrastructure. Use an indirect call through a got slot initialized at load time
12040 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12041 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12043 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12045 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12048 MONO_START_BB (cfg, next_bb);
12053 case CEE_MONO_JIT_DETACH: {
12054 MonoInst *args [16];
12056 /* Restore the original domain */
12057 dreg = alloc_ireg (cfg);
12058 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12059 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12063 case CEE_MONO_CALLI_EXTRA_ARG: {
12065 MonoMethodSignature *fsig;
12069 * This is the same as CEE_CALLI, but passes an additional argument
12070 * to the called method in llvmonly mode.
12071 * This is only used by delegate invoke wrappers to call the
12072 * actual delegate method.
12074 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12077 token = read32 (ip + 2);
12085 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12088 if (cfg->llvm_only)
12089 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12091 n = fsig->param_count + fsig->hasthis + 1;
12098 if (cfg->llvm_only) {
12100 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12101 * cconv. This is set by mono_init_delegate ().
12103 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12104 MonoInst *callee = addr;
12105 MonoInst *call, *localloc_ins;
12106 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12107 int low_bit_reg = alloc_preg (cfg);
12109 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12110 NEW_BBLOCK (cfg, end_bb);
12112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12116 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12117 addr = emit_get_rgctx_sig (cfg, context_used,
12118 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12120 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12122 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12123 ins->dreg = alloc_preg (cfg);
12124 ins->inst_imm = 2 * SIZEOF_VOID_P;
12125 MONO_ADD_INS (cfg->cbb, ins);
12126 localloc_ins = ins;
12127 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12129 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12131 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12134 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12135 MONO_START_BB (cfg, is_gsharedvt_bb);
12136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12137 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12138 ins->dreg = call->dreg;
12140 MONO_START_BB (cfg, end_bb);
12142 /* Caller uses a normal calling conv */
12144 MonoInst *callee = addr;
12145 MonoInst *call, *localloc_ins;
12146 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12147 int low_bit_reg = alloc_preg (cfg);
12149 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12150 NEW_BBLOCK (cfg, end_bb);
12152 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12156 /* Normal case: callee uses a normal cconv, no conversion is needed */
12157 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12159 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12160 MONO_START_BB (cfg, is_gsharedvt_bb);
12161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12162 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12163 MONO_ADD_INS (cfg->cbb, addr);
12165 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12167 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12168 ins->dreg = alloc_preg (cfg);
12169 ins->inst_imm = 2 * SIZEOF_VOID_P;
12170 MONO_ADD_INS (cfg->cbb, ins);
12171 localloc_ins = ins;
12172 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12173 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12176 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12177 ins->dreg = call->dreg;
12178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12180 MONO_START_BB (cfg, end_bb);
12183 /* Same as CEE_CALLI */
12184 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12186 * We pass the address to the gsharedvt trampoline in the rgctx reg
12188 MonoInst *callee = addr;
12190 addr = emit_get_rgctx_sig (cfg, context_used,
12191 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12192 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12194 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12198 if (!MONO_TYPE_IS_VOID (fsig->ret))
12199 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12201 CHECK_CFG_EXCEPTION;
12205 constrained_class = NULL;
12208 case CEE_MONO_LDDOMAIN:
12209 CHECK_STACK_OVF (1);
12210 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12214 case CEE_MONO_GET_LAST_ERROR:
12216 CHECK_STACK_OVF (1);
12218 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12219 ins->dreg = alloc_dreg (cfg, STACK_I4);
12220 ins->type = STACK_I4;
12221 MONO_ADD_INS (cfg->cbb, ins);
12226 case CEE_MONO_GET_RGCTX_ARG:
12228 CHECK_STACK_OVF (1);
12230 mono_create_rgctx_var (cfg);
12232 MONO_INST_NEW (cfg, ins, OP_MOVE);
12233 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12234 ins->sreg1 = cfg->rgctx_var->dreg;
12235 ins->type = STACK_PTR;
12236 MONO_ADD_INS (cfg->cbb, ins);
12242 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12248 case CEE_PREFIX1: {
12251 case CEE_ARGLIST: {
12252 /* somewhat similar to LDTOKEN */
12253 MonoInst *addr, *vtvar;
12254 CHECK_STACK_OVF (1);
12255 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12257 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12258 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12260 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12261 ins->type = STACK_VTYPE;
12262 ins->klass = mono_defaults.argumenthandle_class;
12272 MonoInst *cmp, *arg1, *arg2;
12280 * The following transforms:
12281 * CEE_CEQ into OP_CEQ
12282 * CEE_CGT into OP_CGT
12283 * CEE_CGT_UN into OP_CGT_UN
12284 * CEE_CLT into OP_CLT
12285 * CEE_CLT_UN into OP_CLT_UN
12287 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12289 MONO_INST_NEW (cfg, ins, cmp->opcode);
12290 cmp->sreg1 = arg1->dreg;
12291 cmp->sreg2 = arg2->dreg;
12292 type_from_op (cfg, cmp, arg1, arg2);
12294 add_widen_op (cfg, cmp, &arg1, &arg2);
12295 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12296 cmp->opcode = OP_LCOMPARE;
12297 else if (arg1->type == STACK_R4)
12298 cmp->opcode = OP_RCOMPARE;
12299 else if (arg1->type == STACK_R8)
12300 cmp->opcode = OP_FCOMPARE;
12302 cmp->opcode = OP_ICOMPARE;
12303 MONO_ADD_INS (cfg->cbb, cmp);
12304 ins->type = STACK_I4;
12305 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12306 type_from_op (cfg, ins, arg1, arg2);
12308 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12310 * The backends expect the fceq opcodes to do the
12313 ins->sreg1 = cmp->sreg1;
12314 ins->sreg2 = cmp->sreg2;
12317 MONO_ADD_INS (cfg->cbb, ins);
12323 MonoInst *argconst;
12324 MonoMethod *cil_method;
12326 CHECK_STACK_OVF (1);
12328 n = read32 (ip + 2);
12329 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12332 mono_class_init (cmethod->klass);
12334 mono_save_token_info (cfg, image, n, cmethod);
12336 context_used = mini_method_check_context_used (cfg, cmethod);
12338 cil_method = cmethod;
12339 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12340 emit_method_access_failure (cfg, method, cil_method);
12342 if (mono_security_core_clr_enabled ())
12343 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12346 * Optimize the common case of ldftn+delegate creation
12348 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12349 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12350 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12351 MonoInst *target_ins, *handle_ins;
12352 MonoMethod *invoke;
12353 int invoke_context_used;
12355 invoke = mono_get_delegate_invoke (ctor_method->klass);
12356 if (!invoke || !mono_method_signature (invoke))
12359 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12361 target_ins = sp [-1];
12363 if (mono_security_core_clr_enabled ())
12364 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12366 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12367 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12368 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12369 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12370 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12374 /* FIXME: SGEN support */
12375 if (invoke_context_used == 0 || cfg->llvm_only) {
12377 if (cfg->verbose_level > 3)
12378 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12379 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12382 CHECK_CFG_EXCEPTION;
12392 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12393 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12397 inline_costs += 10 * num_calls++;
12400 case CEE_LDVIRTFTN: {
12401 MonoInst *args [2];
12405 n = read32 (ip + 2);
12406 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12409 mono_class_init (cmethod->klass);
12411 context_used = mini_method_check_context_used (cfg, cmethod);
12413 if (mono_security_core_clr_enabled ())
12414 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12417 * Optimize the common case of ldvirtftn+delegate creation
12419 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12420 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12421 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12422 MonoInst *target_ins, *handle_ins;
12423 MonoMethod *invoke;
12424 int invoke_context_used;
12425 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12427 invoke = mono_get_delegate_invoke (ctor_method->klass);
12428 if (!invoke || !mono_method_signature (invoke))
12431 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12433 target_ins = sp [-1];
12435 if (mono_security_core_clr_enabled ())
12436 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12438 /* FIXME: SGEN support */
12439 if (invoke_context_used == 0 || cfg->llvm_only) {
12441 if (cfg->verbose_level > 3)
12442 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12443 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12446 CHECK_CFG_EXCEPTION;
12459 args [1] = emit_get_rgctx_method (cfg, context_used,
12460 cmethod, MONO_RGCTX_INFO_METHOD);
12463 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12465 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12468 inline_costs += 10 * num_calls++;
12472 CHECK_STACK_OVF (1);
12474 n = read16 (ip + 2);
12476 EMIT_NEW_ARGLOAD (cfg, ins, n);
12481 CHECK_STACK_OVF (1);
12483 n = read16 (ip + 2);
12485 NEW_ARGLOADA (cfg, ins, n);
12486 MONO_ADD_INS (cfg->cbb, ins);
12494 n = read16 (ip + 2);
12496 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12498 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12502 CHECK_STACK_OVF (1);
12504 n = read16 (ip + 2);
12506 EMIT_NEW_LOCLOAD (cfg, ins, n);
12511 unsigned char *tmp_ip;
12512 CHECK_STACK_OVF (1);
12514 n = read16 (ip + 2);
12517 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12523 EMIT_NEW_LOCLOADA (cfg, ins, n);
12532 n = read16 (ip + 2);
12534 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12536 emit_stloc_ir (cfg, sp, header, n);
12540 case CEE_LOCALLOC: {
12542 MonoBasicBlock *non_zero_bb, *end_bb;
12543 int alloc_ptr = alloc_preg (cfg);
12545 if (sp != stack_start)
12547 if (cfg->method != method)
12549 * Inlining this into a loop in a parent could lead to
12550 * stack overflows which is different behavior than the
12551 * non-inlined case, thus disable inlining in this case.
12553 INLINE_FAILURE("localloc");
12555 NEW_BBLOCK (cfg, non_zero_bb);
12556 NEW_BBLOCK (cfg, end_bb);
12558 /* if size != zero */
12559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12562 //size is zero, so result is NULL
12563 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12566 MONO_START_BB (cfg, non_zero_bb);
12567 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12568 ins->dreg = alloc_ptr;
12569 ins->sreg1 = sp [0]->dreg;
12570 ins->type = STACK_PTR;
12571 MONO_ADD_INS (cfg->cbb, ins);
12573 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12575 ins->flags |= MONO_INST_INIT;
12577 MONO_START_BB (cfg, end_bb);
12578 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12579 ins->type = STACK_PTR;
12585 case CEE_ENDFILTER: {
12586 MonoExceptionClause *clause, *nearest;
12591 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12593 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12594 ins->sreg1 = (*sp)->dreg;
12595 MONO_ADD_INS (cfg->cbb, ins);
12596 start_new_bblock = 1;
12600 for (cc = 0; cc < header->num_clauses; ++cc) {
12601 clause = &header->clauses [cc];
12602 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12603 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12604 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12607 g_assert (nearest);
12608 if ((ip - header->code) != nearest->handler_offset)
12613 case CEE_UNALIGNED_:
12614 ins_flag |= MONO_INST_UNALIGNED;
12615 /* FIXME: record alignment? we can assume 1 for now */
12619 case CEE_VOLATILE_:
12620 ins_flag |= MONO_INST_VOLATILE;
12624 ins_flag |= MONO_INST_TAILCALL;
12625 cfg->flags |= MONO_CFG_HAS_TAIL;
12626 /* Can't inline tail calls at this time */
12627 inline_costs += 100000;
12634 token = read32 (ip + 2);
12635 klass = mini_get_class (method, token, generic_context);
12636 CHECK_TYPELOAD (klass);
12637 if (generic_class_is_reference_type (cfg, klass))
12638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12640 mini_emit_initobj (cfg, *sp, NULL, klass);
12644 case CEE_CONSTRAINED_:
12646 token = read32 (ip + 2);
12647 constrained_class = mini_get_class (method, token, generic_context);
12648 CHECK_TYPELOAD (constrained_class);
12652 case CEE_INITBLK: {
12653 MonoInst *iargs [3];
12657 /* Skip optimized paths for volatile operations. */
12658 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12659 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12660 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12661 /* emit_memset only works when val == 0 */
12662 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12665 iargs [0] = sp [0];
12666 iargs [1] = sp [1];
12667 iargs [2] = sp [2];
12668 if (ip [1] == CEE_CPBLK) {
12670 * FIXME: It's unclear whether we should be emitting both the acquire
12671 * and release barriers for cpblk. It is technically both a load and
12672 * store operation, so it seems like that's the sensible thing to do.
12674 * FIXME: We emit full barriers on both sides of the operation for
12675 * simplicity. We should have a separate atomic memcpy method instead.
12677 MonoMethod *memcpy_method = mini_get_memcpy_method ();
12679 if (ins_flag & MONO_INST_VOLATILE)
12680 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12682 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12683 call->flags |= ins_flag;
12685 if (ins_flag & MONO_INST_VOLATILE)
12686 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12688 MonoMethod *memset_method = mini_get_memset_method ();
12689 if (ins_flag & MONO_INST_VOLATILE) {
12690 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12691 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12693 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12694 call->flags |= ins_flag;
12705 ins_flag |= MONO_INST_NOTYPECHECK;
12707 ins_flag |= MONO_INST_NORANGECHECK;
12708 /* we ignore the no-nullcheck for now since we
12709 * really do it explicitly only when doing callvirt->call
12713 case CEE_RETHROW: {
12715 int handler_offset = -1;
12717 for (i = 0; i < header->num_clauses; ++i) {
12718 MonoExceptionClause *clause = &header->clauses [i];
12719 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12720 handler_offset = clause->handler_offset;
12725 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12727 if (handler_offset == -1)
12730 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12731 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12732 ins->sreg1 = load->dreg;
12733 MONO_ADD_INS (cfg->cbb, ins);
12735 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12736 MONO_ADD_INS (cfg->cbb, ins);
12739 link_bblock (cfg, cfg->cbb, end_bblock);
12740 start_new_bblock = 1;
12748 CHECK_STACK_OVF (1);
12750 token = read32 (ip + 2);
12751 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12752 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12755 val = mono_type_size (type, &ialign);
12757 MonoClass *klass = mini_get_class (method, token, generic_context);
12758 CHECK_TYPELOAD (klass);
12760 val = mono_type_size (&klass->byval_arg, &ialign);
12762 if (mini_is_gsharedvt_klass (klass))
12763 GSHAREDVT_FAILURE (*ip);
12765 EMIT_NEW_ICONST (cfg, ins, val);
12770 case CEE_REFANYTYPE: {
12771 MonoInst *src_var, *src;
12773 GSHAREDVT_FAILURE (*ip);
12779 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12781 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12782 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12783 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12788 case CEE_READONLY_:
12801 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12811 g_warning ("opcode 0x%02x not handled", *ip);
12815 if (start_new_bblock != 1)
12818 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12819 if (cfg->cbb->next_bb) {
12820 /* This could already be set because of inlining, #693905 */
12821 MonoBasicBlock *bb = cfg->cbb;
12823 while (bb->next_bb)
12825 bb->next_bb = end_bblock;
12827 cfg->cbb->next_bb = end_bblock;
12830 if (cfg->method == method && cfg->domainvar) {
12832 MonoInst *get_domain;
12834 cfg->cbb = init_localsbb;
12836 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12837 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12838 MONO_ADD_INS (cfg->cbb, store);
12841 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12842 if (cfg->compile_aot)
12843 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12844 mono_get_got_var (cfg);
12847 if (cfg->method == method && cfg->got_var)
12848 mono_emit_load_got_addr (cfg);
12850 if (init_localsbb) {
12851 cfg->cbb = init_localsbb;
12853 for (i = 0; i < header->num_locals; ++i) {
12854 emit_init_local (cfg, i, header->locals [i], init_locals);
12858 if (cfg->init_ref_vars && cfg->method == method) {
12859 /* Emit initialization for ref vars */
12860 // FIXME: Avoid duplication initialization for IL locals.
12861 for (i = 0; i < cfg->num_varinfo; ++i) {
12862 MonoInst *ins = cfg->varinfo [i];
12864 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12865 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12869 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12870 cfg->cbb = init_localsbb;
12871 emit_push_lmf (cfg);
12874 cfg->cbb = init_localsbb;
12875 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12878 MonoBasicBlock *bb;
12881 * Make seq points at backward branch targets interruptable.
12883 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12884 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12885 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12888 /* Add a sequence point for method entry/exit events */
12889 if (seq_points && cfg->gen_sdb_seq_points) {
12890 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12891 MONO_ADD_INS (init_localsbb, ins);
12892 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12893 MONO_ADD_INS (cfg->bb_exit, ins);
12897 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12898 * the code they refer to was dead (#11880).
12900 if (sym_seq_points) {
12901 for (i = 0; i < header->code_size; ++i) {
12902 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12905 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12906 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12913 if (cfg->method == method) {
12914 MonoBasicBlock *bb;
12915 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12916 if (bb == cfg->bb_init)
12919 bb->region = mono_find_block_region (cfg, bb->real_offset);
12921 mono_create_spvar_for_region (cfg, bb->region);
12922 if (cfg->verbose_level > 2)
12923 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12926 MonoBasicBlock *bb;
12927 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12928 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12929 bb->real_offset = inline_offset;
12933 if (inline_costs < 0) {
12936 /* Method is too large */
12937 mname = mono_method_full_name (method, TRUE);
12938 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12942 if ((cfg->verbose_level > 2) && (cfg->method == method))
12943 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12948 g_assert (!mono_error_ok (&cfg->error));
12952 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12956 set_exception_type_from_invalid_il (cfg, method, ip);
12960 g_slist_free (class_inits);
12961 mono_basic_block_free (original_bb);
12962 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12963 if (cfg->exception_type)
12966 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode to its immediate-source
 * counterpart (OP_STORE*_MEMBASE_REG -> OP_STORE*_MEMBASE_IMM), covering the
 * pointer-sized, 1, 2, 4 and 8 byte variants.  Used when the value being
 * stored is known to be a constant, so the store can embed it directly.
 * Any opcode outside this set asserts: callers must pass only membase-reg
 * stores.
 */
12970 store_membase_reg_to_store_membase_imm (int opcode)
12973 case OP_STORE_MEMBASE_REG:
12974 return OP_STORE_MEMBASE_IMM;
12975 case OP_STOREI1_MEMBASE_REG:
12976 return OP_STOREI1_MEMBASE_IMM;
12977 case OP_STOREI2_MEMBASE_REG:
12978 return OP_STOREI2_MEMBASE_IMM;
12979 case OP_STOREI4_MEMBASE_REG:
12980 return OP_STOREI4_MEMBASE_IMM;
12981 case OP_STOREI8_MEMBASE_REG:
12982 return OP_STOREI8_MEMBASE_IMM;
/* No fallback mapping: an unexpected opcode is a caller bug. */
12984 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE, e.g. OP_IADD -> OP_IADD_IMM,
 * so that a constant second operand can be folded into the instruction itself.
 * The visible mappings cover 32 bit ALU ops, 64 bit ALU ops, shifts, compares,
 * membase stores, and (under TARGET_X86/TARGET_AMD64) some arch-specific
 * push/compare opcodes.
 *
 * NOTE(review): the matching `case` labels are elided in this extract; the
 * pairings are inferred from the returned opcode names — confirm against the
 * full source before relying on any individual mapping.
 */
12991 mono_op_to_op_imm (int opcode)
12995 return OP_IADD_IMM;
12997 return OP_ISUB_IMM;
12999 return OP_IDIV_IMM;
13001 return OP_IDIV_UN_IMM;
13003 return OP_IREM_IMM;
13005 return OP_IREM_UN_IMM;
13007 return OP_IMUL_IMM;
13009 return OP_IAND_IMM;
13013 return OP_IXOR_IMM;
13015 return OP_ISHL_IMM;
13017 return OP_ISHR_IMM;
13019 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU and shift variants. */
13022 return OP_LADD_IMM;
13024 return OP_LSUB_IMM;
13026 return OP_LAND_IMM;
13030 return OP_LXOR_IMM;
13032 return OP_LSHL_IMM;
13034 return OP_LSHR_IMM;
13036 return OP_LSHR_UN_IMM;
/* Long remainder has an immediate form only on 64 bit registers. */
13037 #if SIZEOF_REGISTER == 8
13039 return OP_LREM_IMM;
13043 return OP_COMPARE_IMM;
13045 return OP_ICOMPARE_IMM;
13047 return OP_LCOMPARE_IMM;
13049 case OP_STORE_MEMBASE_REG:
13050 return OP_STORE_MEMBASE_IMM;
13051 case OP_STOREI1_MEMBASE_REG:
13052 return OP_STOREI1_MEMBASE_IMM;
13053 case OP_STOREI2_MEMBASE_REG:
13054 return OP_STOREI2_MEMBASE_IMM;
13055 case OP_STOREI4_MEMBASE_REG:
13056 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific opcodes that accept immediates. */
13058 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13060 return OP_X86_PUSH_IMM;
13061 case OP_X86_COMPARE_MEMBASE_REG:
13062 return OP_X86_COMPARE_MEMBASE_IMM;
13064 #if defined(TARGET_AMD64)
13065 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13066 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant register target becomes a direct call. */
13068 case OP_VOIDCALL_REG:
13069 return OP_VOIDCALL;
13077 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL CEE_LDIND_* indirect-load opcode into the corresponding
 * Mono IR OP_LOAD*_MEMBASE opcode (signed/unsigned 1, 2, 4 byte loads,
 * native int, object reference, 8 byte, and R4/R8 floating point loads).
 * Both CEE_LDIND_I and CEE_LDIND_REF map to the pointer-sized OP_LOAD_MEMBASE.
 * Any other opcode asserts.
 */
13084 ldind_to_load_membase (int opcode)
13088 return OP_LOADI1_MEMBASE;
13090 return OP_LOADU1_MEMBASE;
13092 return OP_LOADI2_MEMBASE;
13094 return OP_LOADU2_MEMBASE;
13096 return OP_LOADI4_MEMBASE;
13098 return OP_LOADU4_MEMBASE;
13100 return OP_LOAD_MEMBASE;
13101 case CEE_LDIND_REF:
13102 return OP_LOAD_MEMBASE;
13104 return OP_LOADI8_MEMBASE;
13106 return OP_LOADR4_MEMBASE;
13108 return OP_LOADR8_MEMBASE;
/* Unknown CEE_LDIND_* value: caller bug. */
13110 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL CEE_STIND_* indirect-store opcode into the corresponding
 * Mono IR OP_STORE*_MEMBASE_REG opcode.  CEE_STIND_I and CEE_STIND_REF both
 * map to the pointer-sized OP_STORE_MEMBASE_REG.  Any other opcode asserts.
 */
13117 stind_to_store_membase (int opcode)
13121 return OP_STOREI1_MEMBASE_REG;
13123 return OP_STOREI2_MEMBASE_REG;
13125 return OP_STOREI4_MEMBASE_REG;
13127 case CEE_STIND_REF:
13128 return OP_STORE_MEMBASE_REG;
13130 return OP_STOREI8_MEMBASE_REG;
13132 return OP_STORER4_MEMBASE_REG;
13134 return OP_STORER8_MEMBASE_REG;
/* Unknown CEE_STIND_* value: caller bug. */
13136 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) opcode to the
 * absolute-address OP_LOAD*_MEM form, for targets that can encode a full
 * address directly in the instruction.  Currently only enabled on
 * x86/amd64; the 8 byte variant additionally requires 64 bit registers.
 * Opcodes without a *_MEM form presumably fall through to a "no mapping"
 * result — the default path is elided in this extract.
 */
13143 mono_load_membase_to_load_mem (int opcode)
13145 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13146 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13148 case OP_LOAD_MEMBASE:
13149 return OP_LOAD_MEM;
13150 case OP_LOADU1_MEMBASE:
13151 return OP_LOADU1_MEM;
13152 case OP_LOADU2_MEMBASE:
13153 return OP_LOADU2_MEM;
13154 case OP_LOADI4_MEMBASE:
13155 return OP_LOADI4_MEM;
13156 case OP_LOADU4_MEMBASE:
13157 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist when registers are 64 bit wide. */
13158 #if SIZEOF_REGISTER == 8
13159 case OP_LOADI8_MEMBASE:
13160 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result feeds STORE_OPCODE (a membase store),
 * return a fused read-modify-write opcode that operates directly on memory
 * (e.g. add-to-membase), avoiding the intermediate register.  Only defined
 * for x86/amd64; a prerequisite check rejects store opcodes whose operand
 * width the target cannot fuse (x86: pointer-sized/I4 stores; amd64: also
 * I8 stores).  The `case` labels for the individual ALU opcodes are elided
 * in this extract — the mapping is inferred from the returned names
 * (add/sub/and/or/xor, in both register-source and immediate-source forms;
 * amd64 has separate 64 bit OP_AMD64_* variants).
 */
13169 op_to_op_dest_membase (int store_opcode, int opcode)
13171 #if defined(TARGET_X86)
13172 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13177 return OP_X86_ADD_MEMBASE_REG;
13179 return OP_X86_SUB_MEMBASE_REG;
13181 return OP_X86_AND_MEMBASE_REG;
13183 return OP_X86_OR_MEMBASE_REG;
13185 return OP_X86_XOR_MEMBASE_REG;
13188 return OP_X86_ADD_MEMBASE_IMM;
13191 return OP_X86_SUB_MEMBASE_IMM;
13194 return OP_X86_AND_MEMBASE_IMM;
13197 return OP_X86_OR_MEMBASE_IMM;
13200 return OP_X86_XOR_MEMBASE_IMM;
13206 #if defined(TARGET_AMD64)
13207 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ALU ops reuse the OP_X86_* membase opcodes on amd64. */
13212 return OP_X86_ADD_MEMBASE_REG;
13214 return OP_X86_SUB_MEMBASE_REG;
13216 return OP_X86_AND_MEMBASE_REG;
13218 return OP_X86_OR_MEMBASE_REG;
13220 return OP_X86_XOR_MEMBASE_REG;
13222 return OP_X86_ADD_MEMBASE_IMM;
13224 return OP_X86_SUB_MEMBASE_IMM;
13226 return OP_X86_AND_MEMBASE_IMM;
13228 return OP_X86_OR_MEMBASE_IMM;
13230 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ALU ops use the dedicated OP_AMD64_* membase opcodes. */
13232 return OP_AMD64_ADD_MEMBASE_REG;
13234 return OP_AMD64_SUB_MEMBASE_REG;
13236 return OP_AMD64_AND_MEMBASE_REG;
13238 return OP_AMD64_OR_MEMBASE_REG;
13240 return OP_AMD64_XOR_MEMBASE_REG;
13243 return OP_AMD64_ADD_MEMBASE_IMM;
13246 return OP_AMD64_SUB_MEMBASE_IMM;
13249 return OP_AMD64_AND_MEMBASE_IMM;
13252 return OP_AMD64_OR_MEMBASE_IMM;
13255 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with a following 1 byte membase
 * store into a single setcc-to-memory instruction on x86/amd64.  The two
 * visible mappings produce OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE and
 * are guarded on STORE_OPCODE being OP_STOREI1_MEMBASE_REG (setcc writes a
 * single byte).  The `case` labels — presumably the EQ/NE compare-result
 * opcodes — are elided in this extract; confirm against the full source.
 */
13265 op_to_op_store_membase (int store_opcode, int opcode)
13267 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13270 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13271 return OP_X86_SETEQ_MEMBASE;
13273 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13274 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) into the *first* source operand of
 * OPCODE, producing an x86/amd64 opcode that reads that operand directly
 * from memory (push-from-membase, compare-with-membase, ...).  Returns the
 * fused opcode, or presumably a "no fusion" value for unsupported
 * combinations (the fallthrough paths are elided in this extract).
 *
 *   On amd64 the operand width decides between the 32 bit
 * OP_AMD64_ICOMPARE_* and 64 bit OP_AMD64_COMPARE_* forms; cfg->backend->ilp32
 * distinguishes how the pointer-sized OP_LOAD_MEMBASE must be treated.
 */
13282 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13285 /* FIXME: This has sign extension issues */
/* Special case: 8 bit unsigned load + 32 bit immediate compare. */
13287 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13288 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Other fusions require a pointer-sized or 4 byte load. */
13291 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13296 return OP_X86_PUSH_MEMBASE;
13297 case OP_COMPARE_IMM:
13298 case OP_ICOMPARE_IMM:
13299 return OP_X86_COMPARE_MEMBASE_IMM;
13302 return OP_X86_COMPARE_MEMBASE_REG;
13306 #ifdef TARGET_AMD64
13307 /* FIXME: This has sign extension issues */
13309 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13310 return OP_X86_COMPARE_MEMBASE8_IMM;
13315 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13316 return OP_X86_PUSH_MEMBASE;
/* The immediate-compare fusion below is intentionally disabled (commented
 * out in the original): it would only be valid for 32 bit immediates. */
13318 /* FIXME: This only works for 32 bit immediates
13319 case OP_COMPARE_IMM:
13320 case OP_LCOMPARE_IMM:
13321 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13322 return OP_AMD64_COMPARE_MEMBASE_IMM;
13324 case OP_ICOMPARE_IMM:
13325 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13326 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32 vs 64 bit form from the load width. */
13330 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13331 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13332 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13333 return OP_AMD64_COMPARE_MEMBASE_REG;
13336 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13337 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) into the *second* source operand of
 * OPCODE, producing an x86/amd64 reg-op-membase opcode
 * (compare/add/sub/and/or/xor with a memory operand).  The `case` labels
 * for the individual ALU opcodes are elided in this extract.
 *
 *   On x86 only pointer-sized or 4 byte loads qualify.  On amd64 the load
 * width selects between the 32 bit forms (OP_X86_* / OP_AMD64_ICOMPARE_*)
 * and the 64 bit OP_AMD64_* forms, with cfg->backend->ilp32 deciding how the
 * pointer-sized OP_LOAD_MEMBASE is classified.
 */
13346 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13349 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13355 return OP_X86_COMPARE_REG_MEMBASE;
13357 return OP_X86_ADD_REG_MEMBASE;
13359 return OP_X86_SUB_REG_MEMBASE;
13361 return OP_X86_AND_REG_MEMBASE;
13363 return OP_X86_OR_REG_MEMBASE;
13365 return OP_X86_XOR_REG_MEMBASE;
13369 #ifdef TARGET_AMD64
/* 32 bit operand: reuse the x86 opcodes (amd64 encodes them the same). */
13370 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13373 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13375 return OP_X86_ADD_REG_MEMBASE;
13377 return OP_X86_SUB_REG_MEMBASE;
13379 return OP_X86_AND_REG_MEMBASE;
13381 return OP_X86_OR_REG_MEMBASE;
13383 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operand: use the dedicated OP_AMD64_* opcodes. */
13385 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13389 return OP_AMD64_COMPARE_REG_MEMBASE;
13391 return OP_AMD64_ADD_REG_MEMBASE;
13393 return OP_AMD64_SUB_REG_MEMBASE;
13395 return OP_AMD64_AND_REG_MEMBASE;
13397 return OP_AMD64_OR_REG_MEMBASE;
13399 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to convert opcodes whose immediate
 * form would have to be *emulated* in software on this target: long shifts
 * on 32 bit registers (unless the arch handles them natively), and mul/div/rem
 * when the arch emulates them.  The rejecting `case` labels/returns are
 * elided in this extract; everything that falls through delegates to
 * mono_op_to_op_imm ().
 */
13408 mono_op_to_op_imm_noemul (int opcode)
13411 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13417 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13424 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13429 return mono_op_to_op_imm (opcode);
13434 * mono_handle_global_vregs:
13436 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13440 mono_handle_global_vregs (MonoCompile *cfg)
13442 gint32 *vreg_to_bb;
13443 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, block_num + 1 = seen only in that bblock,
 * -1 = seen in more than one bblock (i.e. must become global).
 * NOTE(review): `sizeof (gint32*)` looks like a typo for `sizeof (gint32)` —
 * it over-allocates (8 bytes per entry on 64 bit) but is otherwise harmless;
 * confirm against upstream before changing. */
13446 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13448 #ifdef MONO_ARCH_SIMD_INTRINSICS
13449 if (cfg->uses_simd_intrinsics)
13450 mono_simd_simplify_indirection (cfg);
13453 /* Find local vregs used in more than one bb */
13454 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13455 MonoInst *ins = bb->code;
13456 int block_num = bb->block_num;
13458 if (cfg->verbose_level > 2)
13459 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13462 for (; ins; ins = ins->next) {
13463 const char *spec = INS_INFO (ins->opcode);
13464 int regtype = 0, regindex;
13467 if (G_UNLIKELY (cfg->verbose_level > 2))
13468 mono_print_ins (ins);
/* By this point all CIL-level opcodes must have been lowered to machine IR. */
13470 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn; a ' ' in the
 * spec string means the slot is unused for this opcode. */
13472 for (regindex = 0; regindex < 4; regindex ++) {
13475 if (regindex == 0) {
13476 regtype = spec [MONO_INST_DEST];
13477 if (regtype == ' ')
13480 } else if (regindex == 1) {
13481 regtype = spec [MONO_INST_SRC1];
13482 if (regtype == ' ')
13485 } else if (regindex == 2) {
13486 regtype = spec [MONO_INST_SRC2];
13487 if (regtype == ' ')
13490 } else if (regindex == 3) {
13491 regtype = spec [MONO_INST_SRC3];
13492 if (regtype == ' ')
13497 #if SIZEOF_REGISTER == 4
13498 /* In the LLVM case, the long opcodes are not decomposed */
13499 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13501 * Since some instructions reference the original long vreg,
13502 * and some reference the two component vregs, it is quite hard
13503 * to determine when it needs to be global. So be conservative.
13505 if (!get_vreg_to_inst (cfg, vreg)) {
13506 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13508 if (cfg->verbose_level > 2)
13509 printf ("LONG VREG R%d made global.\n", vreg);
13513 * Make the component vregs volatile since the optimizations can
13514 * get confused otherwise.
13516 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13517 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13521 g_assert (vreg != -1);
13523 prev_bb = vreg_to_bb [vreg];
13524 if (prev_bb == 0) {
13525 /* 0 is a valid block num */
13526 vreg_to_bb [vreg] = block_num + 1;
13527 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
13528 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13531 if (!get_vreg_to_inst (cfg, vreg)) {
13532 if (G_UNLIKELY (cfg->verbose_level > 2))
13533 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate managed type for the vreg:
 * object ref / native int / int64 / double / value type by regtype. */
13537 if (vreg_is_ref (cfg, vreg))
13538 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13540 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13543 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13546 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13550 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13553 g_assert_not_reached ();
13557 /* Flag as having been used in more than one bb */
13558 vreg_to_bb [vreg] = -1;
13564 /* If a variable is used in only one bblock, convert it into a local vreg */
13565 for (i = 0; i < cfg->num_varinfo; i++) {
13566 MonoInst *var = cfg->varinfo [i];
13567 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13569 switch (var->type) {
13575 #if SIZEOF_REGISTER == 8
13578 #if !defined(TARGET_X86)
13579 /* Enabling this screws up the fp stack on x86 */
13582 if (mono_arch_is_soft_float ())
13586 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13590 /* Arguments are implicitly global */
13591 /* Putting R4 vars into registers doesn't work currently */
13592 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13593 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13595 * Make that the variable's liveness interval doesn't contain a call, since
13596 * that would cause the lvreg to be spilled, making the whole optimization
13599 /* This is too slow for JIT compilation */
13601 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13603 int def_index, call_index, ins_index;
13604 gboolean spilled = FALSE;
13609 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13610 const char *spec = INS_INFO (ins->opcode);
13612 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13613 def_index = ins_index;
/* NOTE(review): both legs of this || test SRC1/sreg1 — the second leg was
 * presumably meant to test SRC2/sreg2, so uses via the second source
 * operand may be missed here.  Confirm against upstream before fixing. */
13615 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13616 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13617 if (call_index > def_index) {
13623 if (MONO_IS_CALL (ins))
13624 call_index = ins_index;
13634 if (G_UNLIKELY (cfg->verbose_level > 2))
13635 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead so only the raw vreg remains. */
13636 var->flags |= MONO_INST_IS_DEAD;
13637 cfg->vreg_to_inst [var->dreg] = NULL;
13644 * Compress the varinfo and vars tables so the liveness computation is faster and
13645 * takes up less space.
13648 for (i = 0; i < cfg->num_varinfo; ++i) {
13649 MonoInst *var = cfg->varinfo [i];
13650 if (pos < i && cfg->locals_start == i)
13651 cfg->locals_start = pos;
13652 if (!(var->flags & MONO_INST_IS_DEAD)) {
13654 cfg->varinfo [pos] = cfg->varinfo [i];
13655 cfg->varinfo [pos]->inst_c0 = pos;
13656 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13657 cfg->vars [pos].idx = pos;
13658 #if SIZEOF_REGISTER == 4
13659 if (cfg->varinfo [pos]->type == STACK_I8) {
13660 /* Modify the two component vars too */
13663 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13664 var1->inst_c0 = pos;
13665 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13666 var1->inst_c0 = pos;
13673 cfg->num_varinfo = pos;
13674 if (cfg->locals_start > cfg->num_varinfo)
13675 cfg->locals_start = cfg->num_varinfo;
13679 * mono_allocate_gsharedvt_vars:
13681 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13682 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13685 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* One slot-index entry per vreg; 0 means "not a gsharedvt var". */
13689 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13691 for (i = 0; i < cfg->num_varinfo; ++i) {
13692 MonoInst *ins = cfg->varinfo [i];
13695 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get a runtime-info slot; the stored index is biased by +1 so that
 * 0 can keep meaning "no entry". */
13696 if (i >= cfg->locals_start) {
13698 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13699 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13700 ins->opcode = OP_GSHAREDVT_LOCAL;
13701 ins->inst_imm = idx;
/* Arguments are marked with -1 and addressed via their reg offset. */
13704 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13705 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13712 * mono_spill_global_vars:
13714 * Generate spill code for variables which are not allocated to registers,
13715 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13716 * code is generated which could be optimized by the local optimization passes.
13719 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13721 MonoBasicBlock *bb;
13723 int orig_next_vreg;
13724 guint32 *vreg_to_lvreg;
13726 guint32 i, lvregs_len, lvregs_size;
13727 gboolean dest_has_lvreg = FALSE;
13728 MonoStackType stacktypes [128];
13729 MonoInst **live_range_start, **live_range_end;
13730 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13732 *need_local_opts = FALSE;
13734 memset (spec2, 0, sizeof (spec2));
13736 /* FIXME: Move this function to mini.c */
13737 stacktypes ['i'] = STACK_PTR;
13738 stacktypes ['l'] = STACK_I8;
13739 stacktypes ['f'] = STACK_R8;
13740 #ifdef MONO_ARCH_SIMD_INTRINSICS
13741 stacktypes ['x'] = STACK_VTYPE;
13744 #if SIZEOF_REGISTER == 4
13745 /* Create MonoInsts for longs */
13746 for (i = 0; i < cfg->num_varinfo; i++) {
13747 MonoInst *ins = cfg->varinfo [i];
13749 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13750 switch (ins->type) {
13755 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13758 g_assert (ins->opcode == OP_REGOFFSET);
13760 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13762 tree->opcode = OP_REGOFFSET;
13763 tree->inst_basereg = ins->inst_basereg;
13764 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13766 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13768 tree->opcode = OP_REGOFFSET;
13769 tree->inst_basereg = ins->inst_basereg;
13770 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13780 if (cfg->compute_gc_maps) {
13781 /* registers need liveness info even for !non refs */
13782 for (i = 0; i < cfg->num_varinfo; i++) {
13783 MonoInst *ins = cfg->varinfo [i];
13785 if (ins->opcode == OP_REGVAR)
13786 ins->flags |= MONO_INST_GC_TRACK;
13790 /* FIXME: widening and truncation */
13793 * As an optimization, when a variable allocated to the stack is first loaded into
13794 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13795 * the variable again.
13797 orig_next_vreg = cfg->next_vreg;
13798 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13799 lvregs_size = 1024;
13800 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13804 * These arrays contain the first and last instructions accessing a given
13806 * Since we emit bblocks in the same order we process them here, and we
13807 * don't split live ranges, these will precisely describe the live range of
13808 * the variable, i.e. the instruction range where a valid value can be found
13809 * in the variables location.
13810 * The live range is computed using the liveness info computed by the liveness pass.
13811 * We can't use vmv->range, since that is an abstract live range, and we need
13812 * one which is instruction precise.
13813 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13815 /* FIXME: Only do this if debugging info is requested */
13816 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13817 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13818 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13819 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13821 /* Add spill loads/stores */
13822 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13825 if (cfg->verbose_level > 2)
13826 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13828 /* Clear vreg_to_lvreg array */
13829 for (i = 0; i < lvregs_len; i++)
13830 vreg_to_lvreg [lvregs [i]] = 0;
13834 MONO_BB_FOR_EACH_INS (bb, ins) {
13835 const char *spec = INS_INFO (ins->opcode);
13836 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13837 gboolean store, no_lvreg;
13838 int sregs [MONO_MAX_SRC_REGS];
13840 if (G_UNLIKELY (cfg->verbose_level > 2))
13841 mono_print_ins (ins);
13843 if (ins->opcode == OP_NOP)
13847 * We handle LDADDR here as well, since it can only be decomposed
13848 * when variable addresses are known.
13850 if (ins->opcode == OP_LDADDR) {
13851 MonoInst *var = (MonoInst *)ins->inst_p0;
13853 if (var->opcode == OP_VTARG_ADDR) {
13854 /* Happens on SPARC/S390 where vtypes are passed by reference */
13855 MonoInst *vtaddr = var->inst_left;
13856 if (vtaddr->opcode == OP_REGVAR) {
13857 ins->opcode = OP_MOVE;
13858 ins->sreg1 = vtaddr->dreg;
13860 else if (var->inst_left->opcode == OP_REGOFFSET) {
13861 ins->opcode = OP_LOAD_MEMBASE;
13862 ins->inst_basereg = vtaddr->inst_basereg;
13863 ins->inst_offset = vtaddr->inst_offset;
13866 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13867 /* gsharedvt arg passed by ref */
13868 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13870 ins->opcode = OP_LOAD_MEMBASE;
13871 ins->inst_basereg = var->inst_basereg;
13872 ins->inst_offset = var->inst_offset;
13873 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13874 MonoInst *load, *load2, *load3;
13875 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13876 int reg1, reg2, reg3;
13877 MonoInst *info_var = cfg->gsharedvt_info_var;
13878 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13882 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13885 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13887 g_assert (info_var);
13888 g_assert (locals_var);
13890 /* Mark the instruction used to compute the locals var as used */
13891 cfg->gsharedvt_locals_var_ins = NULL;
13893 /* Load the offset */
13894 if (info_var->opcode == OP_REGOFFSET) {
13895 reg1 = alloc_ireg (cfg);
13896 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13897 } else if (info_var->opcode == OP_REGVAR) {
13899 reg1 = info_var->dreg;
13901 g_assert_not_reached ();
13903 reg2 = alloc_ireg (cfg);
13904 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13905 /* Load the locals area address */
13906 reg3 = alloc_ireg (cfg);
13907 if (locals_var->opcode == OP_REGOFFSET) {
13908 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13909 } else if (locals_var->opcode == OP_REGVAR) {
13910 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13912 g_assert_not_reached ();
13914 /* Compute the address */
13915 ins->opcode = OP_PADD;
13919 mono_bblock_insert_before_ins (bb, ins, load3);
13920 mono_bblock_insert_before_ins (bb, load3, load2);
13922 mono_bblock_insert_before_ins (bb, load2, load);
13924 g_assert (var->opcode == OP_REGOFFSET);
13926 ins->opcode = OP_ADD_IMM;
13927 ins->sreg1 = var->inst_basereg;
13928 ins->inst_imm = var->inst_offset;
13931 *need_local_opts = TRUE;
13932 spec = INS_INFO (ins->opcode);
13935 if (ins->opcode < MONO_CEE_LAST) {
13936 mono_print_ins (ins);
13937 g_assert_not_reached ();
13941 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13945 if (MONO_IS_STORE_MEMBASE (ins)) {
13946 tmp_reg = ins->dreg;
13947 ins->dreg = ins->sreg2;
13948 ins->sreg2 = tmp_reg;
13951 spec2 [MONO_INST_DEST] = ' ';
13952 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13953 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13954 spec2 [MONO_INST_SRC3] = ' ';
13956 } else if (MONO_IS_STORE_MEMINDEX (ins))
13957 g_assert_not_reached ();
13962 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13963 printf ("\t %.3s %d", spec, ins->dreg);
13964 num_sregs = mono_inst_get_src_registers (ins, sregs);
13965 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13966 printf (" %d", sregs [srcindex]);
13973 regtype = spec [MONO_INST_DEST];
13974 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13977 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13978 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13979 MonoInst *store_ins;
13981 MonoInst *def_ins = ins;
13982 int dreg = ins->dreg; /* The original vreg */
13984 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13986 if (var->opcode == OP_REGVAR) {
13987 ins->dreg = var->dreg;
13988 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13990 * Instead of emitting a load+store, use a _membase opcode.
13992 g_assert (var->opcode == OP_REGOFFSET);
13993 if (ins->opcode == OP_MOVE) {
13997 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13998 ins->inst_basereg = var->inst_basereg;
13999 ins->inst_offset = var->inst_offset;
14002 spec = INS_INFO (ins->opcode);
14006 g_assert (var->opcode == OP_REGOFFSET);
14008 prev_dreg = ins->dreg;
14010 /* Invalidate any previous lvreg for this vreg */
14011 vreg_to_lvreg [ins->dreg] = 0;
14015 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14017 store_opcode = OP_STOREI8_MEMBASE_REG;
14020 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14022 #if SIZEOF_REGISTER != 8
14023 if (regtype == 'l') {
14024 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14025 mono_bblock_insert_after_ins (bb, ins, store_ins);
14026 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14027 mono_bblock_insert_after_ins (bb, ins, store_ins);
14028 def_ins = store_ins;
14033 g_assert (store_opcode != OP_STOREV_MEMBASE);
14035 /* Try to fuse the store into the instruction itself */
14036 /* FIXME: Add more instructions */
14037 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14038 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14039 ins->inst_imm = ins->inst_c0;
14040 ins->inst_destbasereg = var->inst_basereg;
14041 ins->inst_offset = var->inst_offset;
14042 spec = INS_INFO (ins->opcode);
14043 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14044 ins->opcode = store_opcode;
14045 ins->inst_destbasereg = var->inst_basereg;
14046 ins->inst_offset = var->inst_offset;
14050 tmp_reg = ins->dreg;
14051 ins->dreg = ins->sreg2;
14052 ins->sreg2 = tmp_reg;
14055 spec2 [MONO_INST_DEST] = ' ';
14056 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14057 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14058 spec2 [MONO_INST_SRC3] = ' ';
14060 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14061 // FIXME: The backends expect the base reg to be in inst_basereg
14062 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14064 ins->inst_basereg = var->inst_basereg;
14065 ins->inst_offset = var->inst_offset;
14066 spec = INS_INFO (ins->opcode);
14068 /* printf ("INS: "); mono_print_ins (ins); */
14069 /* Create a store instruction */
14070 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14072 /* Insert it after the instruction */
14073 mono_bblock_insert_after_ins (bb, ins, store_ins);
14075 def_ins = store_ins;
14078 * We can't assign ins->dreg to var->dreg here, since the
14079 * sregs could use it. So set a flag, and do it after
14082 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14083 dest_has_lvreg = TRUE;
14088 if (def_ins && !live_range_start [dreg]) {
14089 live_range_start [dreg] = def_ins;
14090 live_range_start_bb [dreg] = bb;
14093 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14096 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14097 tmp->inst_c1 = dreg;
14098 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14105 num_sregs = mono_inst_get_src_registers (ins, sregs);
14106 for (srcindex = 0; srcindex < 3; ++srcindex) {
14107 regtype = spec [MONO_INST_SRC1 + srcindex];
14108 sreg = sregs [srcindex];
14110 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14111 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14112 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14113 MonoInst *use_ins = ins;
14114 MonoInst *load_ins;
14115 guint32 load_opcode;
14117 if (var->opcode == OP_REGVAR) {
14118 sregs [srcindex] = var->dreg;
14119 //mono_inst_set_src_registers (ins, sregs);
14120 live_range_end [sreg] = use_ins;
14121 live_range_end_bb [sreg] = bb;
14123 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14126 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14127 /* var->dreg is a hreg */
14128 tmp->inst_c1 = sreg;
14129 mono_bblock_insert_after_ins (bb, ins, tmp);
14135 g_assert (var->opcode == OP_REGOFFSET);
14137 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14139 g_assert (load_opcode != OP_LOADV_MEMBASE);
14141 if (vreg_to_lvreg [sreg]) {
14142 g_assert (vreg_to_lvreg [sreg] != -1);
14144 /* The variable is already loaded to an lvreg */
14145 if (G_UNLIKELY (cfg->verbose_level > 2))
14146 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14147 sregs [srcindex] = vreg_to_lvreg [sreg];
14148 //mono_inst_set_src_registers (ins, sregs);
14152 /* Try to fuse the load into the instruction */
14153 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14154 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14155 sregs [0] = var->inst_basereg;
14156 //mono_inst_set_src_registers (ins, sregs);
14157 ins->inst_offset = var->inst_offset;
14158 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14159 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14160 sregs [1] = var->inst_basereg;
14161 //mono_inst_set_src_registers (ins, sregs);
14162 ins->inst_offset = var->inst_offset;
14164 if (MONO_IS_REAL_MOVE (ins)) {
14165 ins->opcode = OP_NOP;
14168 //printf ("%d ", srcindex); mono_print_ins (ins);
14170 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14172 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14173 if (var->dreg == prev_dreg) {
14175 * sreg refers to the value loaded by the load
14176 * emitted below, but we need to use ins->dreg
14177 * since it refers to the store emitted earlier.
14181 g_assert (sreg != -1);
14182 vreg_to_lvreg [var->dreg] = sreg;
14183 if (lvregs_len >= lvregs_size) {
14184 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14185 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14186 lvregs = new_lvregs;
14189 lvregs [lvregs_len ++] = var->dreg;
14193 sregs [srcindex] = sreg;
14194 //mono_inst_set_src_registers (ins, sregs);
14196 #if SIZEOF_REGISTER != 8
14197 if (regtype == 'l') {
14198 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14199 mono_bblock_insert_before_ins (bb, ins, load_ins);
14200 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14201 mono_bblock_insert_before_ins (bb, ins, load_ins);
14202 use_ins = load_ins;
14207 #if SIZEOF_REGISTER == 4
14208 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14210 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14211 mono_bblock_insert_before_ins (bb, ins, load_ins);
14212 use_ins = load_ins;
14216 if (var->dreg < orig_next_vreg) {
14217 live_range_end [var->dreg] = use_ins;
14218 live_range_end_bb [var->dreg] = bb;
14221 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14224 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14225 tmp->inst_c1 = var->dreg;
14226 mono_bblock_insert_after_ins (bb, ins, tmp);
14230 mono_inst_set_src_registers (ins, sregs);
14232 if (dest_has_lvreg) {
14233 g_assert (ins->dreg != -1);
14234 vreg_to_lvreg [prev_dreg] = ins->dreg;
14235 if (lvregs_len >= lvregs_size) {
14236 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14237 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14238 lvregs = new_lvregs;
14241 lvregs [lvregs_len ++] = prev_dreg;
14242 dest_has_lvreg = FALSE;
14246 tmp_reg = ins->dreg;
14247 ins->dreg = ins->sreg2;
14248 ins->sreg2 = tmp_reg;
14251 if (MONO_IS_CALL (ins)) {
14252 /* Clear vreg_to_lvreg array */
14253 for (i = 0; i < lvregs_len; i++)
14254 vreg_to_lvreg [lvregs [i]] = 0;
14256 } else if (ins->opcode == OP_NOP) {
14258 MONO_INST_NULLIFY_SREGS (ins);
14261 if (cfg->verbose_level > 2)
14262 mono_print_ins_index (1, ins);
14265 /* Extend the live range based on the liveness info */
14266 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14267 for (i = 0; i < cfg->num_varinfo; i ++) {
14268 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14270 if (vreg_is_volatile (cfg, vi->vreg))
14271 /* The liveness info is incomplete */
14274 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14275 /* Live from at least the first ins of this bb */
14276 live_range_start [vi->vreg] = bb->code;
14277 live_range_start_bb [vi->vreg] = bb;
14280 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14281 /* Live at least until the last ins of this bb */
14282 live_range_end [vi->vreg] = bb->last_ins;
14283 live_range_end_bb [vi->vreg] = bb;
14290 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14291 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14293 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14294 for (i = 0; i < cfg->num_varinfo; ++i) {
14295 int vreg = MONO_VARINFO (cfg, i)->vreg;
14298 if (live_range_start [vreg]) {
14299 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14301 ins->inst_c1 = vreg;
14302 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14304 if (live_range_end [vreg]) {
14305 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14307 ins->inst_c1 = vreg;
14308 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14309 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14311 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14316 if (cfg->gsharedvt_locals_var_ins) {
14317 /* Nullify if unused */
14318 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14319 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14322 g_free (live_range_start);
14323 g_free (live_range_end);
14324 g_free (live_range_start_bb);
14325 g_free (live_range_end_bb);
14331 * - use 'iadd' instead of 'int_add'
14332 * - handling ovf opcodes: decompose in method_to_ir.
14333 * - unify iregs/fregs
14334 * -> partly done, the missing parts are:
14335 * - a more complete unification would involve unifying the hregs as well, so
14336 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14337 * would no longer map to the machine hregs, so the code generators would need to
14338 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14339 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14340 * fp/non-fp branches speeds it up by about 15%.
14341 * - use sext/zext opcodes instead of shifts
14343 * - get rid of TEMPLOADs if possible and use vregs instead
14344 * - clean up usage of OP_P/OP_ opcodes
14345 * - cleanup usage of DUMMY_USE
14346 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14348 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14349 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14350 * - make sure handle_stack_args () is called before the branch is emitted
14351 * - when the new IR is done, get rid of all unused stuff
14352 * - COMPARE/BEQ as separate instructions or unify them ?
14353 * - keeping them separate allows specialized compare instructions like
14354 * compare_imm, compare_membase
14355 * - most back ends unify fp compare+branch, fp compare+ceq
14356 * - integrate mono_save_args into inline_method
14357 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14358 * - handle long shift opts on 32 bit platforms somehow: they require
14359 * 3 sregs (2 for arg1 and 1 for arg2)
14360 * - make byref a 'normal' type.
14361 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14362 * variable if needed.
14363 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14364 * like inline_method.
14365 * - remove inlining restrictions
14366 * - fix LNEG and enable cfold of INEG
14367 * - generalize x86 optimizations like ldelema as a peephole optimization
14368 * - add store_mem_imm for amd64
14369 * - optimize the loading of the interruption flag in the managed->native wrappers
14370 * - avoid special handling of OP_NOP in passes
14371 * - move code inserting instructions into one function/macro.
14372 * - try a coalescing phase after liveness analysis
14373 * - add float -> vreg conversion + local optimizations on !x86
14374 * - figure out how to handle decomposed branches during optimizations, ie.
14375 * compare+branch, op_jump_table+op_br etc.
14376 * - promote RuntimeXHandles to vregs
14377 * - vtype cleanups:
14378 * - add a NEW_VARLOADA_VREG macro
14379 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14380 * accessing vtype fields.
14381 * - get rid of I8CONST on 64 bit platforms
14382 * - dealing with the increase in code size due to branches created during opcode
14384 * - use extended basic blocks
14385 * - all parts of the JIT
14386 * - handle_global_vregs () && local regalloc
14387 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14388 * - sources of increase in code size:
14391 * - isinst and castclass
14392 * - lvregs not allocated to global registers even if used multiple times
14393 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14395 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14396 * - add all micro optimizations from the old JIT
14397 * - put tree optimizations into the deadce pass
14398 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14399 * specific function.
14400 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14401 * fcompare + branchCC.
14402 * - create a helper function for allocating a stack slot, taking into account
14403 * MONO_CFG_HAS_SPILLUP.
14405 * - merge the ia64 switch changes.
14406 * - optimize mono_regstate2_alloc_int/float.
14407 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14408 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14409 * parts of the tree could be separated by other instructions, killing the tree
14410 * arguments, or stores killing loads etc. Also, should we fold loads into other
14411 * instructions if the result of the load is used multiple times ?
14412 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14413 * - LAST MERGE: 108395.
14414 * - when returning vtypes in registers, generate IR and append it to the end of the
14415 * last bb instead of doing it in the epilog.
14416 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14424 - When to decompose opcodes:
14425 - earlier: this makes some optimizations hard to implement, since the low level IR
 14426 no longer contains the necessary information. But it is easier to do.
14427 - later: harder to implement, enables more optimizations.
14428 - Branches inside bblocks:
14429 - created when decomposing complex opcodes.
14430 - branches to another bblock: harmless, but not tracked by the branch
14431 optimizations, so need to branch to a label at the start of the bblock.
14432 - branches to inside the same bblock: very problematic, trips up the local
 14433 reg allocator. Can be fixed by splitting the current bblock, but that is a
14434 complex operation, since some local vregs can become global vregs etc.
14435 - Local/global vregs:
14436 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14437 local register allocator.
14438 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14439 structure, created by mono_create_var (). Assigned to hregs or the stack by
14440 the global register allocator.
14441 - When to do optimizations like alu->alu_imm:
14442 - earlier -> saves work later on since the IR will be smaller/simpler
14443 - later -> can work on more instructions
14444 - Handling of valuetypes:
14445 - When a vtype is pushed on the stack, a new temporary is created, an
14446 instruction computing its address (LDADDR) is emitted and pushed on
14447 the stack. Need to optimize cases when the vtype is used immediately as in
14448 argument passing, stloc etc.
14449 - Instead of the to_end stuff in the old JIT, simply call the function handling
14450 the values on the stack before emitting the last instruction of the bb.
14453 #else /* !DISABLE_JIT */
14456 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14460 #endif /* !DISABLE_JIT */