3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 /* helper methods signatures */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 /* type loading helpers */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
164 * Instruction metadata
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
355 g_print ("%s", str->str);
356 g_string_free (str, TRUE);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
376 if (mini_get_debug_options ()->break_on_unverified)
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 if (cfg->verbose_level > 2) \
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
417 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
453 /* Emit conversions so both operands of a binary opcode are of the same type */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
554 * link_bblock: Links two basic blocks
556 * links two basic blocks in the control flow graph, the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * the control flow ends to after 'from'.
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
797 if (type->data.klass->enumtype) {
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
828 * The following tables are used to quickly validate the IL code in type_from_op ().
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
1166 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1172 param_table [STACK_MAX] [STACK_MAX] = {
1177 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1182 switch (args->type) {
1192 for (i = 0; i < sig->param_count; ++i) {
1193 switch (args [i].type) {
1197 if (!sig->params [i]->byref)
1201 if (sig->params [i]->byref)
1203 switch (sig->params [i]->type) {
1204 case MONO_TYPE_CLASS:
1205 case MONO_TYPE_STRING:
1206 case MONO_TYPE_OBJECT:
1207 case MONO_TYPE_SZARRAY:
1208 case MONO_TYPE_ARRAY:
1215 if (sig->params [i]->byref)
1217 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1226 /*if (!param_table [args [i].type] [sig->params [i]->type])
1234 * When we need a pointer to the current domain many times in a method, we
1235 * call mono_domain_get() once and we store the result in a local variable.
1236 * This function returns the variable that represents the MonoDomain*.
1238 inline static MonoInst *
1239 mono_get_domainvar (MonoCompile *cfg)
1241 if (!cfg->domainvar)
1242 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1243 return cfg->domainvar;
1247 * The got_var contains the address of the Global Offset Table when AOT
1251 mono_get_got_var (MonoCompile *cfg)
1253 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1255 if (!cfg->got_var) {
1256 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1258 return cfg->got_var;
1262 mono_create_rgctx_var (MonoCompile *cfg)
1264 if (!cfg->rgctx_var) {
1265 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1266 /* force the var to be stack allocated */
1267 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1272 mono_get_vtable_var (MonoCompile *cfg)
1274 g_assert (cfg->gshared);
1276 mono_create_rgctx_var (cfg);
1278 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType,
 * used when a local variable must be created to hold the value.
 */
1282 type_from_stack_type (MonoInst *ins) {
1283 switch (ins->type) {
1284 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1285 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1286 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1287 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1288 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* NOTE(review): the case label for this return is not visible here;
 * presumably STACK_MP, which yields a managed pointer (this_arg). */
1290 return &ins->klass->this_arg;
1291 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1292 case STACK_VTYPE: return &ins->klass->byval_arg;
/* Unhandled stack types are a compiler bug, not user error. */
1294 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type (): map a MonoType to the STACK_* kind
 * it occupies on the evaluation stack.
 */
1299 static G_GNUC_UNUSED int
1300 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. before classifying. */
1302 t = mono_type_get_underlying_type (t);
1314 case MONO_TYPE_FNPTR:
1316 case MONO_TYPE_CLASS:
1317 case MONO_TYPE_STRING:
1318 case MONO_TYPE_OBJECT:
1319 case MONO_TYPE_SZARRAY:
1320 case MONO_TYPE_ARRAY:
/* R4 maps to STACK_R4 or STACK_R8 depending on backend float handling. */
1326 return cfg->r4_stack_type;
1329 case MONO_TYPE_VALUETYPE:
1330 case MONO_TYPE_TYPEDBYREF:
1332 case MONO_TYPE_GENERICINST:
/* Generic instances are vtypes only when the instantiation is. */
1333 if (mono_type_generic_inst_is_valuetype (t))
1339 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element class implied by a CEE_LDELEM_*/CEE_STELEM_* opcode.
 * NOTE(review): the case labels for the first few returns fall in gaps of
 * this excerpt; the pairing below follows the visible CIL opcode ordering.
 */
1346 array_access_to_klass (int opcode)
1350 return mono_defaults.byte_class;
1352 return mono_defaults.uint16_class;
1355 return mono_defaults.int_class;
1358 return mono_defaults.sbyte_class;
1361 return mono_defaults.int16_class;
1364 return mono_defaults.int32_class;
1366 return mono_defaults.uint32_class;
1369 return mono_defaults.int64_class;
1372 return mono_defaults.single_class;
1375 return mono_defaults.double_class;
1376 case CEE_LDELEM_REF:
1377 case CEE_STELEM_REF:
1378 return mono_defaults.object_class;
/* Any other opcode is not an array access. */
1380 g_assert_not_reached ();
1386 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return (or create) the local used to carry the stack value INS across a
 * basic-block boundary at stack SLOT.  Variables are cached per
 * (slot, stack-type) pair in cfg->intvars so joins reuse the same local.
 */
1389 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1394 /* inlining can result in deeper stacks */
1395 if (slot >= cfg->header->max_stack)
1396 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per type per slot. */
1398 pos = ins->type - 1 + slot * STACK_MAX;
1400 switch (ins->type) {
1407 if ((vnum = cfg->intvars [pos]))
1408 return cfg->varinfo [vnum];
1409 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1410 cfg->intvars [pos] = res->inst_c0;
/* NOTE(review): this second create appears to be the uncached fallback
 * path (e.g. for vtypes/objs) — the switch arms are not fully visible. */
1413 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Record the (image, token) pair that produced KEY so the AOT compiler can
 * later map the runtime object back to a metadata reference.
 */
1419 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1422 * Don't use this if a generic_context is set, since that means AOT can't
1423 * look up the method using just the image+token.
1424 * table == 0 means this is a reference made from a wrapper.
1426 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1427 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1428 jump_info_token->image = image;
1429 jump_info_token->token = token;
1430 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1435 * This function is called to handle items that are left on the evaluation stack
1436 * at basic block boundaries. What happens is that we save the values to local variables
1437 * and we reload them later when first entering the target basic block (with the
1438 * handle_loaded_temps () function).
1439 * A single joint point will use the same variables (stored in the array bb->out_stack or
1440 * bb->in_stack, if the basic block is before or after the joint point).
1442 * This function needs to be called _before_ emitting the last instruction of
1443 * the bb (i.e. before emitting a branch).
1444 * If the stack merge fails at a join point, cfg->unverifiable is set.
1447 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1450 MonoBasicBlock *bb = cfg->cbb;
1451 MonoBasicBlock *outb;
1452 MonoInst *inst, **locals;
1457 if (cfg->verbose_level > 3)
1458 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which locals carry the stack. */
1459 if (!bb->out_scount) {
1460 bb->out_scount = count;
1461 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an already-assigned in_stack of a successor. */
1463 for (i = 0; i < bb->out_count; ++i) {
1464 outb = bb->out_bb [i];
1465 /* exception handlers are linked, but they should not be considered for stack args */
1466 if (outb->flags & BB_EXCEPTION_HANDLER)
1468 //printf (" %d", outb->block_num);
1469 if (outb->in_stack) {
1471 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh carrier variables. */
1477 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1478 for (i = 0; i < count; ++i) {
1480 * try to reuse temps already allocated for this purpouse, if they occupy the same
1481 * stack slot and if they are of the same type.
1482 * This won't cause conflicts since if 'local' is used to
1483 * store one of the values in the in_stack of a bblock, then
1484 * the same variable will be used for the same outgoing stack
1486 * This doesn't work when inlining methods, since the bblocks
1487 * in the inlined methods do not inherit their in_stack from
1488 * the bblock they are inlined to. See bug #58863 for an
1491 if (cfg->inlined_method)
1492 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1494 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bblock's out_stack to each successor's in_stack. */
1499 for (i = 0; i < bb->out_count; ++i) {
1500 outb = bb->out_bb [i];
1501 /* exception handlers are linked, but they should not be considered for stack args */
1502 if (outb->flags & BB_EXCEPTION_HANDLER)
1504 if (outb->in_scount) {
/* Stack depth mismatch at a join point: unverifiable IL. */
1505 if (outb->in_scount != bb->out_scount) {
1506 cfg->unverifiable = TRUE;
1509 continue; /* check they are the same locals */
1511 outb->in_scount = count;
1512 outb->in_stack = bb->out_stack;
1515 locals = bb->out_stack;
/* Spill the current stack values into the carrier locals. */
1517 for (i = 0; i < count; ++i) {
1518 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1519 inst->cil_code = sp [i]->cil_code;
1520 sp [i] = locals [i];
1521 if (cfg->verbose_level > 3)
1522 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1526 * It is possible that the out bblocks already have in_stack assigned, and
1527 * the in_stacks differ. In this case, we will store to all the different
1534 /* Find a bblock which has a different in_stack */
1536 while (bindex < bb->out_count) {
1537 outb = bb->out_bb [bindex];
1538 /* exception handlers are linked, but they should not be considered for stack args */
1539 if (outb->flags & BB_EXCEPTION_HANDLER) {
1543 if (outb->in_stack != locals) {
/* Duplicate the stores into this successor's distinct variable set. */
1544 for (i = 0; i < count; ++i) {
1545 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1546 inst->cil_code = sp [i]->cil_code;
1547 sp [i] = locals [i];
1548 if (cfg->verbose_level > 3)
1549 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1551 locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 * Emit IR loading a runtime constant described by (PATCH_TYPE, DATA).
 * Under AOT this becomes a GOT/patch reference; under the JIT the patch is
 * resolved immediately and emitted as a plain pointer constant.
 */
1561 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1565 if (cfg->compile_aot) {
1566 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1572 ji.type = patch_type;
1573 ji.data.target = data;
1574 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1575 mono_error_assert_ok (&error);
1577 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_memset:
 * Emit inline IR that sets SIZE bytes at DESTREG+OFFSET to VAL (must be 0),
 * honoring ALIGN.  Uses immediate stores for small aligned sizes, then a
 * widest-first sequence of register stores for the remainder.
 */
1583 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zeroing is supported by this helper. */
1587 g_assert (val == 0);
/* Small, aligned case: a single immediate store of the matching width. */
1592 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1595 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1598 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1601 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1603 #if SIZEOF_REGISTER == 8
1605 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize VAL once, then store it repeatedly. */
1611 val_reg = alloc_preg (cfg);
1613 if (SIZEOF_REGISTER == 8)
1614 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1616 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix is handled byte-by-byte. */
1619 /* This could be optimized further if neccesary */
1621 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1628 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: progressively narrower stores for the remaining bytes. */
1642 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1647 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1652 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET
 * with the given ALIGN, as load/store pairs from widest to narrowest.
 */
1659 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1666 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1667 g_assert (size < 10000);
/* Unaligned prefix: copy byte-by-byte until alignment is reached. */
1670 /* This could be optimized further if neccesary */
1672 cur_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks where the backend tolerates unaligned access. */
1681 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1683 cur_reg = alloc_preg (cfg);
1684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1693 cur_reg = alloc_preg (cfg);
1694 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1695 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1701 cur_reg = alloc_preg (cfg);
1702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mono_create_fast_tls_getter:
 * Build (but do not add) an OP_TLS_GET instruction reading the TLS slot for
 * KEY directly.  Returns NULL when AOT-compiling or when the platform has no
 * fast TLS path; callers fall back to the slow getter.
 */
1719 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1721 int tls_offset = mono_tls_get_tls_offset (key);
/* Fixed TLS offsets cannot be baked into AOT images. */
1723 if (cfg->compile_aot)
1726 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1728 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1729 ins->dreg = mono_alloc_preg (cfg);
1730 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Build (but do not add) an OP_TLS_SET instruction storing VALUE into the
 * TLS slot for KEY.  Returns NULL when AOT-compiling or fast TLS is
 * unavailable, mirroring mono_create_fast_tls_getter ().
 */
1737 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1739 int tls_offset = mono_tls_get_tls_offset (key);
1741 if (cfg->compile_aot)
1744 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1746 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1747 ins->sreg1 = value->dreg;
1748 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR reading the TLS value for KEY.  Tries the fast inline path first;
 * otherwise falls back to a trampoline call (AOT) or a JIT icall.
 */
1756 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1758 MonoInst *fast_tls = NULL;
1760 if (!mini_get_debug_options ()->use_fallback_tls)
1761 fast_tls = mono_create_fast_tls_getter (cfg, key);
1764 MONO_ADD_INS (cfg->cbb, fast_tls);
1768 if (cfg->compile_aot) {
1771 * tls getters are critical pieces of code and we don't want to resolve them
1772 * through the standard plt/tramp mechanism since we might expose ourselves
1773 * to crashes and infinite recursions.
1775 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1776 return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
/* JIT fallback: call the per-key getter function directly. */
1778 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1779 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR storing VALUE into the TLS slot for KEY.  Same structure as
 * mono_create_tls_get (): fast inline path, then AOT trampoline or icall.
 */
1784 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1786 MonoInst *fast_tls = NULL;
1788 if (!mini_get_debug_options ()->use_fallback_tls)
1789 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1792 MONO_ADD_INS (cfg->cbb, fast_tls);
1796 if (cfg->compile_aot) {
1798 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1799 return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1801 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1802 return mono_emit_jit_icall (cfg, setter, &value);
1809 * Emit IR to push the current LMF onto the LMF stack.
1812 emit_push_lmf (MonoCompile *cfg)
1815 * Emit IR to push the LMF:
1816 * lmf_addr = <lmf_addr from tls>
1817 * lmf->lmf_addr = lmf_addr
1818 * lmf->prev_lmf = *lmf_addr
1821 MonoInst *ins, *lmf_ins;
1826 int lmf_reg, prev_lmf_reg;
1828 * Store lmf_addr in a variable, so it can be allocated to a global register.
1830 if (!cfg->lmf_addr_var)
1831 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path derives lmf_addr from jit_tls, the other reads it from its own
 * TLS key; which is taken depends on conditions not visible in this excerpt. */
1834 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1836 int jit_tls_dreg = ins->dreg;
1838 lmf_reg = alloc_preg (cfg);
1839 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1841 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1844 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1846 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1847 lmf_reg = ins->dreg;
1849 prev_lmf_reg = alloc_preg (cfg);
1850 /* Save previous_lmf */
1851 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1852 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this frame's LMF the top of the LMF stack. */
1854 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1860 * Emit IR to pop the current LMF from the LMF stack.
1863 emit_pop_lmf (MonoCompile *cfg)
1865 int lmf_reg, lmf_addr_reg;
1871 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1872 lmf_reg = ins->dreg;
1876 * Emit IR to pop the LMF:
1877 * *(lmf->lmf_addr) = lmf->prev_lmf
1879 /* This could be called before emit_push_lmf () */
1880 if (!cfg->lmf_addr_var)
1881 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1882 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Restore the previous LMF as the new top of the stack. */
1884 prev_lmf_reg = alloc_preg (cfg);
1885 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1886 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as the
 * single argument, when enter/leave profiling is enabled.
 */
1890 emit_instrumentation_call (MonoCompile *cfg, void *func)
1892 MonoInst *iargs [1];
1895 * Avoid instrumenting inlined methods since it can
1896 * distort profiling results.
1898 if (cfg->method != cfg->current_method)
1901 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1902 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1903 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the OP_*CALL opcode variant for a call returning TYPE:
 * the _REG form for indirect calls (CALLI non-zero), the _MEMBASE form for
 * virtual calls (VIRT non-zero), and the plain form otherwise.
 */
1908 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1911 type = mini_get_underlying_type (type);
1912 switch (type->type) {
1913 case MONO_TYPE_VOID:
1914 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
/* Small integral types all use the plain integer call opcodes. */
1921 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1925 case MONO_TYPE_FNPTR:
1926 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1927 case MONO_TYPE_CLASS:
1928 case MONO_TYPE_STRING:
1929 case MONO_TYPE_OBJECT:
1930 case MONO_TYPE_SZARRAY:
1931 case MONO_TYPE_ARRAY:
1932 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1935 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1938 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1940 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1942 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1943 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base type and are reclassified. */
1944 if (type->data.klass->enumtype) {
1945 type = mono_class_enum_basetype (type->data.klass);
1948 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1949 case MONO_TYPE_TYPEDBYREF:
1950 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1951 case MONO_TYPE_GENERICINST:
/* Reclassify using the generic container class. */
1952 type = &type->data.generic_class->container_class->byval_arg;
1955 case MONO_TYPE_MVAR:
/* gsharedvt type variables are returned like value types. */
1957 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1959 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/* TRUE when T is a primitive integral scalar (BOOLEAN..U8 or native I/U).
 * Floating point types are excluded; byref-ness is ignored (see XXX). */
1964 //XXX this ignores if t is byref
1965 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1968 * target_type_is_incompatible:
1969 * @cfg: MonoCompile context
1971 * Check that the item @arg on the evaluation stack can be stored
1972 * in the target type (can be a local, or field, etc).
1973 * The cfg arg can be used to check if we need verification or just
1976 * Returns: non-0 value if arg can't be stored on a target.
1979 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1981 MonoType *simple_type;
/* Byref targets: accept managed pointers of matching (lowered) type. */
1984 if (target->byref) {
1985 /* FIXME: check that the pointed to types match */
1986 if (arg->type == STACK_MP) {
1987 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1988 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1989 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1991 /* if the target is native int& or same type */
1992 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1995 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1996 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1997 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2001 if (arg->type == STACK_PTR)
/* Non-byref targets: classify by underlying type and compare stack kinds. */
2006 simple_type = mini_get_underlying_type (target);
2007 switch (simple_type->type) {
2008 case MONO_TYPE_VOID:
2016 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2020 /* STACK_MP is needed when setting pinned locals */
2021 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2026 case MONO_TYPE_FNPTR:
2028 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2029 * in native int. (#688008).
2031 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2034 case MONO_TYPE_CLASS:
2035 case MONO_TYPE_STRING:
2036 case MONO_TYPE_OBJECT:
2037 case MONO_TYPE_SZARRAY:
2038 case MONO_TYPE_ARRAY:
2039 if (arg->type != STACK_OBJ)
2041 /* FIXME: check type compatibility */
2045 if (arg->type != STACK_I8)
2049 if (arg->type != cfg->r4_stack_type)
2053 if (arg->type != STACK_R8)
2056 case MONO_TYPE_VALUETYPE:
2057 if (arg->type != STACK_VTYPE)
2059 klass = mono_class_from_mono_type (simple_type);
2060 if (klass != arg->klass)
2063 case MONO_TYPE_TYPEDBYREF:
2064 if (arg->type != STACK_VTYPE)
2066 klass = mono_class_from_mono_type (simple_type);
2067 if (klass != arg->klass)
2070 case MONO_TYPE_GENERICINST:
2071 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2072 MonoClass *target_class;
2073 if (arg->type != STACK_VTYPE)
2075 klass = mono_class_from_mono_type (simple_type);
2076 target_class = mono_class_from_mono_type (target);
2077 /* The second cases is needed when doing partial sharing */
2078 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2082 if (arg->type != STACK_OBJ)
2084 /* FIXME: check type compatibility */
2088 case MONO_TYPE_MVAR:
/* Type variables only appear here under generic sharing. */
2089 g_assert (cfg->gshared);
2090 if (mini_type_var_is_vt (simple_type)) {
2091 if (arg->type != STACK_VTYPE)
2094 if (arg->type != STACK_OBJ)
2099 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2105 * Prepare arguments for passing to a function call.
2106 * Return a non-zero value if the arguments can't be passed to the given
2108 * The type checks are not yet complete and some conversions may need
2109 * casts on 32 or 64 bit architectures.
2111 * FIXME: implement this using target_type_is_incompatible ()
2114 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2116 MonoType *simple_type;
/* The receiver (args [0]) must look like an object or pointer. */
2120 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2124 for (i = 0; i < sig->param_count; ++i) {
2125 if (sig->params [i]->byref) {
2126 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2130 simple_type = mini_get_underlying_type (sig->params [i]);
/* handle_enum:-style reclassification loop over the underlying type. */
2132 switch (simple_type->type) {
2133 case MONO_TYPE_VOID:
2142 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2148 case MONO_TYPE_FNPTR:
2149 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2152 case MONO_TYPE_CLASS:
2153 case MONO_TYPE_STRING:
2154 case MONO_TYPE_OBJECT:
2155 case MONO_TYPE_SZARRAY:
2156 case MONO_TYPE_ARRAY:
2157 if (args [i]->type != STACK_OBJ)
2162 if (args [i]->type != STACK_I8)
2166 if (args [i]->type != cfg->r4_stack_type)
2170 if (args [i]->type != STACK_R8)
2173 case MONO_TYPE_VALUETYPE:
/* Enums re-enter the switch with their base type. */
2174 if (simple_type->data.klass->enumtype) {
2175 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2178 if (args [i]->type != STACK_VTYPE)
2181 case MONO_TYPE_TYPEDBYREF:
2182 if (args [i]->type != STACK_VTYPE)
2185 case MONO_TYPE_GENERICINST:
2186 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2189 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed as vtypes. */
2191 if (args [i]->type != STACK_VTYPE)
2195 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Convert an OP_*CALL_MEMBASE (virtual) opcode to its direct-call form.
 */
2203 callvirt_to_call (int opcode)
2206 case OP_CALL_MEMBASE:
2208 case OP_VOIDCALL_MEMBASE:
2210 case OP_FCALL_MEMBASE:
2212 case OP_RCALL_MEMBASE:
2214 case OP_VCALL_MEMBASE:
2216 case OP_LCALL_MEMBASE:
/* Any other opcode indicates a caller bug. */
2219 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Convert an OP_*CALL_MEMBASE (virtual) opcode to its indirect _REG form,
 * used when the target address is already in a register.
 */
2226 callvirt_to_call_reg (int opcode)
2229 case OP_CALL_MEMBASE:
2231 case OP_VOIDCALL_MEMBASE:
2232 return OP_VOIDCALL_REG;
2233 case OP_FCALL_MEMBASE:
2234 return OP_FCALL_REG;
2235 case OP_RCALL_MEMBASE:
2236 return OP_RCALL_REG;
2237 case OP_VCALL_MEMBASE:
2238 return OP_VCALL_REG;
2239 case OP_LCALL_MEMBASE:
2240 return OP_LCALL_REG;
2242 g_assert_not_reached ();
2248 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emit IR placing the IMT argument (either an explicit IMT_ARG value or a
 * constant for METHOD) into the architecture's IMT register for CALL.
 * LLVM and non-LLVM paths differ in how the register is communicated.
 */
2250 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2254 if (COMPILE_LLVM (cfg)) {
2256 method_reg = alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2259 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2260 method_reg = ins->dreg;
/* LLVM needs the vreg recorded on the call so it can wire it up itself. */
2264 call->imt_arg_reg = method_reg;
2266 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same value, plain outarg-register plumbing. */
2271 method_reg = alloc_preg (cfg);
2272 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2274 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2275 method_reg = ins->dreg;
2278 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP whose
 * resolution target is TARGET.
 */
2281 static MonoJumpInfo *
2282 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2284 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2288 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * cfg-aware wrapper over mono_class_check_context_used (); presumably
 * returns 0 when the compile is not generic-shared — only part of the
 * function is visible here.
 */
2294 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2297 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * cfg-aware wrapper over mono_method_check_context_used (); mirrors
 * mini_class_check_context_used () above.
 */
2303 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2306 return mono_method_check_context_used (method);
2312 * check_method_sharing:
2314 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2317 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2319 gboolean pass_vtable = FALSE;
2320 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable. */
2322 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2323 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2324 gboolean sharable = FALSE;
2326 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2330 * Pass vtable iff target method might
2331 * be shared, which means that sharing
2332 * is enabled for its class and its
2333 * context is sharable (and it's not a
2336 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) take an mrgctx instead of a vtable. */
2340 if (mini_method_get_context (cmethod) &&
2341 mini_method_get_context (cmethod)->method_inst) {
2342 g_assert (!pass_vtable);
2344 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2347 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Both out parameters are optional. */
2352 if (out_pass_vtable)
2353 *out_pass_vtable = pass_vtable;
2354 if (out_pass_mrgctx)
2355 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * setting up the return value (including vtype returns via OP_OUTARG_VTRETADDR)
 * and the outgoing argument area.  CALLI/VIRTUAL_/TAIL/RGCTX/UNBOX_TRAMPOLINE
 * select the call flavor.  The instruction is not yet added to a bblock.
 */
2358 inline static MonoCallInst *
2359 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2360 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2364 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls count as a method leave for enter/leave profiling. */
2372 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2374 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2376 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2379 call->signature = sig;
2380 call->rgctx_reg = rgctx;
2381 sig_ret = mini_get_underlying_type (sig->ret);
2383 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* First vtype-return path: return through cfg->vret_addr (condition for
 * choosing this branch over the next is not visible in this excerpt). */
2386 if (mini_type_is_vtype (sig_ret)) {
2387 call->vret_var = cfg->vret_addr;
2388 //g_assert_not_reached ();
2390 } else if (mini_type_is_vtype (sig_ret)) {
2391 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2394 temp->backend.is_pinvoke = sig->pinvoke;
2397 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2398 * address of return value to increase optimization opportunities.
2399 * Before vtype decomposition, the dreg of the call ins itself represents the
2400 * fact the call modifies the return value. After decomposition, the call will
2401 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2402 * will be transformed into an LDADDR.
2404 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2405 loada->dreg = alloc_preg (cfg);
2406 loada->inst_p0 = temp;
2407 /* We reference the call too since call->dreg could change during optimization */
2408 loada->inst_p1 = call;
2409 MONO_ADD_INS (cfg->cbb, loada);
2411 call->inst.dreg = temp->dreg;
2413 call->vret_var = loada;
2414 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2415 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2417 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2418 if (COMPILE_SOFT_FLOAT (cfg)) {
2420 * If the call has a float argument, we would need to do an r8->r4 conversion using
2421 * an icall, but that cannot be done during the call sequence since it would clobber
2422 * the call registers + the stack. So we do it before emitting the call.
2424 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2426 MonoInst *in = call->args [i];
2428 if (i >= sig->hasthis)
2429 t = sig->params [i - sig->hasthis];
2431 t = &mono_defaults.int_class->byval_arg;
2432 t = mono_type_get_underlying_type (t);
2434 if (!t->byref && t->type == MONO_TYPE_R4) {
2435 MonoInst *iargs [1];
2439 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2441 /* The result will be in an int vreg */
2442 call->args [i] = conv;
2448 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the outgoing arguments with the appropriate backend. */
2451 if (COMPILE_LLVM (cfg))
2452 mono_llvm_emit_call (cfg, call);
2454 mono_arch_emit_call (cfg, call);
2456 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area used by any call. */
2459 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2460 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx value in RGCTX_REG to CALL via the architecture's
 * dedicated RGCTX register, and mark the cfg/call accordingly.
 */
2466 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2468 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2469 cfg->uses_rgctx_reg = TRUE;
2470 call->rgctx_reg = TRUE;
2472 call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, optionally passing
 * IMT_ARG and RGCTX_ARG.  For pinvoke wrappers with calling-convention
 * checking enabled, also emits a stack-pointer balance check around the call.
 */
2477 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2482 gboolean check_sp = FALSE;
2484 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2485 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2487 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx into its own vreg so it survives until the call. */
2492 rgctx_reg = mono_alloc_preg (cfg);
2493 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the stack pointer before the call for the balance check. */
2497 if (!cfg->stack_inbalance_var)
2498 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2500 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2501 ins->dreg = cfg->stack_inbalance_var->dreg;
2502 MONO_ADD_INS (cfg->cbb, ins);
2505 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2507 call->inst.sreg1 = addr->dreg;
2510 emit_imt_argument (cfg, call, NULL, imt_arg);
2512 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* After the call, compare the stack pointer against the saved value. */
2517 sp_reg = mono_alloc_preg (cfg);
2519 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2521 MONO_ADD_INS (cfg->cbb, ins);
2523 /* Restore the stack so we don't crash when throwing the exception */
2524 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2525 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2526 MONO_ADD_INS (cfg->cbb, ins);
2528 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2529 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2533 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2535 return (MonoInst*)call;
2539 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2542 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2543 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2545 #ifndef DISABLE_REMOTING
2546 gboolean might_be_remote = FALSE;
2548 gboolean virtual_ = this_ins != NULL;
2549 gboolean enable_for_aot = TRUE;
2552 MonoInst *call_target = NULL;
2554 gboolean need_unbox_trampoline;
2557 sig = mono_method_signature (method);
2559 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2560 g_assert_not_reached ();
2563 rgctx_reg = mono_alloc_preg (cfg);
2564 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Tail of the main managed-call emission path: patches string-ctor signatures,
 * handles transparent-proxy (remoting) targets, the llvm-only virtual path,
 * the delegate Invoke fast path, and finally virtual/interface dispatch. */
2567 if (method->string_ctor) {
2568 /* Create the real signature */
2569 /* FIXME: Cache these */
2570 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2571 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2576 context_used = mini_method_check_context_used (cfg, method);
2578 #ifndef DISABLE_REMOTING
/* A call may need the remoting-check wrapper when the receiver could be a
 * transparent proxy: MarshalByRef (or object) receiver, non-virtual call,
 * and 'this' is not provably a plain object (or we are in gshared code). */
2579 might_be_remote = this_ins && sig->hasthis &&
2580 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2581 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2583 if (might_be_remote && context_used) {
2586 g_assert (cfg->gshared);
/* gshared methods can't use the remoting wrapper directly; fetch the
 * wrapper address from the rgctx and do an indirect call instead. */
2588 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2590 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2594 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2595 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* Calls through object/interface may reach a valuetype method and need unboxing. */
2597 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2599 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2601 #ifndef DISABLE_REMOTING
2602 if (might_be_remote)
2603 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2606 call->method = method;
2607 call->inst.flags |= MONO_INST_HAS_METHOD;
2608 call->inst.inst_left = this_ins;
2609 call->tail_call = tail;
2612 int vtable_reg, slot_reg, this_reg;
2615 this_reg = this_ins->dreg;
/* Delegate Invoke fast path: call through delegate->invoke_impl directly. */
2617 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2618 MonoInst *dummy_use;
2620 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2622 /* Make a call to delegate->invoke_impl */
2623 call->inst.inst_basereg = this_reg;
2624 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2625 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2627 /* We must emit a dummy use here because the delegate trampoline will
2628 replace the 'this' argument with the delegate target making this activation
2629 no longer a root for the delegate.
2630 This is an issue for delegates that target collectible code such as dynamic
2631 methods of GC'able assemblies.
2633 For a test case look into #667921.
2635 FIXME: a dummy use is not the best way to do it as the local register allocator
2636 will put it on a caller save register and spil it around the call.
2637 Ideally, we would either put it on a callee save register or only do the store part.
2639 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2641 return (MonoInst*)call;
/* Devirtualization: non-virtual or final methods can be called directly
 * after a null check on the receiver. */
2644 if ((!cfg->compile_aot || enable_for_aot) &&
2645 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2646 (MONO_METHOD_IS_FINAL (method) &&
2647 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2648 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2650 * the method is not virtual, we just need to ensure this is not null
2651 * and then we can call the method directly.
2653 #ifndef DISABLE_REMOTING
2654 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2656 * The check above ensures method is not gshared, this is needed since
2657 * gshared methods can't have wrappers.
2659 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2663 if (!method->string_ctor)
2664 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2666 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2667 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2669 * the method is virtual, but we can statically dispatch since either
2670 * it's class or the method itself are sealed.
2671 * But first we need to ensure it's not a null reference.
2673 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2675 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2676 } else if (call_target) {
2677 vtable_reg = alloc_preg (cfg);
2678 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2680 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2681 call->inst.sreg1 = call_target->dreg;
/* BUG(review): '!' is the logical NOT, so '!MONO_INST_HAS_METHOD' evaluates to 0
 * and this statement clears ALL flags on the instruction, not just the
 * MONO_INST_HAS_METHOD bit. The bitwise complement '~MONO_INST_HAS_METHOD'
 * was almost certainly intended — confirm against upstream before fixing. */
2682 call->inst.flags &= !MONO_INST_HAS_METHOD;
2684 vtable_reg = alloc_preg (cfg);
2685 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2686 if (mono_class_is_interface (method->klass)) {
2687 guint32 imt_slot = mono_method_get_imt_slot (method);
2688 emit_imt_argument (cfg, call, call->method, imt_arg);
2689 slot_reg = vtable_reg;
/* IMT slots are addressed at negative offsets from the vtable pointer
 * (imt_slot is presumably < MONO_IMT_SIZE, so the offset is negative). */
2690 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2692 slot_reg = vtable_reg;
2693 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2694 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2696 g_assert (mono_method_signature (method)->generic_param_count);
2697 emit_imt_argument (cfg, call, call->method, imt_arg);
2701 call->inst.sreg1 = slot_reg;
2702 call->inst.inst_offset = offset;
2703 call->is_virtual = TRUE;
2707 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2710 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2712 return (MonoInst*)call;
/* Convenience wrapper: emit a non-virtual, non-tail call to METHOD using its
 * own signature, with no imt/rgctx arguments. */
2716 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2718 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/* Emit a call to the native function FUNC with signature SIG.
 * All the virtual/tail/rgctx/unbox options of mono_emit_call_args are off. */
2722 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2729 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2732 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2734 return (MonoInst*)call;
/* Emit a call to the JIT icall identified by its C function address FUNC.
 * The call goes through the icall's wrapper, which provides the LMF frame
 * needed for exception handling (see mono_emit_jit_icall_by_info). */
2738 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2740 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2744 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2748 * mono_emit_abs_call:
2750 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2752 inline static MonoInst*
2753 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2754 MonoMethodSignature *sig, MonoInst **args)
/* Allocate a patch-info descriptor; it doubles as the "address" of the call
 * and is resolved later via the abs_patches table. */
2756 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2760 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping the fake call addresses back to their
 * patch infos (keyed by pointer identity, hence the NULL hash funcs). */
2763 if (cfg->abs_patches == NULL)
2764 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2765 g_hash_table_insert (cfg->abs_patches, ji, ji);
2766 ins = mono_emit_native_call (cfg, ji, sig, args);
2767 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2771 static MonoMethodSignature*
/* Duplicate SIG with one extra trailing pointer-sized parameter, used to pass
 * an rgctx/extra argument on indirect calls (see emit_extra_arg_calli). */
2772 sig_to_rgctx_sig (MonoMethodSignature *sig)
2774 // FIXME: memory allocation
/* NOTE(review): the g_malloc'ed signature is never freed by this function;
 * callers appear to keep it for the lifetime of the compiled code (the FIXME
 * above acknowledges this) — confirm ownership before changing. */
2775 MonoMethodSignature *res;
2778 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2779 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2780 res->param_count = sig->param_count + 1;
2781 for (i = 0; i < sig->param_count; ++i)
2782 res->params [i] = sig->params [i];
/* The extra argument is typed as a native int passed like 'this'. */
2783 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2787 /* Make an indirect call to FSIG passing an additional argument */
2789 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2791 MonoMethodSignature *csig;
/* Small stack buffer for the argument array; a mempool allocation is used
 * when hasthis + params + extra arg might not fit in 16 slots. */
2792 MonoInst *args_buf [16];
2794 int i, pindex, tmp_reg;
2796 /* Make a call with an rgctx/extra arg */
2797 if (fsig->param_count + 2 < 16)
2800 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2803 args [pindex ++] = orig_args [0];
2804 for (i = 0; i < fsig->param_count; ++i)
2805 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Copy the extra arg into a fresh vreg and append it as the last argument. */
2806 tmp_reg = alloc_preg (cfg);
2807 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* Call with a signature extended by one trailing pointer parameter. */
2808 csig = sig_to_rgctx_sig (fsig);
2809 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2812 /* Emit an indirect call to the function descriptor ADDR */
2814 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2816 int addr_reg, arg_reg;
2817 MonoInst *call_target;
2819 g_assert (cfg->llvm_only);
2822 * addr points to a <addr, arg> pair, load both of them, and
2823 * make a call to addr, passing arg as an extra arg.
2825 addr_reg = alloc_preg (cfg);
2826 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
/* Second word of the descriptor is the extra argument. */
2827 arg_reg = alloc_preg (cfg);
2828 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2830 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/* Whether C icalls may be called directly (without their managed wrapper).
 * Disabled under mixed LLVM compilation and when single-stepping/stack-walk
 * support (sdb seq points) requires the wrapper's LMF frame. */
2834 direct_icalls_enabled (MonoCompile *cfg)
2838 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2840 if (cfg->compile_llvm && !cfg->llvm_only)
2843 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2849 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2852 * Call the jit icall without a wrapper if possible.
2853 * The wrapper is needed for the following reasons:
2854 * - to handle exceptions thrown using mono_raise_exceptions () from the
2855 * icall function. The EH code needs the lmf frame pushed by the
2856 * wrapper to be able to unwind back to managed code.
2857 * - to be able to do stack walks for asynchronously suspended
2858 * threads when debugging.
2860 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create (and cache on INFO) the icall wrapper method, then inline it. */
2864 if (!info->wrapper_method) {
2865 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2866 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
2868 mono_memory_barrier ();
2872 * Inline the wrapper method, which is basically a call to the C icall, and
2873 * an exception check.
2875 costs = inline_method (cfg, info->wrapper_method, NULL,
2876 args, NULL, il_offset, TRUE);
2877 g_assert (costs > 0);
2878 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: call through the (cached) wrapper like mono_emit_jit_icall does. */
2882 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/* Widen the result of a call when the callee (pinvoke, or any call under LLVM)
 * may return a small integer without initializing the upper bits of the
 * register; emits an explicit sign/zero extension matched to the return type. */
2887 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2889 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2890 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2894 * Native code might return non register sized integers
2895 * without initializing the upper bits.
/* Pick the widening op from the load opcode the return type would use. */
2897 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2898 case OP_LOADI1_MEMBASE:
2899 widen_op = OP_ICONV_TO_I1;
2901 case OP_LOADU1_MEMBASE:
2902 widen_op = OP_ICONV_TO_U1;
2904 case OP_LOADI2_MEMBASE:
2905 widen_op = OP_ICONV_TO_I2;
2907 case OP_LOADU2_MEMBASE:
2908 widen_op = OP_ICONV_TO_U2;
2914 if (widen_op != -1) {
2915 int dreg = alloc_preg (cfg);
2918 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2919 widen->type = ins->type;
/* Emit IR that throws a MethodAccessException at runtime, identifying both the
 * caller and the inaccessible callee (resolved through the rgctx if generic). */
2930 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2932 MonoInst *args [16];
2934 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2935 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2937 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/* Return (and cache in a static) the corlib-internal String.memcpy helper.
 * NOTE(review): the lazy init is not locked; presumably the benign-race
 * pattern (idempotent lookup, pointer-sized store) is relied on here. */
2941 mini_get_memcpy_method (void)
2943 static MonoMethod *memcpy_method = NULL;
2944 if (!memcpy_method) {
2945 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2947 g_error ("Old corlib found. Install a new one");
2949 return memcpy_method;
/* Recursively build a bitmap of which pointer-sized slots of KLASS (at byte
 * OFFSET from the start) hold object references and therefore need a write
 * barrier when copied. Static fields are skipped; valuetype field offsets are
 * rebased past the MonoObject header. */
2953 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2955 MonoClassField *field;
2956 gpointer iter = NULL;
2958 while ((field = mono_class_get_fields (klass, &iter))) {
2961 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2963 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2964 if (mini_type_is_reference (mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the bitmap to be valid. */
2965 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2966 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* Recurse into embedded valuetypes that themselves contain references. */
2968 MonoClass *field_class = mono_class_from_mono_type (field->type);
2969 if (field_class->has_references)
2970 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/* Emit a GC write barrier for storing VALUE through PTR. Picks, in order of
 * preference: a dedicated backend opcode, an inline card-table marking
 * sequence, or a call to the GC-provided write barrier method. No-op when the
 * compile doesn't generate write barriers. */
2976 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2978 int card_table_shift_bits;
2979 gpointer card_table_mask;
2981 MonoInst *dummy_use;
2982 int nursery_shift_bits;
2983 size_t nursery_size;
2985 if (!cfg->gen_write_barriers)
2988 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2990 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2992 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2995 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2996 wbarrier->sreg1 = ptr->dreg;
2997 wbarrier->sreg2 = value->dreg;
2998 MONO_ADD_INS (cfg->cbb, wbarrier);
2999 } else if (card_table) {
3000 int offset_reg = alloc_preg (cfg);
3005 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
3006 * collector case, so, for the serial collector, it might slightly slow down nursery
3007 * collections. We also expect that the host system and the target system have the same card
3008 * table configuration, which is the case if they have the same pointer size.
/* card index = (ptr >> shift) & mask; then mark card_table[index] = 1. */
3011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3012 if (card_table_mask)
3013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3015 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3016 * IMM's larger than 32bits.
3018 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3019 card_reg = ins->dreg;
3021 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3022 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC's managed write barrier with the destination ptr. */
3024 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3025 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier so the stored object stays rooted. */
3028 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/* Unrolled struct copy that inserts write barriers only for the pointer slots
 * recorded by create_write_barrier_bitmap. Bails out (caller falls back to a
 * generic copy) when alignment is below pointer size or the struct is larger
 * than 5 pointer words. iargs[0]/iargs[1] hold dest/src addresses. */
3032 mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3034 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3035 unsigned need_wb = 0;
3040 /*types with references can't have alignment smaller than sizeof(void*) */
3041 if (align < SIZEOF_VOID_P)
3044 if (size > 5 * SIZEOF_VOID_P)
3047 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3049 destreg = iargs [0]->dreg;
3050 srcreg = iargs [1]->dreg;
3053 dest_ptr_reg = alloc_preg (cfg);
3054 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per iteration. */
3057 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3059 while (size >= SIZEOF_VOID_P) {
3060 MonoInst *load_inst;
3061 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3062 load_inst->dreg = tmp_reg;
3063 load_inst->inst_basereg = srcreg;
3064 load_inst->inst_offset = offset;
3065 MONO_ADD_INS (cfg->cbb, load_inst);
3067 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marks as references. */
3070 mini_emit_write_barrier (cfg, iargs [0], load_inst);
3072 offset += SIZEOF_VOID_P;
3073 size -= SIZEOF_VOID_P;
3076 /*tmp += sizeof (void*)*/
3077 if (size >= SIZEOF_VOID_P) {
3078 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3079 MONO_ADD_INS (cfg->cbb, iargs [0]);
3083 /* Those cannot be references since size < sizeof (void*) */
3085 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3086 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3092 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3093 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3099 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3109 * Emit code to copy a valuetype of type @klass whose address is stored in
3110 * @src->dreg to memory whose address is stored at @dest->dreg.
3113 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3115 MonoInst *iargs [4];
3118 MonoMethod *memcpy_method;
3119 MonoInst *size_ins = NULL;
3120 MonoInst *memcpy_ins = NULL;
3124 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3127 * This check breaks with spilled vars... need to handle it during verification anyway.
3128 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Under gsharedvt the size/memcpy helper are only known at runtime; fetch
 * both from the gsharedvt info. */
3131 if (mini_is_gsharedvt_klass (klass)) {
3133 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3134 memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3138 n = mono_class_native_size (klass, &align);
3140 n = mono_class_value_size (klass, &align);
3143 align = SIZEOF_VOID_P;
3144 /* if native is true there should be no references in the struct */
3145 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3146 /* Avoid barriers when storing to the stack */
3147 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3148 (dest->opcode == OP_LDADDR))) {
3154 context_used = mini_class_check_context_used (cfg, klass);
3156 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3157 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3159 } else if (size_ins || align < SIZEOF_VOID_P) {
3161 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3163 iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3164 if (!cfg->compile_aot)
3165 mono_class_compute_gc_descriptor (klass);
3168 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3170 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3172 /* We don't unroll more than 5 stores to avoid code bloat. */
3173 /*This is harmless and simplify mono_gc_get_range_copy_func */
/* Round size up to a whole number of pointer words for the range copy. */
3174 n += (SIZEOF_VOID_P - 1);
3175 n &= ~(SIZEOF_VOID_P - 1);
3177 EMIT_NEW_ICONST (cfg, iargs [2], n);
3178 mono_emit_jit_icall (cfg, mono_gc_get_range_copy_func (), iargs);
/* No-barrier path: small fixed-size copies are inlined, otherwise call the
 * corlib memcpy helper (indirect under gsharedvt). */
3183 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3184 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3185 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3190 iargs [2] = size_ins;
3192 EMIT_NEW_ICONST (cfg, iargs [2], n);
3194 memcpy_method = mini_get_memcpy_method ();
3196 mini_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3198 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Return (and cache in a static) the corlib-internal String.memset helper;
 * same unlocked lazy-init pattern as mini_get_memcpy_method. */
3203 mini_get_memset_method (void)
3205 static MonoMethod *memset_method = NULL;
3206 if (!memset_method) {
3207 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3209 g_error ("Old corlib found. Install a new one");
3211 return memset_method;
/* Emit code zero-initializing the valuetype KLASS at address DEST->dreg.
 * gsharedvt classes go through a runtime bzero helper with a runtime size;
 * small fixed-size types are zeroed inline, larger ones via corlib memset. */
3215 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3217 MonoInst *iargs [3];
3220 MonoMethod *memset_method;
3221 MonoInst *size_ins = NULL;
3222 MonoInst *bzero_ins = NULL;
3223 static MonoMethod *bzero_method;
3225 /* FIXME: Optimize this for the case when dest is an LDADDR */
3226 mono_class_init (klass);
3227 if (mini_is_gsharedvt_klass (klass)) {
3228 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3229 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3231 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3232 g_assert (bzero_method);
3234 iargs [1] = size_ins;
/* Indirect call: the actual bzero implementation comes from the rgctx. */
3235 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3239 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3241 n = mono_class_value_size (klass, &align);
3243 if (n <= sizeof (gpointer) * 8) {
3244 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3247 memset_method = mini_get_memset_method ();
3249 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3250 EMIT_NEW_ICONST (cfg, iargs [2], n);
3251 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3258 * Emit IR to return either the this pointer for instance method,
3259 * or the mrgctx for static methods.
3262 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3264 MonoInst *this_ins = NULL;
3266 g_assert (cfg->gshared);
/* Instance methods on reference types can use 'this' (the vtable is loaded
 * from it below); all other cases read the rgctx/vtable from the shared
 * context variable set up by the prologue. */
3268 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3269 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3270 !method->klass->valuetype)
3271 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
3273 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3274 MonoInst *mrgctx_loc, *mrgctx_var;
3276 g_assert (!this_ins);
3277 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3279 mrgctx_loc = mono_get_vtable_var (cfg);
3280 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3283 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
3284 MonoInst *mrgctx_loc, *mrgctx_var;
3286 /* Default interface methods need an mrgctx since the vtabke at runtime points at an implementing class */
3287 mrgctx_loc = mono_get_vtable_var (cfg);
3288 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3290 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
3293 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3294 MonoInst *vtable_loc, *vtable_var;
3296 g_assert (!this_ins);
3298 vtable_loc = mono_get_vtable_var (cfg);
3299 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the context var actually holds an mrgctx, dereference it to reach
 * the class vtable. */
3301 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3302 MonoInst *mrgctx_var = vtable_var;
3305 vtable_reg = alloc_preg (cfg);
3306 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3307 vtable_var->type = STACK_PTR;
/* Reference-type instance method: load the vtable from 'this'. */
3315 vtable_reg = alloc_preg (cfg);
3316 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3321 static MonoJumpInfoRgctxEntry *
/* Allocate (from MP) an rgctx-entry descriptor wrapping a patch of PATCH_TYPE /
 * PATCH_DATA, to be resolved for METHOD with the given INFO_TYPE. */
3322 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3324 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3325 res->method = method;
3326 res->in_mrgctx = in_mrgctx;
3327 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3328 res->data->type = patch_type;
3329 res->data->data.target = patch_data;
3330 res->info_type = info_type;
3335 static inline MonoInst*
/* Emit an inline fetch of ENTRY's slot from RGCTX. One path calls the fill
 * icall unconditionally (no compile-time slot); the other emits the fast
 * path — walk the rgctx array chain, load the slot, and fall back to the
 * fill icall only when a link or the slot itself is still NULL. */
3336 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3338 MonoInst *args [16];
3341 // FIXME: No fastpath since the slot is not a compile time constant
3343 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3344 if (entry->in_mrgctx)
3345 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3347 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3351 * FIXME: This can be called during decompose, which is a problem since it creates
3353 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3355 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3357 MonoBasicBlock *is_null_bb, *end_bb;
3358 MonoInst *res, *ins, *call;
3361 slot = mini_get_rgctx_entry_slot (entry);
3363 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3364 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots are offset past the embedded MonoMethodRuntimeGenericContext. */
3366 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many array hops (depth) are needed to reach this index. */
3367 for (depth = 0; ; ++depth) {
3368 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3370 if (index < size - 1)
3375 NEW_BBLOCK (cfg, end_bb);
3376 NEW_BBLOCK (cfg, is_null_bb);
3379 rgctx_reg = rgctx->dreg;
3381 rgctx_reg = alloc_preg (cfg);
3383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3384 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3385 NEW_BBLOCK (cfg, is_null_bb);
3387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3388 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3391 for (i = 0; i < depth; ++i) {
3392 int array_reg = alloc_preg (cfg);
3394 /* load ptr to next array */
3395 if (mrgctx && i == 0)
3396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3399 rgctx_reg = array_reg;
3400 /* is the ptr null? */
3401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3402 /* if yes, jump to actual trampoline */
3403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3407 val_reg = alloc_preg (cfg);
3408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3409 /* is the slot null? */
3410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3411 /* if yes, jump to actual trampoline */
3412 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path hit: move the slot value into the shared result register. */
3415 res_reg = alloc_preg (cfg);
3416 MONO_INST_NEW (cfg, ins, OP_MOVE);
3417 ins->dreg = res_reg;
3418 ins->sreg1 = val_reg;
3419 MONO_ADD_INS (cfg->cbb, ins);
3421 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: the fill icall computes (and caches) the slot value. */
3424 MONO_START_BB (cfg, is_null_bb);
3426 EMIT_NEW_ICONST (cfg, args [1], index);
3428 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3430 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3431 MONO_INST_NEW (cfg, ins, OP_MOVE);
3432 ins->dreg = res_reg;
3433 ins->sreg1 = call->dreg;
3434 MONO_ADD_INS (cfg->cbb, ins);
3435 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3437 MONO_START_BB (cfg, end_bb);
3446 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3449 static inline MonoInst*
/* Inline the fetch where possible, otherwise go through the lazy-fetch
 * trampoline via an abs call. */
3450 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3453 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3455 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR loading the RGCTX_TYPE info for KLASS from the current method's rgctx. */
3459 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3460 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3462 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3463 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3465 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE info for signature SIG from the rgctx. */
3469 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3470 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3472 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3473 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3475 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading gsharedvt call info (SIG + CMETHOD pair) from the rgctx. */
3479 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3480 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3482 MonoJumpInfoGSharedVtCall *call_info;
3483 MonoJumpInfoRgctxEntry *entry;
3486 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3487 call_info->sig = sig;
3488 call_info->method = cmethod;
3490 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3491 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3493 return emit_rgctx_fetch (cfg, rgctx, entry);
3497 * emit_get_rgctx_virt_method:
3499 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3502 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3503 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3505 MonoJumpInfoVirtMethod *info;
3506 MonoJumpInfoRgctxEntry *entry;
3509 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3510 info->klass = klass;
3511 info->method = virt_method;
3513 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3514 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3516 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the gsharedvt per-method runtime info block from the rgctx. */
3520 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3521 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3523 MonoJumpInfoRgctxEntry *entry;
3526 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3527 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3529 return emit_rgctx_fetch (cfg, rgctx, entry);
3533 * emit_get_rgctx_method:
3535 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3536 * normal constants, else emit a load from the rgctx.
3539 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3540 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3542 if (!context_used) {
/* No generic sharing involved: the value is a compile-time constant. */
3545 switch (rgctx_type) {
3546 case MONO_RGCTX_INFO_METHOD:
3547 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3549 case MONO_RGCTX_INFO_METHOD_RGCTX:
3550 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3553 g_assert_not_reached ();
3556 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3557 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3559 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE info for FIELD from the rgctx. */
3564 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3565 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3567 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3568 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3570 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Return the index of the gsharedvt-info template entry matching
 * (DATA, RGCTX_TYPE), registering a new entry if none exists. The entries
 * array grows geometrically in the cfg mempool. */
3574 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3576 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3577 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an existing slot for identical (type, data); LOCAL_OFFSET entries
 * are never deduplicated. */
3582 for (i = 0; i < info->num_entries; ++i) {
3583 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3585 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3589 if (info->num_entries == info->count_entries) {
3590 MonoRuntimeGenericContextInfoTemplate *new_entries;
3591 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3593 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3595 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3596 info->entries = new_entries;
3597 info->count_entries = new_count_entries;
3600 idx = info->num_entries;
3601 template_ = &info->entries [idx];
3602 template_->info_type = rgctx_type;
3603 template_->data = data;
3605 info->num_entries ++;
3611 * emit_get_gsharedvt_info:
3613 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3616 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3621 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3622 /* Load info->entries [idx] */
3623 dreg = alloc_preg (cfg);
3624 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by a class's byval type. */
3630 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3632 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3636 * On return the caller must check @klass for load errors.
/* Emit IR running the class (static) initializer for KLASS if it has not run
 * yet. The vtable is obtained from the rgctx under generic sharing, or as a
 * constant otherwise; the init itself is either a dedicated backend opcode or
 * an inline initialized-flag check plus an icall. */
3639 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3641 MonoInst *vtable_arg;
3644 context_used = mini_class_check_context_used (cfg, klass);
3647 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3648 klass, MONO_RGCTX_INFO_VTABLE);
3650 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3654 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3657 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3661 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3662 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3664 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3665 ins->sreg1 = vtable_arg->dreg;
3666 MONO_ADD_INS (cfg->cbb, ins);
3669 MonoBasicBlock *inited_bb;
3670 MonoInst *args [16];
/* Fast path: skip the icall when vtable->initialized is already set. */
3672 inited_reg = alloc_ireg (cfg);
3674 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3676 NEW_BBLOCK (cfg, inited_bb);
3678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3681 args [0] = vtable_arg;
3682 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3684 MONO_START_BB (cfg, inited_bb);
/* Emit a debugger sequence point at IL offset IP when sequence points are
 * enabled and we are compiling METHOD itself (not an inlinee). */
3689 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3693 if (cfg->gen_seq_points && cfg->method == method) {
3694 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3696 ins->flags |= MONO_INST_NONEMPTY_STACK;
3697 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When the --debug=casts option is active, record the source class (read
 * from the object's vtable) and the target KLASS of a pending cast into the
 * per-thread MonoJitTlsData, so a failing cast can produce a detailed
 * message. When NULL_CHECK is requested, a null receiver skips the recording.
 * No-op otherwise.
 */
3702 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3704 if (mini_get_debug_options ()->better_cast_details) {
3705 int vtable_reg = alloc_preg (cfg);
3706 int klass_reg = alloc_preg (cfg);
3707 MonoBasicBlock *is_null_bb = NULL;
3709 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for a null object. */
3712 NEW_BBLOCK (cfg, is_null_bb);
3714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3718 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* TLS access is not available on every backend; the feature degrades to this warning. */
3720 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* class_cast_from := object->vtable->klass */
3724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3727 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to: RGCTX lookup in shared generic code, class constant otherwise. */
3729 context_used = mini_class_check_context_used (cfg, klass);
3731 MonoInst *class_ins;
3733 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3734 to_klass_reg = class_ins->dreg;
3736 to_klass_reg = alloc_preg (cfg);
3737 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3739 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3742 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Clear the cast-details state saved by mini_save_cast_details () once the
 * cast has succeeded. Only the 'from' field needs clearing, since a non-zero
 * 'from' is what marks the record as live.
 */
3747 mini_reset_cast_details (MonoCompile *cfg)
3749 /* Reset the variables holding the cast details */
3750 if (mini_get_debug_options ()->better_cast_details) {
3751 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3752 /* It is enough to reset the from field */
3753 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3758 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ's dynamic type matches ARRAY_CLASS, and
 * throw ArrayTypeMismatchException otherwise. The comparison strategy depends
 * on the compilation mode: class-vs-class under MONO_OPT_SHARED, vtable from
 * the RGCTX in shared generic code, vtable constant/patch in AOT, and a raw
 * vtable immediate in plain JIT. On return the caller must check @array_class
 * for load errors (see comment fragment above).
 */
3761 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3763 int vtable_reg = alloc_preg (cfg);
3766 context_used = mini_class_check_context_used (cfg, array_class);
/* Record the cast for --debug=casts diagnostics; cleared again below on success. */
3768 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: a null OBJ raises NullReferenceException here. */
3770 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3772 if (cfg->opt & MONO_OPT_SHARED) {
3773 int class_reg = alloc_preg (cfg);
/* Compare MonoClass pointers, with the target class materialized via a runtime constant. */
3776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3777 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3778 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3779 } else if (context_used) {
3780 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable from the RGCTX. */
3782 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3785 if (cfg->compile_aot) {
/* AOT: vtable pointer must go through a patchable constant. */
3789 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3791 vt_reg = alloc_preg (cfg);
3792 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3793 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3796 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
/* JIT: the vtable address is stable, compare against it as an immediate. */
3798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3802 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3804 mini_reset_cast_details (cfg);
3808 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3809 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () to unbox VAL. In shared generic code
 * (CONTEXT_USED != 0) the method address comes from the RGCTX; otherwise a
 * direct managed call is emitted, passing the vtable when method sharing
 * requires it. Returns the instruction representing the call result.
 */
3812 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3814 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3817 MonoInst *rgctx, *addr;
3819 /* FIXME: What if the class is shared? We might not
3820 have to get the address of the method from the
3822 addr = emit_get_rgctx_method (cfg, context_used, method,
3823 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3824 if (cfg->llvm_only) {
/* llvm-only mode: record the signature so a gsharedvt in/out wrapper can be generated. */
3825 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3826 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3828 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3830 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable as an extra argument. */
3833 gboolean pass_vtable, pass_mrgctx;
3834 MonoInst *rgctx_arg = NULL;
3836 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3837 g_assert (!pass_mrgctx);
3840 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3843 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3846 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in SP [0] to value type KLASS: verify the
 * object's dynamic type (throwing InvalidCastException on mismatch) and
 * return the address of the boxed payload, which starts right after the
 * MonoObject header.
 */
3851 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3855 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3856 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3857 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3858 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3860 obj_reg = sp [0]->dreg;
/* Faulting load performs the implicit null check. */
3861 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3862 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3864 /* FIXME: generics */
3865 g_assert (klass->rank == 0);
/* An array object can never unbox to a non-array value type. */
3868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3869 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Compare element classes, which ignores byref/enum distinctions. */
3871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3872 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: the expected element class comes from the RGCTX. */
3875 MonoInst *element_class;
3877 /* This assertion is from the unboxcast insn */
3878 g_assert (klass->rank == 0);
3880 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3881 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3883 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3884 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3886 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3887 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3888 mini_reset_cast_details (cfg);
/* Result: pointer to the unboxed value = obj + sizeof (MonoObject). */
3891 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3892 MONO_ADD_INS (cfg->cbb, add);
3893 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type, i.e. it may be instantiated as
 * a reference type, a plain value type, or a Nullable at run time. Emits a
 * three-way branch on the RGCTX-provided box type and leaves the address of
 * the unboxed value (or of a temporary holding the reference) in a common
 * register, from which the final value is loaded.
 */
3900 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3902 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3903 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3907 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type check via icall; it also performs the castclass-style validation. */
3913 args [1] = klass_inst;
3916 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3918 NEW_BBLOCK (cfg, is_ref_bb);
3919 NEW_BBLOCK (cfg, is_nullable_bb);
3920 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: REF / NULLABLE / (fallthrough) VTYPE. */
3921 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3926 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3928 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3929 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload lives right after the object header. */
3933 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3934 MONO_ADD_INS (cfg->cbb, addr);
3936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3939 MONO_START_BB (cfg, is_ref_bb);
3941 /* Save the ref to a temporary */
3942 dreg = alloc_ireg (cfg);
3943 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3944 addr->dreg = addr_reg;
3945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3946 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3949 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built gsharedvt signature. */
3952 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3953 MonoInst *unbox_call;
3954 MonoMethodSignature *unbox_sig;
3956 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3957 unbox_sig->ret = &klass->byval_arg;
3958 unbox_sig->param_count = 1;
3959 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3962 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3964 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3966 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3967 addr->dreg = addr_reg;
3970 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3973 MONO_START_BB (cfg, end_bb);
/* All paths stored an address in addr_reg; load the typed value from it. */
3976 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3982 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS, either for 'newobj' or for
 * boxing (FOR_BOX). Picks between a GC managed allocator (fast path, object
 * size known at JIT time), specialized helper icalls, and the generic
 * allocation function. Returns NULL and sets the cfg exception on error
 * (see comment fragment above).
 */
3985 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3987 MonoInst *iargs [2];
/* Shared generic code path: class/vtable comes from the RGCTX. */
3992 MonoRgctxInfoType rgctx_info;
3993 MonoInst *iargs [2];
3994 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3996 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3998 if (cfg->opt & MONO_OPT_SHARED)
3999 rgctx_info = MONO_RGCTX_INFO_KLASS;
4001 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4002 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4004 if (cfg->opt & MONO_OPT_SHARED) {
/* ves_icall_object_new takes (domain, klass). */
4005 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4007 alloc_ftn = ves_icall_object_new;
4010 alloc_ftn = ves_icall_object_new_specific;
4013 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4014 if (known_instance_size) {
4015 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header. */
4016 if (size < sizeof (MonoObject))
4017 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4019 EMIT_NEW_ICONST (cfg, iargs [1], size);
4021 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4024 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4027 if (cfg->opt & MONO_OPT_SHARED) {
4028 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4029 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4031 alloc_ftn = ves_icall_object_new;
4032 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4033 /* This happens often in argument checking code, eg. throw new FooException... */
4034 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4035 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4036 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4038 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4039 MonoMethod *managed_alloc = NULL;
/* Failed vtable creation => surface a type-load error on the cfg. */
4043 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4044 cfg->exception_ptr = klass;
4048 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4050 if (managed_alloc) {
4051 int size = mono_class_instance_size (klass);
4052 if (size < sizeof (MonoObject))
4053 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4055 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4056 EMIT_NEW_ICONST (cfg, iargs [1], size);
4057 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4059 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in machine words ('lw') as first arg. */
4061 guint32 lw = vtable->klass->instance_size;
4062 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4063 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4064 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4067 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4071 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4075 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS. Nullable types are boxed by calling
 * Nullable<T>.Box (); gsharedvt types branch at run time on whether the
 * instantiation is a reference type, a vtype or a Nullable; ordinary value
 * types allocate and store the payload after the object header. Returns NULL
 * and sets the cfg exception on error (see comment fragment above).
 */
4078 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4080 MonoInst *alloc, *ins;
4082 if (mono_class_is_nullable (klass)) {
4083 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4086 if (cfg->llvm_only && cfg->gsharedvt) {
4087 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4088 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4089 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4091 /* FIXME: What if the class is shared? We might not
4092 have to get the method address from the RGCTX. */
4093 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4094 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4095 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4097 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, optionally passing the vtable. */
4100 gboolean pass_vtable, pass_mrgctx;
4101 MonoInst *rgctx_arg = NULL;
4103 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4104 g_assert (!pass_mrgctx);
4107 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4110 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4113 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4117 if (mini_is_gsharedvt_klass (klass)) {
4118 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4119 MonoInst *res, *is_ref, *src_var, *addr;
4122 dreg = alloc_ireg (cfg);
4124 NEW_BBLOCK (cfg, is_ref_bb);
4125 NEW_BBLOCK (cfg, is_nullable_bb);
4126 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on the instantiation's box type, mirroring handle_unbox_gsharedvt (). */
4127 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4128 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the header. */
4135 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4138 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4139 ins->opcode = OP_STOREV_MEMBASE;
4141 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4142 res->type = STACK_OBJ;
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4147 MONO_START_BB (cfg, is_ref_bb);
4149 /* val is a vtype, so has to load the value manually */
4150 src_var = get_vreg_to_inst (cfg, val->dreg);
4152 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4153 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4158 MONO_START_BB (cfg, is_nullable_bb);
4161 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
4162 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4164 MonoMethodSignature *box_sig;
4167 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4168 * construct that method at JIT time, so have to do things by hand.
4170 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4171 box_sig->ret = &mono_defaults.object_class->byval_arg;
4172 box_sig->param_count = 1;
4173 box_sig->params [0] = &klass->byval_arg;
4176 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4178 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4179 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4180 res->type = STACK_OBJ;
4184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4186 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate, then store the value into the payload. */
4190 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4194 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls are safe to call directly. */
4199 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Decide whether the icall CMETHOD can be called directly (bypassing the
 * icall wrapper). Only a small whitelist of corlib classes whose icalls never
 * raise exceptions is allowed.
 */
4202 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4204 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4205 if (!direct_icalls_enabled (cfg))
4209 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4210 * Whitelist a few icalls for now.
4212 if (!direct_icall_type_hash) {
4213 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4215 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4216 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4217 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4218 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the pointer store so racing readers see complete data. */
4219 mono_memory_barrier ();
4220 direct_icall_type_hash = h;
4223 if (cmethod->klass == mono_defaults.math_class)
4225 /* No locking needed */
4226 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD inspects its caller via a stack walk; the visible
 * case is System.Type.GetType (), which must see the real caller frame and
 * therefore must not be inlined/optimized away.
 */
4232 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4234 if (cmethod->klass == mono_defaults.systemtype_class) {
4235 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion of Enum.HasFlag (): load the enum value through
 * ENUM_THIS, AND it with ENUM_FLAG, and compare the result back against the
 * flag ((this & flag) == flag). Chooses 32- or 64-bit opcodes based on the
 * underlying enum type, then decomposes the new opcodes for the backend.
 */
static G_GNUC_UNUSED MonoInst*
4242 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4244 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4245 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4248 switch (enum_type->type) {
4251 #if SIZEOF_REGISTER == 8
4263 MonoInst *load, *and_, *cmp, *ceq;
4264 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4265 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4266 int dest_reg = alloc_ireg (cfg);
4268 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4269 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4270 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4271 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
/* Result is a boolean on the evaluation stack. */
4273 ceq->type = STACK_I4;
/* The composite opcodes above are not valid for all backends; lower them now. */
4276 load = mono_decompose_opcode (cfg, load);
4277 and_ = mono_decompose_opcode (cfg, and_);
4278 cmp = mono_decompose_opcode (cfg, cmp);
4279 ceq = mono_decompose_opcode (cfg, ceq);
4287 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate of type
 * KLASS, fill in its target/method/method_code fields, and install the invoke
 * trampoline (or, in llvm-only mode, call the init-delegate icalls). Supports
 * virtual delegates (VIRTUAL_) and shared generic code (CONTEXT_USED).
 * Returns NULL and sets the cfg exception on error (see fragment above).
 */
static G_GNUC_UNUSED MonoInst*
4290 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4294 gpointer trampoline;
4295 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out (fall back to the non-inlined path) when no virtual-invoke impl exists. */
4299 if (virtual_ && !cfg->llvm_only) {
4300 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4303 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4307 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4311 /* Inline the contents of mono_delegate_ctor */
4313 /* Set target field */
4314 /* Optimize away setting of NULL target */
4315 if (!MONO_INS_IS_PCONST_NULL (target)) {
4316 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object needs a write barrier under SGen. */
4317 if (cfg->gen_write_barriers) {
4318 dreg = alloc_preg (cfg);
4319 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4320 mini_emit_write_barrier (cfg, ptr, target);
4324 /* Set method field */
4325 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4326 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4329 * To avoid looking up the compiled code belonging to the target method
4330 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4331 * store it, and we fill it after the method has been compiled.
4333 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4334 MonoInst *code_slot_ins;
4337 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or reuse) the per-domain code slot under the domain lock. */
4339 domain = mono_domain_get ();
4340 mono_domain_lock (domain);
4341 if (!domain_jit_info (domain)->method_code_hash)
4342 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4343 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4345 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4346 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4348 mono_domain_unlock (domain);
4350 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4352 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode has no trampolines: initialize via icall instead. */
4355 if (cfg->llvm_only) {
4356 MonoInst *args [16];
4361 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4362 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4365 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4371 if (cfg->compile_aot) {
/* AOT: trampoline info must be a patchable constant describing (klass, method, virtual). */
4372 MonoDelegateClassMethodPair *del_tramp;
4374 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4375 del_tramp->klass = klass;
4376 del_tramp->method = context_used ? NULL : method;
4377 del_tramp->is_virtual = virtual_;
4378 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4381 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4383 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4384 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4387 /* Set invoke_impl field */
4389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4391 dreg = alloc_preg (cfg);
4392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4395 dreg = alloc_preg (cfg);
4396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4397 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field). */
4400 dreg = alloc_preg (cfg);
4401 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4402 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4404 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK ('newarr'/
 * 'newobj' on array types) by calling the vararg icall mono_array_new_va ()
 * through its registered wrapper; the dimension arguments come from SP.
 */
4410 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4412 MonoJitICallInfo *info;
4414 /* Need to register the icall so it gets an icall wrapper */
4415 info = mono_get_array_new_va_icall (rank);
4417 cfg->flags |= MONO_CFG_HAS_VARARGS;
4419 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile vararg calls; force this method onto the regular JIT backend. */
4420 cfg->exception_message = g_strdup ("array-new");
4421 cfg->disable_llvm = TRUE;
4423 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4424 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4428 * handle_constrained_gsharedvt_call:
4430 * Handle constrained calls where the receiver is a gsharedvt type.
4431 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle 'constrained.' calls whose receiver is a gsharedvt type (see the
 * header fragment above). Because the receiver may be instantiated as either
 * a ref type or a vtype, the arguments are packed into a localloc-ed array
 * and the dispatch is delegated to the mono_gsharedvt_constrained_call icall;
 * the boxed result is then unboxed/unwrapped according to the return type.
 * Sets *REF_EMIT_WIDEN for the caller; sets the cfg exception on failure.
 */
4434 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4435 gboolean *ref_emit_widen)
4437 MonoInst *ins = NULL;
4438 gboolean emit_widen = *ref_emit_widen;
4441 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4442 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4443 * pack the arguments into an array, and do the rest of the work in in an icall.
/* The icall path only supports this restricted set of target methods / signatures. */
4445 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4446 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4447 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4448 MonoInst *args [16];
4451 * This case handles calls to
4452 * - object:ToString()/Equals()/GetHashCode(),
4453 * - System.IComparable<T>:CompareTo()
4454 * - System.IEquatable<T>:Equals ()
4455 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (through the RGCTX when generic-context dependent). */
4459 if (mono_method_check_context_used (cmethod))
4460 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4462 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4463 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4465 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4466 if (fsig->hasthis && fsig->param_count) {
4467 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4468 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4469 ins->dreg = alloc_preg (cfg);
4470 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4471 MONO_ADD_INS (cfg->cbb, ins);
4474 if (mini_is_gsharedvt_type (fsig->params [0])) {
4475 int addr_reg, deref_arg_reg;
/* Tell the icall whether it must dereference the argument slot (ref/nullable instantiation). */
4477 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4478 deref_arg_reg = alloc_preg (cfg);
4479 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4480 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4482 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4483 addr_reg = ins->dreg;
4484 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4486 EMIT_NEW_ICONST (cfg, args [3], 0);
4487 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4490 EMIT_NEW_ICONST (cfg, args [3], 0);
4491 EMIT_NEW_ICONST (cfg, args [4], 0);
4493 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unwrap it according to the declared return type. */
4496 if (mini_is_gsharedvt_type (fsig->ret)) {
4497 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4498 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
/* Read the primitive/struct payload right after the object header. */
4502 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4503 MONO_ADD_INS (cfg->cbb, add);
4505 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4506 MONO_ADD_INS (cfg->cbb, ins);
4507 /* ins represents the call result */
4510 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4513 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the method to initialize
 * cfg->got_var, plus a dummy use in the exit block so liveness analysis keeps
 * the variable alive for backend-generated references. Idempotent: does
 * nothing if there is no got_var or it was already allocated.
 */
4522 mono_emit_load_got_addr (MonoCompile *cfg)
4524 MonoInst *getaddr, *dummy_use;
4526 if (!cfg->got_var || cfg->got_var_allocated)
4529 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4530 getaddr->cil_code = cfg->header->code;
4531 getaddr->dreg = cfg->got_var->dreg;
4533 /* Add it to the start of the first bblock */
/* Prepend rather than append: the GOT address must be live before any other instruction. */
4534 if (cfg->bb_entry->code) {
4535 getaddr->next = cfg->bb_entry->code;
4536 cfg->bb_entry->code = getaddr;
4539 MONO_ADD_INS (cfg->bb_entry, getaddr);
4541 cfg->got_var_allocated = TRUE;
4544 * Add a dummy use to keep the got_var alive, since real uses might
4545 * only be generated by the back ends.
4546 * Add it to end_bblock, so the variable's lifetime covers the whole
4548 * It would be better to make the usage of the got var explicit in all
4549 * cases when the backend needs it (i.e. calls, throw etc.), so this
4550 * wouldn't be needed.
4552 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4553 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL-size threshold for inlining; initialized lazily from MONO_INLINELIMIT or INLINE_LENGTH_LIMIT. */
4556 static int inline_limit;
4557 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects on explicit NoInlining, synchronized methods, MarshalByRef classes,
 * excessive inline depth, size above the inline limit (unless
 * AggressiveInlining), soft-float R4 signatures, and — crucially — callees
 * whose class would still need a cctor run at the call site.
 */
4560 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4562 MonoMethodHeaderSummary header;
4564 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4565 MonoMethodSignature *sig = mono_method_signature (method);
4569 if (cfg->disable_inline)
4574 if (cfg->inline_depth > 10)
4577 if (!mono_method_get_header_summary (method, &header))
4580 /*runtime, icall and pinvoke are checked by summary call*/
4581 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4582 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4583 (mono_class_is_marshalbyref (method->klass)) ||
4587 /* also consider num_locals? */
4588 /* Do the size check early to avoid creating vtables */
4589 if (!inline_limit_inited) {
4591 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4592 inline_limit = atoi (inlinelimit);
4593 g_free (inlinelimit);
4595 inline_limit = INLINE_LENGTH_LIMIT;
4596 inline_limit_inited = TRUE;
/* [MethodImpl(AggressiveInlining)] overrides the size limit. */
4598 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4602 * if we can initialize the class of the method right away, we do,
4603 * otherwise we don't allow inlining if the class needs initialization,
4604 * since it would mean inserting a call to mono_runtime_class_init()
4605 * inside the inlined code
4607 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4610 if (!(cfg->opt & MONO_OPT_SHARED)) {
4611 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4612 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4613 if (method->klass->has_cctor) {
4614 vtable = mono_class_vtable (cfg->domain, method->klass);
4617 if (!cfg->compile_aot) {
/* Run the cctor now so the inlined body never needs an init check. */
4619 if (!mono_runtime_class_init_full (vtable, &error)) {
4620 mono_error_cleanup (&error);
4625 } else if (mono_class_is_before_field_init (method->klass)) {
4626 if (cfg->run_cctors && method->klass->has_cctor) {
4627 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4628 if (!method->klass->runtime_info)
4629 /* No vtable created yet */
4631 vtable = mono_class_vtable (cfg->domain, method->klass);
4634 /* This makes so that inline cannot trigger */
4635 /* .cctors: too many apps depend on them */
4636 /* running with a specific order... */
4637 if (! vtable->initialized)
4640 if (!mono_runtime_class_init_full (vtable, &error)) {
4641 mono_error_cleanup (&error);
4645 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4646 if (!method->klass->runtime_info)
4647 /* No vtable created yet */
4649 vtable = mono_class_vtable (cfg->domain, method->klass);
4652 if (!vtable->initialized)
4657 * If we're compiling for shared code
4658 * the cctor will need to be run at aot method load time, for example,
4659 * or at the end of the compilation of the inlining method.
4661 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4665 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float backends cannot inline methods with R4 in the signature. */
4666 if (mono_arch_is_soft_float ()) {
4668 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4670 for (i = 0; i < sig->param_count; ++i)
4671 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4676 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access to KLASS from METHOD requires
 * emitting a class-initialization check. Already-initialized vtables (JIT
 * only), BeforeFieldInit classes accessed from the outer method, classes
 * needing no cctor, and static members of the method's own class are exempt.
 */
4683 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* AOT cannot rely on the JIT-time initialized state. */
4685 if (!cfg->compile_aot) {
4687 if (vtable->initialized)
4691 if (mono_class_is_before_field_init (klass)) {
4692 if (cfg->method == method)
4696 if (!mono_class_needs_cctor_run (klass, method))
4699 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4700 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have class KLASS.  BCHECK controls whether an
 * array bounds check is emitted.  Returns the address instruction
 * (type STACK_MP, klass = element class).
 * For gsharedvt variable-size element types, the element size is loaded from
 * the rgctx instead of being a compile-time constant.
 * NOTE(review): listing is truncated — some intervening lines (e.g. #else
 * branches, returns) are missing from this view.
 */
4707 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4711 	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4714 	if (mini_is_gsharedvt_variable_klass (klass)) {
4717 	mono_class_init (klass);
4718 	size = mono_class_array_element_size (klass);
4721 	mult_reg = alloc_preg (cfg);
4722 	array_reg = arr->dreg;
4723 	index_reg = index->dreg;
4725 #if SIZEOF_REGISTER == 8
4726 	/* The array reg is 64 bits but the index reg is only 32 */
4727 	if (COMPILE_LLVM (cfg)) {
4729 		 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4730 		 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4731 		 * of OP_X86_LEA for llvm.
4733 		index2_reg = index_reg;
4735 		index2_reg = alloc_preg (cfg);
4736 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit registers: narrow an i8 index down to i4 */
4739 	if (index->type == STACK_I8) {
4740 		index2_reg = alloc_preg (cfg);
4741 		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4743 		index2_reg = index_reg;
4748 		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4750 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* fast path: element size is a power of two, fold the scale into an LEA */
4751 	if (size == 1 || size == 2 || size == 4 || size == 8) {
4752 		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4754 		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4755 		ins->klass = mono_class_get_element_class (klass);
4756 		ins->type = STACK_MP;
4762 	add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: element size is only known at run time, fetch it via rgctx */
4765 		MonoInst *rgctx_ins;
4768 		g_assert (cfg->gshared);
4769 		context_used = mini_class_check_context_used (cfg, klass);
4770 		g_assert (context_used);
4771 		rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4772 		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4774 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
4776 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4777 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4778 	ins->klass = mono_class_get_element_class (klass);
4779 	ins->type = STACK_MP;
4780 	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [INDEX1, INDEX2] of a
 * two-dimensional array ARR with element class KLASS.  Each dimension is
 * adjusted by its lower bound and range-checked against its length before
 * the flattened offset is computed.  Returns the address instruction
 * (type STACK_MP).
 * NOTE(review): listing is truncated — some intervening lines are missing
 * from this view.
 */
4786 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4788 	int bounds_reg = alloc_preg (cfg);
4789 	int add_reg = alloc_ireg_mp (cfg);
4790 	int mult_reg = alloc_preg (cfg);
4791 	int mult2_reg = alloc_preg (cfg);
4792 	int low1_reg = alloc_preg (cfg);
4793 	int low2_reg = alloc_preg (cfg);
4794 	int high1_reg = alloc_preg (cfg);
4795 	int high2_reg = alloc_preg (cfg);
4796 	int realidx1_reg = alloc_preg (cfg);
4797 	int realidx2_reg = alloc_preg (cfg);
4798 	int sum_reg = alloc_preg (cfg);
4799 	int index1, index2, tmpreg;
4803 	mono_class_init (klass);
4804 	size = mono_class_array_element_size (klass);
4806 	index1 = index_ins1->dreg;
4807 	index2 = index_ins2->dreg;
4809 #if SIZEOF_REGISTER == 8
4810 	/* The array reg is 64 bits but the index reg is only 32 */
4811 	if (COMPILE_LLVM (cfg)) {
4814 		tmpreg = alloc_preg (cfg);
4815 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4817 		tmpreg = alloc_preg (cfg);
4818 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4822 	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4826 	/* range checking */
4827 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4828 				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* dimension 1: realidx1 = index1 - lower_bound; throw if realidx1 >= length */
4830 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4831 				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4832 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4833 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4834 				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4835 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4836 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* dimension 2: same, bounds entry is at offset sizeof (MonoArrayBounds) */
4838 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4839 				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4840 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4841 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4842 				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4843 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4844 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (MonoArray, vector) */
4846 	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4847 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4848 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4849 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4850 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4852 	ins->type = STACK_MP;
4854 	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the address computation for an array element access implemented by
 * the array accessor CMETHOD (Get/Set/Address).  SP holds the stack
 * arguments (array then indexes); IS_SET subtracts the trailing value
 * argument from the rank.  Rank-1 arrays use the inline fast path; rank-2
 * uses the intrinsic when OP_LMUL is available; everything else calls the
 * marshalled Address() helper.
 * NOTE(review): listing is truncated — some lines are missing from this view.
 */
4860 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4864 	MonoMethod *addr_method;
4866 	MonoClass *eclass = cmethod->klass->element_class;
4868 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4871 		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4873 	/* emit_ldelema_2 depends on OP_LMUL */
4874 	if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4875 		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt variable-size elements fall back to the managed helper */
4878 	if (mini_is_gsharedvt_variable_klass (eclass))
4881 	element_size = mono_class_array_element_size (eclass);
4882 	addr_method = mono_marshal_get_array_address (rank, element_size);
4883 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 * Default break policy: honor every break IL instruction / Debugger.Break ().
 */
4888 static MonoBreakPolicy
4889 always_insert_breakpoint (MonoMethod *method)
4891 	return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy () below. */
4894 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4897  * mono_set_break_policy:
4898  * \param policy_callback the new callback function
4900  * Allow embedders to decide whether to actually obey breakpoint instructions
4901  * (both break IL instructions and \c Debugger.Break method calls), for example
4902  * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4903  * untrusted or semi-trusted code.
4905  * \p policy_callback will be called every time a break point instruction needs to
4906  * be inserted with the method argument being the method that calls \c Debugger.Break
4907  * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4908  * if it wants the breakpoint to not be effective in the given method.
4909  * \c MONO_BREAK_POLICY_ALWAYS is the default.
4912 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4914 	if (policy_callback)
4915 		break_policy_func = policy_callback;
/* NULL resets the default policy (insert every breakpoint) */
4917 		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — historical typo for "breakpoint", kept
 * because callers elsewhere in the file use this name)
 * Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.
 * NOTE(review): listing is truncated — the return statements for each case
 * are missing from this view.
 */
4921 should_insert_brekpoint (MonoMethod *method) {
4922 	switch (break_policy_func (method)) {
4923 	case MONO_BREAK_POLICY_ALWAYS:
4925 	case MONO_BREAK_POLICY_NEVER:
4927 	case MONO_BREAK_POLICY_ON_DBG:
4928 		g_warning ("mdb no longer supported");
4931 		g_warning ("Incorrect value returned from break policy callback");
4936 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * ARGS are (array, index, ref value).  IS_SET selects between storing into
 * the array element (with a write barrier for reference elements) and
 * loading the element into *value.
 */
4938 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4940 	MonoInst *addr, *store, *load;
4941 	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4943 	/* the bounds check is already done by the callers */
4944 	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* SetGenericValueImpl: copy *args[2] into the element slot */
4946 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4947 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4948 		if (mini_type_is_reference (&eklass->byval_arg))
4949 			mini_emit_write_barrier (cfg, addr, load);
/* GetGenericValueImpl: copy the element into *args[2] */
4951 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4952 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type (taking gshared type vars into account). */
4959 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4961 	return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for stelem: store SP[2] into element SP[1] of array SP[0] whose
 * elements have class KLASS.  When SAFETY_CHECKS is set and the element is a
 * reference type (and the stored value is not a known null), the store goes
 * through the virtual-stelemref helper so the array covariance check runs.
 * Otherwise emit a direct (optionally bounds-checked) store, with a write
 * barrier for reference elements.
 * NOTE(review): listing is truncated — some lines are missing from this view.
 */
4965 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4967 	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4968 		!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4969 		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4970 		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4971 		MonoInst *iargs [3];
4974 			mono_class_setup_vtable (obj_array);
4975 		g_assert (helper->slot);
4977 		if (sp [0]->type != STACK_OBJ)
4979 		if (sp [2]->type != STACK_OBJ)
4986 		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use ldelema + STOREV */
4990 		if (mini_is_gsharedvt_variable_klass (klass)) {
4993 			// FIXME-VT: OP_ICONST optimization
4994 			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4995 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4996 			ins->opcode = OP_STOREV_MEMBASE;
/* constant index: fold the element offset into the store */
4997 		} else if (sp [1]->opcode == OP_ICONST) {
4998 			int array_reg = sp [0]->dreg;
4999 			int index_reg = sp [1]->dreg;
5000 			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5002 			if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5003 				MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5006 				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5007 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* general case: compute the element address, then store through it */
5009 			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5010 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5011 			if (generic_class_is_reference_type (cfg, klass))
5012 				mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Intrinsic for Array.UnsafeStore/UnsafeLoad: element access with no bounds
 * or covariance checks.  IS_SET selects store vs. load; the element class
 * comes from the value parameter for stores and from the return type for
 * loads.
 * NOTE(review): listing is truncated — the surrounding if/else and return
 * lines are missing from this view.
 */
5019 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5024 		eklass = mono_class_from_mono_type (fsig->params [2]);
5026 		eklass = mono_class_from_mono_type (fsig->ret);
/* store: reuse emit_array_store with safety_checks == FALSE */
5029 		return emit_array_store (cfg, eklass, args, FALSE);
5031 		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5032 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether the Array.UnsafeMov<S,R> intrinsic may reinterpret a value
 * of PARAM_KLASS as RETURN_KLASS without a real conversion.  Allowed when
 * both are value types with no GC references, not floats, not a struct/scalar
 * mix, and either the sizes match exactly or both fit in a 32-bit register.
 * Fix: four occurrences of "&param_klass" had been corrupted to the mojibake
 * "¶m_klass" ("&para" rendered as the pilcrow entity); restored so the
 * expressions take the address of param_klass->byval_arg again.
 * NOTE(review): listing is truncated — the return TRUE/FALSE lines between
 * the numbered lines are missing from this view.
 */
5038 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5041 	int param_size, return_size;
5043 	param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5044 	return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5046 	if (cfg->verbose_level > 3)
5047 		printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5049 	//Don't allow mixing reference types with value types
5050 	if (param_klass->valuetype != return_klass->valuetype) {
5051 		if (cfg->verbose_level > 3)
5052 			printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5056 	if (!param_klass->valuetype) {
5057 		if (cfg->verbose_level > 3)
5058 			printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* GC references inside either type would need barriers — disallow */
5063 	if (param_klass->has_references || return_klass->has_references)
5066 	/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5067 	if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5068 		(!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5069 		if (cfg->verbose_level > 3)
5070 			printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5074 	if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5075 		return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5076 		if (cfg->verbose_level > 3)
5077 			printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5081 	param_size = mono_class_value_size (param_klass, &align);
5082 	return_size = mono_class_value_size (return_klass, &align);
5084 	//We can do it if sizes match
5085 	if (param_size == return_size) {
5086 		if (cfg->verbose_level > 3)
5087 			printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5091 	//No simple way to handle struct if sizes don't match
5092 	if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5093 		if (cfg->verbose_level > 3)
5094 			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5099 	 * Same reg size category.
5100 	 * A quick note on why we don't require widening here.
5101 	 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5103 	 * Since the source value comes from a function argument, the JIT will already have
5104 	 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5106 	if (param_size <= 4 && return_size <= 4) {
5107 		if (cfg->verbose_level > 3)
5108 			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 * Intrinsic for Array.UnsafeMov<S,R>: reinterpret the argument as the return
 * type when is_unsafe_mov_compatible () allows it, for the types themselves
 * or for rank-1 arrays of compatible element types.  Bails out for gsharedvt
 * variable return types.
 * NOTE(review): listing is truncated — the return statements are missing
 * from this view.
 */
5116 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5118 	MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5119 	MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5121 	if (mini_is_gsharedvt_variable_type (fsig->ret))
5124 	//Valuetypes that are semantically equivalent or numbers than can be widened to
5125 	if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5128 	//Arrays of valuetypes that are semantically equivalent
5129 	if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Try to replace a constructor call with an intrinsic instruction sequence.
 * Checks SIMD intrinsics first (when MONO_OPT_SIMD is enabled), then the
 * native-types intrinsics; returns NULL-or-instruction per the usual
 * intrinsic protocol.
 */
5136 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5138 #ifdef MONO_ARCH_SIMD_INTRINSICS
5139 	MonoInst *ins = NULL;
5141 	if (cfg->opt & MONO_OPT_SIMD) {
5142 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5148 	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 * Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5152 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
5154 	MonoInst *ins = NULL;
5155 	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5156 	MONO_ADD_INS (cfg->cbb, ins);
5157 	ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * LLVM-backend-only intrinsics: map Math.Sin/Cos/Sqrt/Abs(double) to their
 * opcodes, and (with MONO_OPT_CMOV) Math.Min/Max on i4/u4/i8/u8 to the
 * IMIN/IMAX family.  Returns the emitted instruction or falls through.
 * NOTE(review): listing is truncated — the opcode assignments for the
 * Sin/Cos/Sqrt/Abs cases and the signed Min/Max i4/i8 cases are among the
 * lines missing from this view.
 */
5163 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5165 	MonoInst *ins = NULL;
5168 	/* The LLVM backend supports these intrinsics */
5169 	if (cmethod->klass == mono_defaults.math_class) {
5170 		if (strcmp (cmethod->name, "Sin") == 0) {
5172 		} else if (strcmp (cmethod->name, "Cos") == 0) {
5174 		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
5176 		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: result in a fresh R8 vreg */
5180 		if (opcode && fsig->param_count == 1) {
5181 			MONO_INST_NEW (cfg, ins, opcode);
5182 			ins->type = STACK_R8;
5183 			ins->dreg = mono_alloc_dreg (cfg, ins->type);
5184 			ins->sreg1 = args [0]->dreg;
5185 			MONO_ADD_INS (cfg->cbb, ins);
5189 		if (cfg->opt & MONO_OPT_CMOV) {
5190 			if (strcmp (cmethod->name, "Min") == 0) {
5191 				if (fsig->params [0]->type == MONO_TYPE_I4)
5193 				if (fsig->params [0]->type == MONO_TYPE_U4)
5194 					opcode = OP_IMIN_UN;
5195 				else if (fsig->params [0]->type == MONO_TYPE_I8)
5197 				else if (fsig->params [0]->type == MONO_TYPE_U8)
5198 					opcode = OP_LMIN_UN;
5199 			} else if (strcmp (cmethod->name, "Max") == 0) {
5200 				if (fsig->params [0]->type == MONO_TYPE_I4)
5202 				if (fsig->params [0]->type == MONO_TYPE_U4)
5203 					opcode = OP_IMAX_UN;
5204 				else if (fsig->params [0]->type == MONO_TYPE_I8)
5206 				else if (fsig->params [0]->type == MONO_TYPE_U8)
5207 					opcode = OP_LMAX_UN;
/* binary min/max intrinsic: stack type follows the I4/I8 parameter */
5211 		if (opcode && fsig->param_count == 2) {
5212 			MONO_INST_NEW (cfg, ins, opcode);
5213 			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5214 			ins->dreg = mono_alloc_dreg (cfg, ins->type);
5215 			ins->sreg1 = args [0]->dreg;
5216 			ins->sreg2 = args [1]->dreg;
5217 			MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics that are safe to use from generic-shared code: dispatch
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov to their emitters.
 */
5225 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5227 	if (cmethod->klass == mono_defaults.array_class) {
5228 		if (strcmp (cmethod->name, "UnsafeStore") == 0)
5229 			return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5230 		else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5231 			return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5232 		else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5233 			return emit_array_unsafe_mov (cfg, fsig, args);
5240 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5242 MonoInst *ins = NULL;
5243 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5245 if (cmethod->klass == mono_defaults.string_class) {
5246 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5247 int dreg = alloc_ireg (cfg);
5248 int index_reg = alloc_preg (cfg);
5249 int add_reg = alloc_preg (cfg);
5251 #if SIZEOF_REGISTER == 8
5252 if (COMPILE_LLVM (cfg)) {
5253 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5255 /* The array reg is 64 bits but the index reg is only 32 */
5256 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5259 index_reg = args [1]->dreg;
5261 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5263 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5264 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5265 add_reg = ins->dreg;
5266 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5269 int mult_reg = alloc_preg (cfg);
5270 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5271 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5272 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5273 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5275 type_from_op (cfg, ins, NULL, NULL);
5277 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5278 int dreg = alloc_ireg (cfg);
5279 /* Decompose later to allow more optimizations */
5280 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5281 ins->type = STACK_I4;
5282 ins->flags |= MONO_INST_FAULT;
5283 cfg->cbb->has_array_access = TRUE;
5284 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5289 } else if (cmethod->klass == mono_defaults.object_class) {
5290 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5291 int dreg = alloc_ireg_ref (cfg);
5292 int vt_reg = alloc_preg (cfg);
5293 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5294 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5295 type_from_op (cfg, ins, NULL, NULL);
5298 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5299 int dreg = alloc_ireg (cfg);
5300 int t1 = alloc_ireg (cfg);
5302 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5303 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5304 ins->type = STACK_I4;
5307 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5308 MONO_INST_NEW (cfg, ins, OP_NOP);
5309 MONO_ADD_INS (cfg->cbb, ins);
5313 } else if (cmethod->klass == mono_defaults.array_class) {
5314 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5315 return emit_array_generic_access (cfg, fsig, args, FALSE);
5316 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5317 return emit_array_generic_access (cfg, fsig, args, TRUE);
5319 #ifndef MONO_BIG_ARRAYS
5321 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5324 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5325 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5326 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5327 int dreg = alloc_ireg (cfg);
5328 int bounds_reg = alloc_ireg_mp (cfg);
5329 MonoBasicBlock *end_bb, *szarray_bb;
5330 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5332 NEW_BBLOCK (cfg, end_bb);
5333 NEW_BBLOCK (cfg, szarray_bb);
5335 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5336 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5338 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5339 /* Non-szarray case */
5341 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5342 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5344 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5345 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5347 MONO_START_BB (cfg, szarray_bb);
5350 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5351 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5353 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5354 MONO_START_BB (cfg, end_bb);
5356 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5357 ins->type = STACK_I4;
5363 if (cmethod->name [0] != 'g')
5366 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5367 int dreg = alloc_ireg (cfg);
5368 int vtable_reg = alloc_preg (cfg);
5369 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5370 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5371 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5372 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5373 type_from_op (cfg, ins, NULL, NULL);
5376 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5377 int dreg = alloc_ireg (cfg);
5379 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5380 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5381 type_from_op (cfg, ins, NULL, NULL);
5386 } else if (cmethod->klass == runtime_helpers_class) {
5387 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5388 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5390 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5391 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5393 g_assert (ctx->method_inst);
5394 g_assert (ctx->method_inst->type_argc == 1);
5395 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5396 MonoClass *klass = mono_class_from_mono_type (t);
5400 mono_class_init (klass);
5401 if (MONO_TYPE_IS_REFERENCE (t))
5402 EMIT_NEW_ICONST (cfg, ins, 1);
5403 else if (MONO_TYPE_IS_PRIMITIVE (t))
5404 EMIT_NEW_ICONST (cfg, ins, 0);
5405 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5406 EMIT_NEW_ICONST (cfg, ins, 1);
5407 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5408 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5410 g_assert (cfg->gshared);
5412 int context_used = mini_class_check_context_used (cfg, klass);
5414 /* This returns 1 or 2 */
5415 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5416 int dreg = alloc_ireg (cfg);
5417 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5423 } else if (cmethod->klass == mono_defaults.monitor_class) {
5424 gboolean is_enter = FALSE;
5425 gboolean is_v4 = FALSE;
5427 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5431 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5436 * To make async stack traces work, icalls which can block should have a wrapper.
5437 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5439 MonoBasicBlock *end_bb;
5441 NEW_BBLOCK (cfg, end_bb);
5443 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5444 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5445 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5446 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5447 MONO_START_BB (cfg, end_bb);
5450 } else if (cmethod->klass == mono_defaults.thread_class) {
5451 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5452 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5453 MONO_ADD_INS (cfg->cbb, ins);
5455 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5456 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5457 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5459 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5461 if (fsig->params [0]->type == MONO_TYPE_I1)
5462 opcode = OP_LOADI1_MEMBASE;
5463 else if (fsig->params [0]->type == MONO_TYPE_U1)
5464 opcode = OP_LOADU1_MEMBASE;
5465 else if (fsig->params [0]->type == MONO_TYPE_I2)
5466 opcode = OP_LOADI2_MEMBASE;
5467 else if (fsig->params [0]->type == MONO_TYPE_U2)
5468 opcode = OP_LOADU2_MEMBASE;
5469 else if (fsig->params [0]->type == MONO_TYPE_I4)
5470 opcode = OP_LOADI4_MEMBASE;
5471 else if (fsig->params [0]->type == MONO_TYPE_U4)
5472 opcode = OP_LOADU4_MEMBASE;
5473 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5474 opcode = OP_LOADI8_MEMBASE;
5475 else if (fsig->params [0]->type == MONO_TYPE_R4)
5476 opcode = OP_LOADR4_MEMBASE;
5477 else if (fsig->params [0]->type == MONO_TYPE_R8)
5478 opcode = OP_LOADR8_MEMBASE;
5479 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5480 opcode = OP_LOAD_MEMBASE;
5483 MONO_INST_NEW (cfg, ins, opcode);
5484 ins->inst_basereg = args [0]->dreg;
5485 ins->inst_offset = 0;
5486 MONO_ADD_INS (cfg->cbb, ins);
5488 switch (fsig->params [0]->type) {
5495 ins->dreg = mono_alloc_ireg (cfg);
5496 ins->type = STACK_I4;
5500 ins->dreg = mono_alloc_lreg (cfg);
5501 ins->type = STACK_I8;
5505 ins->dreg = mono_alloc_ireg (cfg);
5506 #if SIZEOF_REGISTER == 8
5507 ins->type = STACK_I8;
5509 ins->type = STACK_I4;
5514 ins->dreg = mono_alloc_freg (cfg);
5515 ins->type = STACK_R8;
5518 g_assert (mini_type_is_reference (fsig->params [0]));
5519 ins->dreg = mono_alloc_ireg_ref (cfg);
5520 ins->type = STACK_OBJ;
5524 if (opcode == OP_LOADI8_MEMBASE)
5525 ins = mono_decompose_opcode (cfg, ins);
5527 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5531 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5533 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5535 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5536 opcode = OP_STOREI1_MEMBASE_REG;
5537 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5538 opcode = OP_STOREI2_MEMBASE_REG;
5539 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5540 opcode = OP_STOREI4_MEMBASE_REG;
5541 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5542 opcode = OP_STOREI8_MEMBASE_REG;
5543 else if (fsig->params [0]->type == MONO_TYPE_R4)
5544 opcode = OP_STORER4_MEMBASE_REG;
5545 else if (fsig->params [0]->type == MONO_TYPE_R8)
5546 opcode = OP_STORER8_MEMBASE_REG;
5547 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5548 opcode = OP_STORE_MEMBASE_REG;
5551 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5553 MONO_INST_NEW (cfg, ins, opcode);
5554 ins->sreg1 = args [1]->dreg;
5555 ins->inst_destbasereg = args [0]->dreg;
5556 ins->inst_offset = 0;
5557 MONO_ADD_INS (cfg->cbb, ins);
5559 if (opcode == OP_STOREI8_MEMBASE_REG)
5560 ins = mono_decompose_opcode (cfg, ins);
5565 } else if (cmethod->klass->image == mono_defaults.corlib &&
5566 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5567 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5570 #if SIZEOF_REGISTER == 8
5571 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5572 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5573 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5574 ins->dreg = mono_alloc_preg (cfg);
5575 ins->sreg1 = args [0]->dreg;
5576 ins->type = STACK_I8;
5577 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5578 MONO_ADD_INS (cfg->cbb, ins);
5582 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5584 /* 64 bit reads are already atomic */
5585 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5586 load_ins->dreg = mono_alloc_preg (cfg);
5587 load_ins->inst_basereg = args [0]->dreg;
5588 load_ins->inst_offset = 0;
5589 load_ins->type = STACK_I8;
5590 MONO_ADD_INS (cfg->cbb, load_ins);
5592 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5599 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5600 MonoInst *ins_iconst;
5603 if (fsig->params [0]->type == MONO_TYPE_I4) {
5604 opcode = OP_ATOMIC_ADD_I4;
5605 cfg->has_atomic_add_i4 = TRUE;
5607 #if SIZEOF_REGISTER == 8
5608 else if (fsig->params [0]->type == MONO_TYPE_I8)
5609 opcode = OP_ATOMIC_ADD_I8;
5612 if (!mono_arch_opcode_supported (opcode))
5614 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5615 ins_iconst->inst_c0 = 1;
5616 ins_iconst->dreg = mono_alloc_ireg (cfg);
5617 MONO_ADD_INS (cfg->cbb, ins_iconst);
5619 MONO_INST_NEW (cfg, ins, opcode);
5620 ins->dreg = mono_alloc_ireg (cfg);
5621 ins->inst_basereg = args [0]->dreg;
5622 ins->inst_offset = 0;
5623 ins->sreg2 = ins_iconst->dreg;
5624 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5625 MONO_ADD_INS (cfg->cbb, ins);
5627 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5628 MonoInst *ins_iconst;
5631 if (fsig->params [0]->type == MONO_TYPE_I4) {
5632 opcode = OP_ATOMIC_ADD_I4;
5633 cfg->has_atomic_add_i4 = TRUE;
5635 #if SIZEOF_REGISTER == 8
5636 else if (fsig->params [0]->type == MONO_TYPE_I8)
5637 opcode = OP_ATOMIC_ADD_I8;
5640 if (!mono_arch_opcode_supported (opcode))
5642 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5643 ins_iconst->inst_c0 = -1;
5644 ins_iconst->dreg = mono_alloc_ireg (cfg);
5645 MONO_ADD_INS (cfg->cbb, ins_iconst);
5647 MONO_INST_NEW (cfg, ins, opcode);
5648 ins->dreg = mono_alloc_ireg (cfg);
5649 ins->inst_basereg = args [0]->dreg;
5650 ins->inst_offset = 0;
5651 ins->sreg2 = ins_iconst->dreg;
5652 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5653 MONO_ADD_INS (cfg->cbb, ins);
5655 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5658 if (fsig->params [0]->type == MONO_TYPE_I4) {
5659 opcode = OP_ATOMIC_ADD_I4;
5660 cfg->has_atomic_add_i4 = TRUE;
5662 #if SIZEOF_REGISTER == 8
5663 else if (fsig->params [0]->type == MONO_TYPE_I8)
5664 opcode = OP_ATOMIC_ADD_I8;
5667 if (!mono_arch_opcode_supported (opcode))
5669 MONO_INST_NEW (cfg, ins, opcode);
5670 ins->dreg = mono_alloc_ireg (cfg);
5671 ins->inst_basereg = args [0]->dreg;
5672 ins->inst_offset = 0;
5673 ins->sreg2 = args [1]->dreg;
5674 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5675 MONO_ADD_INS (cfg->cbb, ins);
5678 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5679 MonoInst *f2i = NULL, *i2f;
5680 guint32 opcode, f2i_opcode, i2f_opcode;
5681 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5682 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5684 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5685 fsig->params [0]->type == MONO_TYPE_R4) {
5686 opcode = OP_ATOMIC_EXCHANGE_I4;
5687 f2i_opcode = OP_MOVE_F_TO_I4;
5688 i2f_opcode = OP_MOVE_I4_TO_F;
5689 cfg->has_atomic_exchange_i4 = TRUE;
5691 #if SIZEOF_REGISTER == 8
5693 fsig->params [0]->type == MONO_TYPE_I8 ||
5694 fsig->params [0]->type == MONO_TYPE_R8 ||
5695 fsig->params [0]->type == MONO_TYPE_I) {
5696 opcode = OP_ATOMIC_EXCHANGE_I8;
5697 f2i_opcode = OP_MOVE_F_TO_I8;
5698 i2f_opcode = OP_MOVE_I8_TO_F;
5701 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5702 opcode = OP_ATOMIC_EXCHANGE_I4;
5703 cfg->has_atomic_exchange_i4 = TRUE;
5709 if (!mono_arch_opcode_supported (opcode))
5713 /* TODO: Decompose these opcodes instead of bailing here. */
5714 if (COMPILE_SOFT_FLOAT (cfg))
5717 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5718 f2i->dreg = mono_alloc_ireg (cfg);
5719 f2i->sreg1 = args [1]->dreg;
5720 if (f2i_opcode == OP_MOVE_F_TO_I4)
5721 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5722 MONO_ADD_INS (cfg->cbb, f2i);
5725 MONO_INST_NEW (cfg, ins, opcode);
5726 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5727 ins->inst_basereg = args [0]->dreg;
5728 ins->inst_offset = 0;
5729 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5730 MONO_ADD_INS (cfg->cbb, ins);
5732 switch (fsig->params [0]->type) {
5734 ins->type = STACK_I4;
5737 ins->type = STACK_I8;
5740 #if SIZEOF_REGISTER == 8
5741 ins->type = STACK_I8;
5743 ins->type = STACK_I4;
5748 ins->type = STACK_R8;
5751 g_assert (mini_type_is_reference (fsig->params [0]));
5752 ins->type = STACK_OBJ;
5757 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5758 i2f->dreg = mono_alloc_freg (cfg);
5759 i2f->sreg1 = ins->dreg;
5760 i2f->type = STACK_R8;
5761 if (i2f_opcode == OP_MOVE_I4_TO_F)
5762 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5763 MONO_ADD_INS (cfg->cbb, i2f);
5768 if (cfg->gen_write_barriers && is_ref)
5769 mini_emit_write_barrier (cfg, args [0], args [1]);
5771 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5772 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5773 guint32 opcode, f2i_opcode, i2f_opcode;
5774 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5775 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5777 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5778 fsig->params [1]->type == MONO_TYPE_R4) {
5779 opcode = OP_ATOMIC_CAS_I4;
5780 f2i_opcode = OP_MOVE_F_TO_I4;
5781 i2f_opcode = OP_MOVE_I4_TO_F;
5782 cfg->has_atomic_cas_i4 = TRUE;
5784 #if SIZEOF_REGISTER == 8
5786 fsig->params [1]->type == MONO_TYPE_I8 ||
5787 fsig->params [1]->type == MONO_TYPE_R8 ||
5788 fsig->params [1]->type == MONO_TYPE_I) {
5789 opcode = OP_ATOMIC_CAS_I8;
5790 f2i_opcode = OP_MOVE_F_TO_I8;
5791 i2f_opcode = OP_MOVE_I8_TO_F;
5794 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5795 opcode = OP_ATOMIC_CAS_I4;
5796 cfg->has_atomic_cas_i4 = TRUE;
5802 if (!mono_arch_opcode_supported (opcode))
5806 /* TODO: Decompose these opcodes instead of bailing here. */
5807 if (COMPILE_SOFT_FLOAT (cfg))
5810 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5811 f2i_new->dreg = mono_alloc_ireg (cfg);
5812 f2i_new->sreg1 = args [1]->dreg;
5813 if (f2i_opcode == OP_MOVE_F_TO_I4)
5814 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5815 MONO_ADD_INS (cfg->cbb, f2i_new);
5817 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5818 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5819 f2i_cmp->sreg1 = args [2]->dreg;
5820 if (f2i_opcode == OP_MOVE_F_TO_I4)
5821 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5822 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5825 MONO_INST_NEW (cfg, ins, opcode);
5826 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5827 ins->sreg1 = args [0]->dreg;
5828 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5829 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5830 MONO_ADD_INS (cfg->cbb, ins);
5832 switch (fsig->params [1]->type) {
5834 ins->type = STACK_I4;
5837 ins->type = STACK_I8;
5840 #if SIZEOF_REGISTER == 8
5841 ins->type = STACK_I8;
5843 ins->type = STACK_I4;
5847 ins->type = cfg->r4_stack_type;
5850 ins->type = STACK_R8;
5853 g_assert (mini_type_is_reference (fsig->params [1]));
5854 ins->type = STACK_OBJ;
5859 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5860 i2f->dreg = mono_alloc_freg (cfg);
5861 i2f->sreg1 = ins->dreg;
5862 i2f->type = STACK_R8;
5863 if (i2f_opcode == OP_MOVE_I4_TO_F)
5864 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5865 MONO_ADD_INS (cfg->cbb, i2f);
5870 if (cfg->gen_write_barriers && is_ref)
5871 mini_emit_write_barrier (cfg, args [0], args [1]);
5873 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5874 fsig->params [1]->type == MONO_TYPE_I4) {
5875 MonoInst *cmp, *ceq;
5877 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5880 /* int32 r = CAS (location, value, comparand); */
5881 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5882 ins->dreg = alloc_ireg (cfg);
5883 ins->sreg1 = args [0]->dreg;
5884 ins->sreg2 = args [1]->dreg;
5885 ins->sreg3 = args [2]->dreg;
5886 ins->type = STACK_I4;
5887 MONO_ADD_INS (cfg->cbb, ins);
5889 /* bool result = r == comparand; */
5890 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5891 cmp->sreg1 = ins->dreg;
5892 cmp->sreg2 = args [2]->dreg;
5893 cmp->type = STACK_I4;
5894 MONO_ADD_INS (cfg->cbb, cmp);
5896 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5897 ceq->dreg = alloc_ireg (cfg);
5898 ceq->type = STACK_I4;
5899 MONO_ADD_INS (cfg->cbb, ceq);
5901 /* *success = result; */
5902 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5904 cfg->has_atomic_cas_i4 = TRUE;
5906 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5907 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5911 } else if (cmethod->klass->image == mono_defaults.corlib &&
5912 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5913 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5916 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5918 MonoType *t = fsig->params [0];
5920 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5922 g_assert (t->byref);
5923 /* t is a byref type, so the reference check is more complicated */
5924 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5925 if (t->type == MONO_TYPE_I1)
5926 opcode = OP_ATOMIC_LOAD_I1;
5927 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5928 opcode = OP_ATOMIC_LOAD_U1;
5929 else if (t->type == MONO_TYPE_I2)
5930 opcode = OP_ATOMIC_LOAD_I2;
5931 else if (t->type == MONO_TYPE_U2)
5932 opcode = OP_ATOMIC_LOAD_U2;
5933 else if (t->type == MONO_TYPE_I4)
5934 opcode = OP_ATOMIC_LOAD_I4;
5935 else if (t->type == MONO_TYPE_U4)
5936 opcode = OP_ATOMIC_LOAD_U4;
5937 else if (t->type == MONO_TYPE_R4)
5938 opcode = OP_ATOMIC_LOAD_R4;
5939 else if (t->type == MONO_TYPE_R8)
5940 opcode = OP_ATOMIC_LOAD_R8;
5941 #if SIZEOF_REGISTER == 8
5942 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5943 opcode = OP_ATOMIC_LOAD_I8;
5944 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5945 opcode = OP_ATOMIC_LOAD_U8;
5947 else if (t->type == MONO_TYPE_I)
5948 opcode = OP_ATOMIC_LOAD_I4;
5949 else if (is_ref || t->type == MONO_TYPE_U)
5950 opcode = OP_ATOMIC_LOAD_U4;
5954 if (!mono_arch_opcode_supported (opcode))
5957 MONO_INST_NEW (cfg, ins, opcode);
5958 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5959 ins->sreg1 = args [0]->dreg;
5960 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5961 MONO_ADD_INS (cfg->cbb, ins);
5964 case MONO_TYPE_BOOLEAN:
5971 ins->type = STACK_I4;
5975 ins->type = STACK_I8;
5979 #if SIZEOF_REGISTER == 8
5980 ins->type = STACK_I8;
5982 ins->type = STACK_I4;
5986 ins->type = cfg->r4_stack_type;
5989 ins->type = STACK_R8;
5993 ins->type = STACK_OBJ;
5999 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6001 MonoType *t = fsig->params [0];
6004 g_assert (t->byref);
6005 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6006 if (t->type == MONO_TYPE_I1)
6007 opcode = OP_ATOMIC_STORE_I1;
6008 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6009 opcode = OP_ATOMIC_STORE_U1;
6010 else if (t->type == MONO_TYPE_I2)
6011 opcode = OP_ATOMIC_STORE_I2;
6012 else if (t->type == MONO_TYPE_U2)
6013 opcode = OP_ATOMIC_STORE_U2;
6014 else if (t->type == MONO_TYPE_I4)
6015 opcode = OP_ATOMIC_STORE_I4;
6016 else if (t->type == MONO_TYPE_U4)
6017 opcode = OP_ATOMIC_STORE_U4;
6018 else if (t->type == MONO_TYPE_R4)
6019 opcode = OP_ATOMIC_STORE_R4;
6020 else if (t->type == MONO_TYPE_R8)
6021 opcode = OP_ATOMIC_STORE_R8;
6022 #if SIZEOF_REGISTER == 8
6023 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6024 opcode = OP_ATOMIC_STORE_I8;
6025 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6026 opcode = OP_ATOMIC_STORE_U8;
6028 else if (t->type == MONO_TYPE_I)
6029 opcode = OP_ATOMIC_STORE_I4;
6030 else if (is_ref || t->type == MONO_TYPE_U)
6031 opcode = OP_ATOMIC_STORE_U4;
6035 if (!mono_arch_opcode_supported (opcode))
6038 MONO_INST_NEW (cfg, ins, opcode);
6039 ins->dreg = args [0]->dreg;
6040 ins->sreg1 = args [1]->dreg;
6041 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6042 MONO_ADD_INS (cfg->cbb, ins);
6044 if (cfg->gen_write_barriers && is_ref)
6045 mini_emit_write_barrier (cfg, args [0], args [1]);
6051 } else if (cmethod->klass->image == mono_defaults.corlib &&
6052 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6053 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6054 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6055 if (should_insert_brekpoint (cfg->method)) {
6056 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6058 MONO_INST_NEW (cfg, ins, OP_NOP);
6059 MONO_ADD_INS (cfg->cbb, ins);
6063 } else if (cmethod->klass->image == mono_defaults.corlib &&
6064 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6065 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6066 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6068 EMIT_NEW_ICONST (cfg, ins, 1);
6070 EMIT_NEW_ICONST (cfg, ins, 0);
6073 } else if (cmethod->klass->image == mono_defaults.corlib &&
6074 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6075 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6076 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6077 /* No stack walks are currently available, so implement this as an intrinsic */
6078 MonoInst *assembly_ins;
6080 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6081 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6084 } else if (cmethod->klass->image == mono_defaults.corlib &&
6085 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6086 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6087 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6088 /* No stack walks are currently available, so implement this as an intrinsic */
6089 MonoInst *method_ins;
6090 MonoMethod *declaring = cfg->method;
6092 /* This returns the declaring generic method */
6093 if (declaring->is_inflated)
6094 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6095 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6096 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6097 cfg->no_inline = TRUE;
6098 if (cfg->method != cfg->current_method)
6099 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6102 } else if (cmethod->klass == mono_defaults.math_class) {
6104 * There is general branchless code for Min/Max, but it does not work for
6106 * http://everything2.com/?node_id=1051618
6108 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6109 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6110 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6111 ins->dreg = alloc_preg (cfg);
6112 ins->type = STACK_I4;
6113 MONO_ADD_INS (cfg->cbb, ins);
6115 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6116 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6117 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6118 !strcmp (cmethod->klass->name, "Selector")) ||
6119 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6120 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6121 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6122 !strcmp (cmethod->klass->name, "Selector"))
6124 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6125 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6126 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6129 MonoJumpInfoToken *ji;
6132 if (args [0]->opcode == OP_GOT_ENTRY) {
6133 pi = (MonoInst *)args [0]->inst_p1;
6134 g_assert (pi->opcode == OP_PATCH_INFO);
6135 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6136 ji = (MonoJumpInfoToken *)pi->inst_p0;
6138 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6139 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6142 NULLIFY_INS (args [0]);
6144 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6145 return_val_if_nok (&cfg->error, NULL);
6147 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6148 ins->dreg = mono_alloc_ireg (cfg);
6151 MONO_ADD_INS (cfg->cbb, ins);
6156 #ifdef MONO_ARCH_SIMD_INTRINSICS
6157 if (cfg->opt & MONO_OPT_SIMD) {
6158 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6164 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6168 if (COMPILE_LLVM (cfg)) {
6169 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6174 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6178 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect calls to selected runtime-internal methods to faster managed
 * implementations.  The only case visible in this extract replaces
 * String.InternalAllocateStr with a direct call to the managed GC
 * allocator (vtable + length arguments), skipped when allocation
 * profiling or MONO_OPT_SHARED is active.
 * NOTE(review): this chunk is a sampled extraction — several original
 * lines (function braces, the fallback return path) are missing between
 * the numbered lines; verify against the full file before editing.
 */
6181 inline static MonoInst*
6182 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6183 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6185 if (method->klass == mono_defaults.string_class) {
6186 /* managed string allocation support */
6187 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6188 MonoInst *iargs [2];
6189 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6190 MonoMethod *managed_alloc = NULL;
6192 g_assert (vtable); /* Should not fail since it is System.String */
6193 #ifndef MONO_CROSS_COMPILE
6194 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator as (vtable, length). */
6198 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6199 iargs [1] = args [0];
6200 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 * Before inlining, create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this' when sig->hasthis) and emit a store of
 * the corresponding evaluation-stack slot SP[i] into it, so the inlined
 * body can address its arguments as real variables.
 * NOTE(review): sampled extraction — the return-type line, loop index
 * declaration and closing braces are missing from this view.
 */
6207 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6209 MonoInst *store, *temp;
6212 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' (i == 0) derive the type from the stack slot; otherwise use the signature type. */
6213 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6216 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6217 * would be different than the MonoInst's used to represent arguments, and
6218 * the ldelema implementation can't deal with that.
6219 * Solution: When ldelema is used on an inline argument, create a var for
6220 * it, emit ldelema on that var, and emit the saving code below in
6221 * inline_method () if needed.
6223 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6224 cfg->args [i] = temp;
6225 /* This uses cfg->args [i] which is set by the preceding line */
6226 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6227 store->cil_code = sp [0]->cil_code;
6232 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6233 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6235 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debugging helper: only allow inlining of methods whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The env var is read once and cached in a
 * function-static pointer.
 * NOTE(review): sampled extraction — the return-type line, the empty-limit
 * fallback and closing braces are missing from this view.
 */
6237 check_inline_called_method_name_limit (MonoMethod *called_method)
6240 static const char *limit = NULL;
6242 if (limit == NULL) {
6243 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6245 if (limit_string != NULL)
6246 limit = limit_string;
6251 if (limit [0] != '\0') {
6252 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: inline only when the method name starts with the limit string. */
6254 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6255 g_free (called_method_name);
6257 //return (strncmp_result <= 0);
6258 return (strncmp_result == 0);
6265 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Counterpart of check_inline_called_method_name_limit for the CALLER:
 * only perform inlining inside methods whose full name starts with the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment-variable prefix.
 * NOTE(review): sampled extraction — the return-type line and some
 * closing braces are missing from this view.
 */
6267 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6270 static const char *limit = NULL;
6272 if (limit == NULL) {
6273 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6274 if (limit_string != NULL) {
6275 limit = limit_string;
6281 if (limit [0] != '\0') {
6282 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the cached limit string. */
6284 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6285 g_free (caller_method_name);
6287 //return (strncmp_result <= 0);
6288 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 * Emit IR that initializes the register DREG to the zero value appropriate
 * for RTYPE: NULL for pointers/references, integer 0, long 0, 0.0f/0.0
 * (loaded from function-static constants so OP_R4CONST/OP_R8CONST can
 * reference them by address), or VZERO for value types.
 * NOTE(review): sampled extraction — the leading if-condition for the
 * pointer case, the 'int t' / 'MonoInst *ins' declarations and several
 * dreg assignments are missing between the numbered lines.
 */
6296 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6298 static double r8_0 = 0.0;
6299 static float r4_0 = 0.0;
/* Strip custom modifiers / enums down to the underlying type before dispatching. */
6303 rtype = mini_get_underlying_type (rtype);
6307 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6308 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6309 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6310 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6311 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6312 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay single precision: load the static float zero. */
6313 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6314 ins->type = STACK_R4;
6315 ins->inst_p0 = (void*)&r4_0;
6317 MONO_ADD_INS (cfg->cbb, ins);
6318 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6319 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6320 ins->type = STACK_R8;
6321 ins->inst_p0 = (void*)&r8_0;
6323 MONO_ADD_INS (cfg->cbb, ins);
6324 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6325 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6326 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6327 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type variables constrained to value types are zeroed like VTs. */
6328 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6330 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 * Like emit_init_rvar, but emits OP_DUMMY_* placeholder initializations
 * that keep the IR valid (every vreg defined) without generating real
 * machine code.  Used for locals when 'init locals' is off.
 * Falls back to a real emit_init_rvar for types with no dummy opcode.
 * NOTE(review): sampled extraction — the leading if-condition for the
 * pointer case and the 'int t' declaration are missing from this view.
 */
6335 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6339 rtype = mini_get_underlying_type (rtype);
6343 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6344 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6345 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6346 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6347 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6348 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6349 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6350 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6351 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6352 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6353 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6354 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6355 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6356 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit the real initialization instead. */
6358 emit_init_rvar (cfg, dreg, rtype);
6362 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 * Initialize local variable LOCAL of type TYPE.  Under soft-float, the
 * value is first materialized into a fresh dreg and then stored into the
 * local (so the soft-float decomposition sees a proper store); otherwise
 * the local's own dreg is initialized directly.  When init is FALSE, a
 * dummy init is emitted instead — presumably gated by an 'if (init)'
 * branch whose line is missing from this extract (TODO confirm).
 */
6364 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6366 MonoInst *var = cfg->locals [local];
6367 if (COMPILE_SOFT_FLOAT (cfg)) {
6369 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6370 emit_init_rvar (cfg, reg, type);
/* Store the freshly-initialized value (last emitted ins) into the local. */
6371 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6374 emit_init_rvar (cfg, var->dreg, type);
6376 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * mini_inline_method:
 * Public wrapper around the static inline_method () below; forwards all
 * arguments unchanged.  (Return-type line is missing from this extract.)
 */
6381 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6383 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6389 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 * Inline CMETHOD into the current compile at the current cbb.  The flow:
 *   1. optional env-var name filters (see check_inline_*_name_limit),
 *   2. load and verify CMETHOD's header, allocate locals and a return var,
 *   3. save all per-method cfg state (locals/args/bb maps/contexts),
 *   4. run mono_method_to_ir () on the callee between fresh start/end
 *      bblocks (sbblock/ebblock),
 *   5. restore the saved cfg state,
 *   6. on success, splice the new bblocks in, merge them where possible,
 *      and make sure the return var is always defined; on failure,
 *      discard the new bblocks.
 * Returns the IR cost on success (callers treat 0 as "not inlined").
 * NOTE(review): sampled extraction — many lines (braces, early returns,
 * #endif's, some declarations) are missing between the numbered lines;
 * the comments below annotate only what is visible.
 */
6392 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6393 guchar *ip, guint real_offset, gboolean inline_always)
6396 MonoInst *ins, *rvar = NULL;
6397 MonoMethodHeader *cheader;
6398 MonoBasicBlock *ebblock, *sbblock;
6400 MonoMethod *prev_inlined_method;
6401 MonoInst **prev_locals, **prev_args;
6402 MonoType **prev_arg_types;
6403 guint prev_real_offset;
6404 GHashTable *prev_cbb_hash;
6405 MonoBasicBlock **prev_cil_offset_to_bb;
6406 MonoBasicBlock *prev_cbb;
6407 const unsigned char *prev_ip;
6408 unsigned char *prev_cil_start;
6409 guint32 prev_cil_offset_to_bb_len;
6410 MonoMethod *prev_current_method;
6411 MonoGenericContext *prev_generic_context;
6412 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6414 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6416 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6417 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6420 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6421 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6426 fsig = mono_method_signature (cmethod);
6428 if (cfg->verbose_level > 2)
6429 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* inline_info doubles as a "seen once" flag for the inlineable-methods statistic. */
6431 if (!cmethod->inline_info) {
6432 cfg->stat_inlineable_methods++;
6433 cmethod->inline_info = 1;
6436 /* allocate local variables */
6437 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: fatal only when inlining is mandatory (wrappers etc.). */
6439 if (inline_always) {
6440 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6441 mono_error_move (&cfg->error, &error);
6443 mono_error_cleanup (&error);
6448 /*Must verify before creating locals as it can cause the JIT to assert.*/
6449 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6450 mono_metadata_free_mh (cheader);
6454 /* allocate space to store the return value */
6455 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6456 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in fresh locals for the callee; the caller's are restored below. */
6459 prev_locals = cfg->locals;
6460 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6461 for (i = 0; i < cheader->num_locals; ++i)
6462 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6464 /* allocate start and end blocks */
6465 /* This is needed so if the inline is aborted, we can clean up */
6466 NEW_BBLOCK (cfg, sbblock);
6467 sbblock->real_offset = real_offset;
6469 NEW_BBLOCK (cfg, ebblock);
6470 ebblock->block_num = cfg->num_bblocks++;
6471 ebblock->real_offset = real_offset;
/* Save every piece of per-method cfg state that mono_method_to_ir clobbers. */
6473 prev_args = cfg->args;
6474 prev_arg_types = cfg->arg_types;
6475 prev_inlined_method = cfg->inlined_method;
6476 cfg->inlined_method = cmethod;
6477 cfg->ret_var_set = FALSE;
6478 cfg->inline_depth ++;
6479 prev_real_offset = cfg->real_offset;
6480 prev_cbb_hash = cfg->cbb_hash;
6481 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6482 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6483 prev_cil_start = cfg->cil_start;
6485 prev_cbb = cfg->cbb;
6486 prev_current_method = cfg->current_method;
6487 prev_generic_context = cfg->generic_context;
6488 prev_ret_var_set = cfg->ret_var_set;
6489 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method needs a virtual-call-aware trampoline path. */
6491 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6494 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6496 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state saved above. */
6498 cfg->inlined_method = prev_inlined_method;
6499 cfg->real_offset = prev_real_offset;
6500 cfg->cbb_hash = prev_cbb_hash;
6501 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6502 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6503 cfg->cil_start = prev_cil_start;
6505 cfg->locals = prev_locals;
6506 cfg->args = prev_args;
6507 cfg->arg_types = prev_arg_types;
6508 cfg->current_method = prev_current_method;
6509 cfg->generic_context = prev_generic_context;
6510 cfg->ret_var_set = prev_ret_var_set;
6511 cfg->disable_inline = prev_disable_inline;
6512 cfg->inline_depth --;
/* Accept the inline if it was cheap (< 60), mandatory, or marked AggressiveInlining. */
6514 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6515 if (cfg->verbose_level > 2)
6516 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6518 cfg->stat_inlined_methods++;
6520 /* always add some code to avoid block split failures */
6521 MONO_INST_NEW (cfg, ins, OP_NOP);
6522 MONO_ADD_INS (prev_cbb, ins);
6524 prev_cbb->next_bb = sbblock;
6525 link_bblock (cfg, prev_cbb, sbblock);
6528 * Get rid of the begin and end bblocks if possible to aid local
6531 if (prev_cbb->out_count == 1)
6532 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6534 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6535 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6537 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6538 MonoBasicBlock *prev = ebblock->in_bb [0];
6540 if (prev->next_bb == ebblock) {
6541 mono_merge_basic_blocks (cfg, prev, ebblock);
6543 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6544 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6545 cfg->cbb = prev_cbb;
6548 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6553 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Ensure rvar is defined on every path reaching ebblock (except dead OP_NOT_REACHED paths). */
6559 for (i = 0; i < ebblock->in_count; ++i) {
6560 bb = ebblock->in_bb [i];
6562 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6565 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6575 * If the inlined method contains only a throw, then the ret var is not
6576 * set, so set it to a dummy value.
6579 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6581 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6584 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: clear any pending exception and drop the new bblocks. */
6587 if (cfg->verbose_level > 2)
6588 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6589 cfg->exception_type = MONO_EXCEPTION_NONE;
6591 /* This gets rid of the newly added bblocks */
6592 cfg->cbb = prev_cbb;
6594 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6599 * Some of these comments may well be out-of-date.
6600 * Design decisions: we do a single pass over the IL code (and we do bblock
6601 * splitting/merging in the few cases when it's required: a back jump to an IL
6602 * address that was not already seen as bblock starting point).
6603 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6604 * Complex operations are decomposed in simpler ones right away. We need to let the
6605 * arch-specific code peek and poke inside this process somehow (except when the
6606 * optimizations can take advantage of the full semantic info of coarse opcodes).
6607 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6608 * MonoInst->opcode initially is the IL opcode or some simplification of that
6609 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6610 * opcode with value bigger than OP_LAST.
6611 * At this point the IR can be handed over to an interpreter, a dumb code generator
6612 * or to the optimizing code generator that will translate it to SSA form.
6614 * Profiling directed optimizations.
6615 * We may compile by default with few or no optimizations and instrument the code
6616 * or the user may indicate what methods to optimize the most either in a config file
6617 * or through repeated runs where the compiler applies offline the optimizations to
6618 * each method and then decides if it was worth it.
6621 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6622 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6623 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6624 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6625 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6626 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6627 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6628 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6630 /* offset from br.s -> br like opcodes */
6631 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Return whether CIL address IP still belongs to basic block BB, i.e. no
 * other bblock starts at IP (cil_offset_to_bb is NULL where no block
 * starts).  Return-type line is missing from this extract.
 */
6634 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6636 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6638 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * Pre-pass over the CIL stream [start, end): decode each opcode, and for
 * every branch/switch create bblocks (via GET_BBLOCK) at each branch
 * target and at the instruction following the branch.  Also marks the
 * bblock containing a CEE_THROW as out_of_line so it can be moved to
 * cold code.
 * NOTE(review): sampled extraction — the loop header, ip advancement for
 * most operand kinds, several case labels and closing braces are missing
 * between the numbered lines.
 */
6642 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6644 unsigned char *ip = start;
6645 unsigned char *target;
6648 MonoBasicBlock *bblock;
6649 const MonoOpcode *opcode;
6652 cli_addr = ip - start;
6653 i = mono_opcode_value ((const guint8 **)&ip, end);
6656 opcode = &mono_opcodes [i];
/* Dispatch on the operand kind to find the instruction length and any branch targets. */
6657 switch (opcode->argument) {
6658 case MonoInlineNone:
6661 case MonoInlineString:
6662 case MonoInlineType:
6663 case MonoInlineField:
6664 case MonoInlineMethod:
6667 case MonoShortInlineR:
6674 case MonoShortInlineVar:
6675 case MonoShortInlineI:
6678 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
6679 target = start + cli_addr + 2 + (signed char)ip [1];
6680 GET_BBLOCK (cfg, bblock, target);
6683 GET_BBLOCK (cfg, bblock, ip);
6685 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
6686 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6687 GET_BBLOCK (cfg, bblock, target);
6690 GET_BBLOCK (cfg, bblock, ip);
6692 case MonoInlineSwitch: {
6693 guint32 n = read32 (ip + 1);
/* The switch table is n 4-byte targets after the 5-byte header; targets are relative to its end. */
6696 cli_addr += 5 + 4 * n;
6697 target = start + cli_addr;
6698 GET_BBLOCK (cfg, bblock, target);
6700 for (j = 0; j < n; ++j) {
6701 target = start + cli_addr + (gint32)read32 (ip);
6702 GET_BBLOCK (cfg, bblock, target);
6712 g_assert_not_reached ();
6715 if (i == CEE_THROW) {
6716 unsigned char *bb_start = ip - 1;
6718 /* Find the start of the bblock containing the throw */
6720 while ((bb_start >= start) && !bblock) {
6721 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6725 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve TOKEN in method M to a MonoMethod, allowing open generic
 * results.  For wrapper methods the target is stored as wrapper data and
 * is inflated with CONTEXT; otherwise the token is resolved through the
 * image's metadata.  Errors are reported through ERROR.
 * NOTE(review): sampled extraction — the opening brace, local
 * declarations and the return statement are missing from this view.
 */
6735 static inline MonoMethod *
6736 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6742 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6743 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6745 method = mono_class_inflate_generic_method_checked (method, context, error);
6748 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open, but when compiling non-gshared code a
 * method on an open constructed type is treated as a bad-image error
 * (such methods can only appear in gshared compilation).  CFG may be
 * NULL, in which case a local MonoError is used and cleaned up.
 * NOTE(review): sampled extraction — the opening brace, the local MonoError
 * declaration and the return statement are missing from this view.
 */
6754 static inline MonoMethod *
6755 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6758 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6760 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6761 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6765 if (!method && !cfg)
6766 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve class TOKEN relative to METHOD, inflating with CONTEXT, then
 * initialize the class. Lookup/inflation errors are currently swallowed
 * (see the FIXMEs), in which case the caller sees a NULL or partially
 * resolved class.
 */
6771 static inline MonoClass*
6772 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
/* Wrappers embed the already-resolved class in their wrapper data. */
6777 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
6778 		klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6780 			klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6781 			mono_error_cleanup (&error); /* FIXME don't swallow the error */
6784 		klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6785 		mono_error_cleanup (&error); /* FIXME don't swallow the error */
6788 		mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve a standalone signature TOKEN relative to METHOD and inflate it
 * with CONTEXT. Returns NULL on error (reported through ERROR).
 */
6792 static inline MonoMethodSignature*
6793 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6795 	MonoMethodSignature *fsig;
/* Wrappers embed the already-parsed signature in their wrapper data. */
6798 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
6799 		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6801 		fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6802 		return_val_if_nok (error, NULL);
6805 	fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the managed
 * SecurityManager.ThrowException(Exception) method, used to raise CoreCLR
 * security exceptions from generated code.
 */
6811 throw_exception (void)
6813 	static MonoMethod *method = NULL;
6816 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
6817 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR into CFG that calls SecurityManager.ThrowException () with EX
 * as its single argument.
 */
6824 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6826 	MonoMethod *thrower = throw_exception ();
6829 	EMIT_NEW_PCONST (cfg, args [0], ex);
6830 	mono_emit_method_call (cfg, thrower, args, NULL);
6834  * Return the original method if a wrapper is specified. We can only access
6835  * the custom attributes from the original method.
6838 get_original_method (MonoMethod *method)
/* Not a wrapper: the method already carries its own custom attributes. */
6840 	if (method->wrapper_type == MONO_WRAPPER_NONE)
6843 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6844 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6847 	/* in other cases we need to find the original method */
6848 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not access FIELD, emit code that throws the returned security exception.
 */
6852 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6854 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6855 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6857 		emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not call CALLEE, emit code that throws the returned security exception.
 */
6861 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6863 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6864 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6866 		emit_throw_exception (cfg, ex);
6870  * Check that the IL instructions at ip are the array initialization
6871  * sequence and return the pointer to the data and the size.
/*
 * NOTE(review): this recognizes the C# compiler's array-initialization idiom
 * (dup; ldtoken <fld>; call RuntimeHelpers.InitializeArray) so the JIT can
 * replace the runtime call with a direct copy from the field's RVA data.
 * OUT_SIZE and OUT_FIELD_TOKEN are output parameters for the element size
 * and the ldtoken field token respectively.
 */
6874 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6877 	 * newarr[System.Int32]
6879 	 * ldtoken field valuetype ...
6880 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (MONO_TOKEN_FIELD_DEF >> 24). */
6882 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6884 		guint32 token = read32 (ip + 7);
6885 		guint32 field_token = read32 (ip + 2);
6886 		guint32 field_index = field_token & 0xffffff;
6888 		const char *data_ptr;
6890 		MonoMethod *cmethod;
6891 		MonoClass *dummy_class;
6892 		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6896 			mono_error_cleanup (&error); /* FIXME don't swallow the error */
6900 		*out_field_token = field_token;
6902 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real corlib RuntimeHelpers.InitializeArray. */
6905 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6907 		switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6911 		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6912 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Sanity check: the computed data size must fit inside the RVA field. */
6929 		if (size > mono_type_size (field->type, &dummy_align))
6932 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6933 		if (!image_is_dynamic (method->klass->image)) {
6934 			field_index = read32 (ip + 2) & 0xffffff;
6935 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6936 			data_ptr = mono_image_rva_map (method->klass->image, rva);
6937 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6938 			/* for aot code we do the lookup on load */
6939 			if (aot && data_ptr)
6940 				return (const char *)GUINT_TO_POINTER (rva);
6942 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA section; read the data directly. */
6944 			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * inside METHOD, including a disassembly of the offending instruction when
 * the method body can be parsed.
 */
6952 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6955 	char *method_fname = mono_method_full_name (method, TRUE);
6957 	MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6960 		method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6961 		mono_error_cleanup (&error);
6962 	} else if (header->code_size == 0)
6963 		method_code = g_strdup ("method body is empty.");
6965 		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6966 	mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6967 	g_free (method_fname);
6968 	g_free (method_code);
/* The header is freed later, together with the rest of the compile. */
6969 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the value on top of the stack (*SP) into local N.
 * When the value producer is the last emitted instruction and is a plain
 * constant, retarget its destination register instead of emitting a move.
 */
6973 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6976 	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6977 	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6978 			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6979 		/* Optimize reg-reg moves away */
6981 		 * Can't optimize other opcodes, since sp[0] might point to
6982 		 * the last ins of a decomposed opcode.
/* Make the constant write directly into the local's register. */
6984 		sp [0]->dreg = (cfg)->locals [n]->dreg;
6986 		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6991  * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * NOTE(review): returns the new IL pointer when the ldloca+following opcode
 * pair was replaced by direct IR, so the caller can skip the consumed
 * instructions; presumably returns NULL otherwise — the fall-through path is
 * elided here, confirm against the full function.
 */
6994 static inline unsigned char *
6995 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7005 		local = read16 (ip + 2);
/* ldloca <n>; initobj <type>  =>  initialize the local in place. */
7009 	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7010 		/* From the INITOBJ case */
7011 		token = read32 (ip + 2);
7012 		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7013 		CHECK_TYPELOAD (klass);
7014 		type = mini_get_underlying_type (&klass->byval_arg);
7015 		emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call in llvm-only mode, where vtable and IMT
 * slots hold function descriptors (addr + arg pairs) instead of raw code
 * addresses or trampolines. Four cases are handled in order:
 *   1. plain virtual call (not generic, not interface, not gsharedvt);
 *   2. simple interface call through an IMT slot;
 *   3. generic-virtual / variant-interface / special-array-interface calls
 *      through a lazily extended IMT thunk with a slow path;
 *   4. everything else (gsharedvt) via runtime resolver icalls.
 * SP [0] is the 'this' argument; CONTEXT_USED drives rgctx method lookup.
 */
7023 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7025 	MonoInst *icall_args [16];
7026 	MonoInst *call_target, *ins, *vtable_ins;
7027 	int arg_reg, this_reg, vtable_reg;
7028 	gboolean is_iface = mono_class_is_interface (cmethod->klass);
7029 	gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7030 	gboolean variant_iface = FALSE;
7033 	gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7036 	 * In llvm-only mode, vtables contain function descriptors instead of
7037 	 * method addresses/trampolines.
7039 	MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots; classes use vtable indexes. */
7042 		slot = mono_method_get_imt_slot (cmethod);
7044 		slot = mono_method_get_vtable_index (cmethod);
7046 	this_reg = sp [0]->dreg;
7048 	if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7049 		variant_iface = TRUE;
/* Case 1: plain virtual call through the vtable slot. */
7051 	if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7053 		 * The simplest case, a normal virtual call.
7055 		int slot_reg = alloc_preg (cfg);
7056 		int addr_reg = alloc_preg (cfg);
7057 		int arg_reg = alloc_preg (cfg);
7058 		MonoBasicBlock *non_null_bb;
7060 		vtable_reg = alloc_preg (cfg);
7061 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7062 		offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7064 		/* Load the vtable slot, which contains a function descriptor. */
7065 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7067 		NEW_BBLOCK (cfg, non_null_bb);
/* NULL slot means not yet initialized; call the init icall first. */
7069 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7070 		cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7071 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7074 		// FIXME: Make the wrapper use the preserveall cconv
7075 		// FIXME: Use one icall per slot for small slot numbers ?
7076 		icall_args [0] = vtable_ins;
7077 		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7078 		/* Make the icall return the vtable slot value to save some code space */
7079 		ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7080 		ins->dreg = slot_reg;
7081 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7084 		MONO_START_BB (cfg, non_null_bb);
7085 		/* Load the address + arg from the vtable slot */
7086 		EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7087 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7089 		return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple (non-variant, non-generic) interface call via an IMT slot. */
7092 	if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7094 		 * A simple interface call
7096 		 * We make a call through an imt slot to obtain the function descriptor we need to call.
7097 		 * The imt slot contains a function descriptor for a runtime function + arg.
7099 		int slot_reg = alloc_preg (cfg);
7100 		int addr_reg = alloc_preg (cfg);
7101 		int arg_reg = alloc_preg (cfg);
7102 		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7104 		vtable_reg = alloc_preg (cfg);
7105 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7106 		offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7109 		 * The slot is already initialized when the vtable is created so there is no need
7113 		/* Load the imt slot, which contains a function descriptor. */
7114 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7116 		/* Load the address + arg of the imt thunk from the imt slot */
7117 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7118 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7120 		 * IMT thunks in llvm-only mode are C functions which take an info argument
7121 		 * plus the imt method and return the ftndesc to call.
7123 		icall_args [0] = thunk_arg_ins;
7124 		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7125 												cmethod, MONO_RGCTX_INFO_METHOD);
7126 		ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7128 		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic-virtual / variant-iface calls with a slow-path fallback. */
7131 	if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7133 		 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7134 		 * dynamically extended as more instantiations are discovered.
7135 		 * This handles generic virtual methods both on classes and interfaces.
7137 		int slot_reg = alloc_preg (cfg);
7138 		int addr_reg = alloc_preg (cfg);
7139 		int arg_reg = alloc_preg (cfg);
7140 		int ftndesc_reg = alloc_preg (cfg);
7141 		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7142 		MonoBasicBlock *slowpath_bb, *end_bb;
7144 		NEW_BBLOCK (cfg, slowpath_bb);
7145 		NEW_BBLOCK (cfg, end_bb);
7147 		vtable_reg = alloc_preg (cfg);
7148 		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7150 			offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7152 			offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7154 		/* Load the slot, which contains a function descriptor. */
7155 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7157 		/* These slots are not initialized, so fall back to the slow path until they are initialized */
7158 		/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7159 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7160 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7163 		/* Same as with iface calls */
7164 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7165 		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7166 		icall_args [0] = thunk_arg_ins;
7167 		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7168 												cmethod, MONO_RGCTX_INFO_METHOD);
7169 		ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7170 		ftndesc_ins->dreg = ftndesc_reg;
7172 		 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7173 		 * they don't know about yet. Fall back to the slowpath in that case.
7175 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7176 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7178 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
7181 		MONO_START_BB (cfg, slowpath_bb);
7182 		icall_args [0] = vtable_ins;
7183 		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7184 		icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7185 												cmethod, MONO_RGCTX_INFO_METHOD);
7187 			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7189 			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7190 		ftndesc_ins->dreg = ftndesc_reg;
7191 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7194 		MONO_START_BB (cfg, end_bb);
7195 		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7199 	 * Non-optimized cases
7201 	icall_args [0] = sp [0];
7202 	EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7204 	icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7205 											cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter slot where the resolver stores the extra call argument. */
7207 	arg_reg = alloc_preg (cfg);
7208 	MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7209 	EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7211 	g_assert (is_gsharedvt);
7213 		call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7215 		call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7218 	 * Pass the extra argument even if the callee doesn't receive it, most
7219 	 * calling conventions allow this.
7221 	return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain.
 */
7225 is_exception_class (MonoClass *klass)
7228 		if (klass == mono_defaults.exception_class)
7230 		klass = klass->parent;
7236  * is_jit_optimizer_disabled:
7238  *   Determine whether M's assembly has a DebuggableAttribute with the
7239  * IsJITOptimizerDisabled flag set. The result is cached on the assembly
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited, published with
 * memory barriers so concurrent readers see a consistent pair).
7242 is_jit_optimizer_disabled (MonoMethod *m)
7245 	MonoAssembly *ass = m->klass->image->assembly;
7246 	MonoCustomAttrInfo* attrs;
7249 	gboolean val = FALSE;
/* Fast path: the answer was already computed and cached. */
7252 	if (ass->jit_optimizer_disabled_inited)
7253 		return ass->jit_optimizer_disabled;
7255 	klass = mono_class_try_get_debuggable_attribute_class ();
/* No DebuggableAttribute class in this runtime: cache FALSE. */
7259 		ass->jit_optimizer_disabled = FALSE;
7260 		mono_memory_barrier ();
7261 		ass->jit_optimizer_disabled_inited = TRUE;
7265 	attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7266 	mono_error_cleanup (&error); /* FIXME don't swallow the error */
7268 		for (i = 0; i < attrs->num_attrs; ++i) {
7269 			MonoCustomAttrEntry *attr = &attrs->attrs [i];
7271 			MonoMethodSignature *sig;
7273 			if (!attr->ctor || attr->ctor->klass != klass)
7275 			/* Decode the attribute. See reflection.c */
7276 			p = (const char*)attr->data;
/* 0x0001 is the custom attribute blob prolog mandated by ECMA-335. */
7277 			g_assert (read16 (p) == 0x0001);
7280 			// FIXME: Support named parameters
7281 			sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
7282 			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7284 			/* Two boolean arguments */
7288 		mono_custom_attrs_free (attrs);
7291 	ass->jit_optimizer_disabled = val;
7292 	mono_memory_barrier ();
7293 	ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG, IL opcode
 * CALL_OPCODE) can be compiled as a tail call. Starts from the architecture
 * backend's verdict and then vetoes cases where the callee could observe
 * pointers into the current method's stack frame, or where the runtime needs
 * the caller frame to stay live (LMF, wrappers, non-CEE_CALL opcodes).
 */
7299 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7301 	gboolean supported_tail_call;
7304 	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7306 	for (i = 0; i < fsig->param_count; ++i) {
7307 		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7308 			/* These can point to the current method's stack */
7309 			supported_tail_call = FALSE;
7311 	if (fsig->hasthis && cmethod->klass->valuetype)
7312 		/* this might point to the current method's stack */
7313 		supported_tail_call = FALSE;
7314 	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7315 		supported_tail_call = FALSE;
7316 	if (cfg->method->save_lmf)
7317 		supported_tail_call = FALSE;
7318 	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7319 		supported_tail_call = FALSE;
7320 	if (call_opcode != CEE_CALL)
7321 		supported_tail_call = FALSE;
7323 	/* Debugging support */
7325 	if (supported_tail_call) {
7326 		if (!mono_debug_count ())
7327 			supported_tail_call = FALSE;
7331 	return supported_tail_call;
7337  * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * NOTE(review): SP holds the ctor arguments ('this' in SP [0]); INLINE_COSTS
 * is updated when the ctor body is inlined. The dispatch order below is:
 * intrinsic ctor, inlining, gsharedvt indirect call, context-dependent
 * indirect call, and finally a plain direct call.
 */
7340 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7341 				  MonoInst **sp, guint8 *ip, int *inline_costs)
7343 	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an rgctx/vtable argument. */
7345 	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7346 			mono_method_is_generic_sharable (cmethod, TRUE)) {
7347 		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7348 			mono_class_vtable (cfg->domain, cmethod->klass);
7349 			CHECK_TYPELOAD (cmethod->klass);
7351 			vtable_arg = emit_get_rgctx_method (cfg, context_used,
7352 												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7355 				vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7356 													cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7358 				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7360 				CHECK_TYPELOAD (cmethod->klass);
7361 				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7366 	/* Avoid virtual calls to ctors if possible */
7367 	if (mono_class_is_marshalbyref (cmethod->klass))
7368 		callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first. */
7370 	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7371 		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7372 		CHECK_CFG_EXCEPTION;
7373 	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7374 			   mono_method_check_inlining (cfg, cmethod) &&
7375 			   !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7378 		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7379 			cfg->real_offset += 5;
/* 5 == size of the CEE_NEWOBJ/CEE_CALL instruction being replaced. */
7381 			*inline_costs += costs - 5;
7383 			INLINE_FAILURE ("inline failure");
7384 			// FIXME-VT: Clean this up
7385 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7386 				GSHAREDVT_FAILURE(*ip);
7387 			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7389 	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7392 		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7394 		if (cfg->llvm_only) {
7395 			// FIXME: Avoid initializing vtable_arg
7396 			emit_llvmonly_calli (cfg, fsig, sp, addr);
7398 			mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7400 	} else if (context_used &&
7401 			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7402 				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7403 		MonoInst *cmethod_addr;
7405 		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7407 		if (cfg->llvm_only) {
7408 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7409 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7410 			emit_llvmonly_calli (cfg, fsig, sp, addr);
7412 			cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7413 												  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7415 			mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7418 		INLINE_FAILURE ("ctor call");
7419 		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7420 										  callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that stores VAL as the return value of the method being
 * compiled. Valuetype returns (CEE_STOBJ) go through the return variable or
 * the hidden vret address argument; soft-float R4 returns are converted via
 * an icall before reaching the architecture backend.
 */
7427 emit_setret (MonoCompile *cfg, MonoInst *val)
7429 	MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7432 	if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No vret address: store into the return variable directly. */
7435 		if (!cfg->vret_addr) {
7436 			EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7438 			EMIT_NEW_RETLOADA (cfg, ret_addr);
7440 			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7441 			ins->klass = mono_class_from_mono_type (ret_type);
7444 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7445 		if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7446 			MonoInst *iargs [1];
7450 			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7451 			mono_arch_emit_setret (cfg, cfg->method, conv);
7453 			mono_arch_emit_setret (cfg, cfg->method, val);
7456 		mono_arch_emit_setret (cfg, cfg->method, val);
7462 * mono_method_to_ir:
7464 * Translate the .net IL into linear IR.
7466 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7467 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7468 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7469 * @inline_args: if not NULL, contains the arguments to the inline call
7470 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7471 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7473 * This method is used to turn ECMA IL into Mono's internal Linear IR
7474 * reprensetation. It is used both for entire methods, as well as
7475 * inlining existing methods. In the former case, the @start_bblock,
7476 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7477 * inline_offset is set to zero.
7479 * Returns: the inline cost, or -1 if there was an error processing this method.
7482 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7483 MonoInst *return_var, MonoInst **inline_args,
7484 guint inline_offset, gboolean is_virtual_call)
7487 MonoInst *ins, **sp, **stack_start;
7488 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7489 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7490 MonoMethod *cmethod, *method_definition;
7491 MonoInst **arg_array;
7492 MonoMethodHeader *header;
7494 guint32 token, ins_flag;
7496 MonoClass *constrained_class = NULL;
7497 unsigned char *ip, *end, *target, *err_pos;
7498 MonoMethodSignature *sig;
7499 MonoGenericContext *generic_context = NULL;
7500 MonoGenericContainer *generic_container = NULL;
7501 MonoType **param_types;
7502 int i, n, start_new_bblock, dreg;
7503 int num_calls = 0, inline_costs = 0;
7504 int breakpoint_id = 0;
7506 GSList *class_inits = NULL;
7507 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7509 gboolean init_locals, seq_points, skip_dead_blocks;
7510 gboolean sym_seq_points = FALSE;
7511 MonoDebugMethodInfo *minfo;
7512 MonoBitSet *seq_point_locs = NULL;
7513 MonoBitSet *seq_point_set_locs = NULL;
7515 cfg->disable_inline = is_jit_optimizer_disabled (method);
7517 /* serialization and xdomain stuff may need access to private fields and methods */
7518 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7519 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7520 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7521 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7522 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7523 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7525 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7526 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7527 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7528 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7529 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7531 image = method->klass->image;
7532 header = mono_method_get_header_checked (method, &cfg->error);
7534 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7535 goto exception_exit;
7537 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7540 generic_container = mono_method_get_generic_container (method);
7541 sig = mono_method_signature (method);
7542 num_args = sig->hasthis + sig->param_count;
7543 ip = (unsigned char*)header->code;
7544 cfg->cil_start = ip;
7545 end = ip + header->code_size;
7546 cfg->stat_cil_code_size += header->code_size;
7548 seq_points = cfg->gen_seq_points && cfg->method == method;
7550 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7551 /* We could hit a seq point before attaching to the JIT (#8338) */
7555 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7556 minfo = mono_debug_lookup_method (method);
7558 MonoSymSeqPoint *sps;
7559 int i, n_il_offsets;
7561 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7562 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7563 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7564 sym_seq_points = TRUE;
7565 for (i = 0; i < n_il_offsets; ++i) {
7566 if (sps [i].il_offset < header->code_size)
7567 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7571 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7573 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7575 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7576 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7578 mono_debug_free_method_async_debug_info (asyncMethod);
7580 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7581 /* Methods without line number info like auto-generated property accessors */
7582 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7583 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7584 sym_seq_points = TRUE;
7589 * Methods without init_locals set could cause asserts in various passes
7590 * (#497220). To work around this, we emit dummy initialization opcodes
7591 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7592 * on some platforms.
7594 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7595 init_locals = header->init_locals;
7599 method_definition = method;
7600 while (method_definition->is_inflated) {
7601 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7602 method_definition = imethod->declaring;
7605 /* SkipVerification is not allowed if core-clr is enabled */
7606 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7608 dont_verify_stloc = TRUE;
7611 if (sig->is_inflated)
7612 generic_context = mono_method_get_context (method);
7613 else if (generic_container)
7614 generic_context = &generic_container->context;
7615 cfg->generic_context = generic_context;
7618 g_assert (!sig->has_type_parameters);
7620 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7621 g_assert (method->is_inflated);
7622 g_assert (mono_method_get_context (method)->method_inst);
7624 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7625 g_assert (sig->generic_param_count);
7627 if (cfg->method == method) {
7628 cfg->real_offset = 0;
7630 cfg->real_offset = inline_offset;
7633 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7634 cfg->cil_offset_to_bb_len = header->code_size;
7636 cfg->current_method = method;
7638 if (cfg->verbose_level > 2)
7639 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7641 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7643 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7644 for (n = 0; n < sig->param_count; ++n)
7645 param_types [n + sig->hasthis] = sig->params [n];
7646 cfg->arg_types = param_types;
7648 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7649 if (cfg->method == method) {
7651 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7652 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7655 NEW_BBLOCK (cfg, start_bblock);
7656 cfg->bb_entry = start_bblock;
7657 start_bblock->cil_code = NULL;
7658 start_bblock->cil_length = 0;
7661 NEW_BBLOCK (cfg, end_bblock);
7662 cfg->bb_exit = end_bblock;
7663 end_bblock->cil_code = NULL;
7664 end_bblock->cil_length = 0;
7665 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7666 g_assert (cfg->num_bblocks == 2);
7668 arg_array = cfg->args;
7670 if (header->num_clauses) {
7671 cfg->spvars = g_hash_table_new (NULL, NULL);
7672 cfg->exvars = g_hash_table_new (NULL, NULL);
7674 /* handle exception clauses */
7675 for (i = 0; i < header->num_clauses; ++i) {
7676 MonoBasicBlock *try_bb;
7677 MonoExceptionClause *clause = &header->clauses [i];
7678 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7680 try_bb->real_offset = clause->try_offset;
7681 try_bb->try_start = TRUE;
7682 try_bb->region = ((i + 1) << 8) | clause->flags;
7683 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7684 tblock->real_offset = clause->handler_offset;
7685 tblock->flags |= BB_EXCEPTION_HANDLER;
7688 * Linking the try block with the EH block hinders inlining as we won't be able to
7689 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7691 if (COMPILE_LLVM (cfg))
7692 link_bblock (cfg, try_bb, tblock);
7694 if (*(ip + clause->handler_offset) == CEE_POP)
7695 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7697 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7698 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7699 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7700 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7701 MONO_ADD_INS (tblock, ins);
7703 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7704 /* finally clauses already have a seq point */
7705 /* seq points for filter clauses are emitted below */
7706 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7707 MONO_ADD_INS (tblock, ins);
7710 /* todo: is a fault block unsafe to optimize? */
7711 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7712 tblock->flags |= BB_EXCEPTION_UNSAFE;
7715 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7717 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7719 /* catch and filter blocks get the exception object on the stack */
7720 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7721 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7723 /* mostly like handle_stack_args (), but just sets the input args */
7724 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7725 tblock->in_scount = 1;
7726 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7727 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7731 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7732 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7733 if (!cfg->compile_llvm) {
7734 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7735 ins->dreg = tblock->in_stack [0]->dreg;
7736 MONO_ADD_INS (tblock, ins);
7739 MonoInst *dummy_use;
7742 * Add a dummy use for the exvar so its liveness info will be
7745 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7748 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7749 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7750 MONO_ADD_INS (tblock, ins);
7753 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7754 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7755 tblock->flags |= BB_EXCEPTION_HANDLER;
7756 tblock->real_offset = clause->data.filter_offset;
7757 tblock->in_scount = 1;
7758 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7759 /* The filter block shares the exvar with the handler block */
7760 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7761 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7762 MONO_ADD_INS (tblock, ins);
7766 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7767 clause->data.catch_class &&
7769 mono_class_check_context_used (clause->data.catch_class)) {
7771 * In shared generic code with catch
7772 * clauses containing type variables
7773 * the exception handling code has to
7774 * be able to get to the rgctx.
7775 * Therefore we have to make sure that
7776 * the vtable/mrgctx argument (for
7777 * static or generic methods) or the
7778 * "this" argument (for non-static
7779 * methods) are live.
7781 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7782 mini_method_get_context (method)->method_inst ||
7783 method->klass->valuetype) {
7784 mono_get_vtable_var (cfg);
7786 MonoInst *dummy_use;
7788 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7793 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7794 cfg->cbb = start_bblock;
7795 cfg->args = arg_array;
7796 mono_save_args (cfg, sig, inline_args);
7799 /* FIRST CODE BLOCK */
7800 NEW_BBLOCK (cfg, tblock);
7801 tblock->cil_code = ip;
7805 ADD_BBLOCK (cfg, tblock);
7807 if (cfg->method == method) {
7808 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7809 if (breakpoint_id) {
7810 MONO_INST_NEW (cfg, ins, OP_BREAK);
7811 MONO_ADD_INS (cfg->cbb, ins);
7815 /* we use a separate basic block for the initialization code */
7816 NEW_BBLOCK (cfg, init_localsbb);
7817 if (cfg->method == method)
7818 cfg->bb_init = init_localsbb;
7819 init_localsbb->real_offset = cfg->real_offset;
7820 start_bblock->next_bb = init_localsbb;
7821 init_localsbb->next_bb = cfg->cbb;
7822 link_bblock (cfg, start_bblock, init_localsbb);
7823 link_bblock (cfg, init_localsbb, cfg->cbb);
7825 cfg->cbb = init_localsbb;
7827 if (cfg->gsharedvt && cfg->method == method) {
7828 MonoGSharedVtMethodInfo *info;
7829 MonoInst *var, *locals_var;
7832 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7833 info->method = cfg->method;
7834 info->count_entries = 16;
7835 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7836 cfg->gsharedvt_info = info;
7838 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7839 /* prevent it from being register allocated */
7840 //var->flags |= MONO_INST_VOLATILE;
7841 cfg->gsharedvt_info_var = var;
7843 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7844 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7846 /* Allocate locals */
7847 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7848 /* prevent it from being register allocated */
7849 //locals_var->flags |= MONO_INST_VOLATILE;
7850 cfg->gsharedvt_locals_var = locals_var;
7852 dreg = alloc_ireg (cfg);
7853 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7855 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7856 ins->dreg = locals_var->dreg;
7858 MONO_ADD_INS (cfg->cbb, ins);
7859 cfg->gsharedvt_locals_var_ins = ins;
7861 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7864 ins->flags |= MONO_INST_INIT;
7868 if (mono_security_core_clr_enabled ()) {
7869 /* check if this is native code, e.g. an icall or a p/invoke */
7870 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7871 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7873 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7874 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7876 /* if this is a native call then it can only be JITted from platform code */
7877 if ((icall || pinvk) && method->klass && method->klass->image) {
7878 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7879 MonoException *ex = icall ? mono_get_exception_security () :
7880 mono_get_exception_method_access ();
7881 emit_throw_exception (cfg, ex);
7888 CHECK_CFG_EXCEPTION;
7890 if (header->code_size == 0)
7893 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7898 if (cfg->method == method)
7899 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7901 for (n = 0; n < header->num_locals; ++n) {
7902 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7907 /* We force the vtable variable here for all shared methods
7908 for the possibility that they might show up in a stack
7909 trace where their exact instantiation is needed. */
7910 if (cfg->gshared && method == cfg->method) {
7911 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7912 mini_method_get_context (method)->method_inst ||
7913 method->klass->valuetype) {
7914 mono_get_vtable_var (cfg);
7916 /* FIXME: Is there a better way to do this?
7917 We need the variable live for the duration
7918 of the whole method. */
7919 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7923 /* add a check for this != NULL to inlined methods */
7924 if (is_virtual_call) {
7927 NEW_ARGLOAD (cfg, arg_ins, 0);
7928 MONO_ADD_INS (cfg->cbb, arg_ins);
7929 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7932 skip_dead_blocks = !dont_verify;
7933 if (skip_dead_blocks) {
7934 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7939 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7940 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7943 start_new_bblock = 0;
7945 if (cfg->method == method)
7946 cfg->real_offset = ip - header->code;
7948 cfg->real_offset = inline_offset;
7953 if (start_new_bblock) {
7954 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7955 if (start_new_bblock == 2) {
7956 g_assert (ip == tblock->cil_code);
7958 GET_BBLOCK (cfg, tblock, ip);
7960 cfg->cbb->next_bb = tblock;
7962 start_new_bblock = 0;
7963 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7964 if (cfg->verbose_level > 3)
7965 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7966 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7970 g_slist_free (class_inits);
7973 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7974 link_bblock (cfg, cfg->cbb, tblock);
7975 if (sp != stack_start) {
7976 handle_stack_args (cfg, stack_start, sp - stack_start);
7978 CHECK_UNVERIFIABLE (cfg);
7980 cfg->cbb->next_bb = tblock;
7982 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7983 if (cfg->verbose_level > 3)
7984 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7985 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7988 g_slist_free (class_inits);
7993 if (skip_dead_blocks) {
7994 int ip_offset = ip - header->code;
7996 if (ip_offset == bb->end)
8000 int op_size = mono_opcode_size (ip, end);
8001 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8003 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8005 if (ip_offset + op_size == bb->end) {
8006 MONO_INST_NEW (cfg, ins, OP_NOP);
8007 MONO_ADD_INS (cfg->cbb, ins);
8008 start_new_bblock = 1;
8016 * Sequence points are points where the debugger can place a breakpoint.
8017 * Currently, we generate these automatically at points where the IL
8020 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8022 * Make methods interruptible at the beginning, and at the targets of
8023 * backward branches.
8024 * Also, do this at the start of every bblock in methods with clauses too,
8025 * to be able to handle instructions with imprecise control flow like
8027 * Backward branches are handled at the end of method-to-ir ().
8029 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8030 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8032 /* Avoid sequence points on empty IL like .volatile */
8033 // FIXME: Enable this
8034 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8035 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8036 if ((sp != stack_start) && !sym_seq_point)
8037 ins->flags |= MONO_INST_NONEMPTY_STACK;
8038 MONO_ADD_INS (cfg->cbb, ins);
8041 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8044 cfg->cbb->real_offset = cfg->real_offset;
8046 if ((cfg->method == method) && cfg->coverage_info) {
8047 guint32 cil_offset = ip - header->code;
8048 cfg->coverage_info->data [cil_offset].cil_code = ip;
8050 /* TODO: Use an increment here */
8051 #if defined(TARGET_X86)
8052 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8053 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8055 MONO_ADD_INS (cfg->cbb, ins);
8057 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8058 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8062 if (cfg->verbose_level > 3)
8063 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8067 if (seq_points && !sym_seq_points && sp != stack_start) {
8069 * The C# compiler uses these nops to notify the JIT that it should
8070 * insert seq points.
8072 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8073 MONO_ADD_INS (cfg->cbb, ins);
8075 if (cfg->keep_cil_nops)
8076 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8078 MONO_INST_NEW (cfg, ins, OP_NOP);
8080 MONO_ADD_INS (cfg->cbb, ins);
8083 if (should_insert_brekpoint (cfg->method)) {
8084 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8086 MONO_INST_NEW (cfg, ins, OP_NOP);
8089 MONO_ADD_INS (cfg->cbb, ins);
8095 CHECK_STACK_OVF (1);
8096 n = (*ip)-CEE_LDARG_0;
8098 EMIT_NEW_ARGLOAD (cfg, ins, n);
8106 CHECK_STACK_OVF (1);
8107 n = (*ip)-CEE_LDLOC_0;
8109 EMIT_NEW_LOCLOAD (cfg, ins, n);
8118 n = (*ip)-CEE_STLOC_0;
8121 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8123 emit_stloc_ir (cfg, sp, header, n);
8130 CHECK_STACK_OVF (1);
8133 EMIT_NEW_ARGLOAD (cfg, ins, n);
8139 CHECK_STACK_OVF (1);
8142 NEW_ARGLOADA (cfg, ins, n);
8143 MONO_ADD_INS (cfg->cbb, ins);
8153 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8155 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8160 CHECK_STACK_OVF (1);
8163 EMIT_NEW_LOCLOAD (cfg, ins, n);
8167 case CEE_LDLOCA_S: {
8168 unsigned char *tmp_ip;
8170 CHECK_STACK_OVF (1);
8171 CHECK_LOCAL (ip [1]);
8173 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8179 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8188 CHECK_LOCAL (ip [1]);
8189 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8191 emit_stloc_ir (cfg, sp, header, ip [1]);
8196 CHECK_STACK_OVF (1);
8197 EMIT_NEW_PCONST (cfg, ins, NULL);
8198 ins->type = STACK_OBJ;
8203 CHECK_STACK_OVF (1);
8204 EMIT_NEW_ICONST (cfg, ins, -1);
8217 CHECK_STACK_OVF (1);
8218 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8224 CHECK_STACK_OVF (1);
8226 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8232 CHECK_STACK_OVF (1);
8233 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8239 CHECK_STACK_OVF (1);
8240 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8241 ins->type = STACK_I8;
8242 ins->dreg = alloc_dreg (cfg, STACK_I8);
8244 ins->inst_l = (gint64)read64 (ip);
8245 MONO_ADD_INS (cfg->cbb, ins);
8251 gboolean use_aotconst = FALSE;
8253 #ifdef TARGET_POWERPC
8254 /* FIXME: Clean this up */
8255 if (cfg->compile_aot)
8256 use_aotconst = TRUE;
8259 /* FIXME: we should really allocate this only late in the compilation process */
8260 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8262 CHECK_STACK_OVF (1);
8268 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8270 dreg = alloc_freg (cfg);
8271 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8272 ins->type = cfg->r4_stack_type;
8274 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8275 ins->type = cfg->r4_stack_type;
8276 ins->dreg = alloc_dreg (cfg, STACK_R8);
8278 MONO_ADD_INS (cfg->cbb, ins);
8288 gboolean use_aotconst = FALSE;
8290 #ifdef TARGET_POWERPC
8291 /* FIXME: Clean this up */
8292 if (cfg->compile_aot)
8293 use_aotconst = TRUE;
8296 /* FIXME: we should really allocate this only late in the compilation process */
8297 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8299 CHECK_STACK_OVF (1);
8305 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8307 dreg = alloc_freg (cfg);
8308 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8309 ins->type = STACK_R8;
8311 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8312 ins->type = STACK_R8;
8313 ins->dreg = alloc_dreg (cfg, STACK_R8);
8315 MONO_ADD_INS (cfg->cbb, ins);
8324 MonoInst *temp, *store;
8326 CHECK_STACK_OVF (1);
8330 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8331 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8333 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8336 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8349 if (sp [0]->type == STACK_R8)
8350 /* we need to pop the value from the x86 FP stack */
8351 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8356 MonoMethodSignature *fsig;
8359 INLINE_FAILURE ("jmp");
8360 GSHAREDVT_FAILURE (*ip);
8363 if (stack_start != sp)
8365 token = read32 (ip + 1);
8366 /* FIXME: check the signature matches */
8367 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8370 if (cfg->gshared && mono_method_check_context_used (cmethod))
8371 GENERIC_SHARING_FAILURE (CEE_JMP);
8373 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8375 fsig = mono_method_signature (cmethod);
8376 n = fsig->param_count + fsig->hasthis;
8377 if (cfg->llvm_only) {
8380 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8381 for (i = 0; i < n; ++i)
8382 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8383 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8385 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8386 * have to emit a normal return since llvm expects it.
8389 emit_setret (cfg, ins);
8390 MONO_INST_NEW (cfg, ins, OP_BR);
8391 ins->inst_target_bb = end_bblock;
8392 MONO_ADD_INS (cfg->cbb, ins);
8393 link_bblock (cfg, cfg->cbb, end_bblock);
8396 } else if (cfg->backend->have_op_tail_call) {
8397 /* Handle tail calls similarly to calls */
8400 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8401 call->method = cmethod;
8402 call->tail_call = TRUE;
8403 call->signature = mono_method_signature (cmethod);
8404 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8405 call->inst.inst_p0 = cmethod;
8406 for (i = 0; i < n; ++i)
8407 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8409 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8410 call->vret_var = cfg->vret_addr;
8412 mono_arch_emit_call (cfg, call);
8413 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8414 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8416 for (i = 0; i < num_args; ++i)
8417 /* Prevent arguments from being optimized away */
8418 arg_array [i]->flags |= MONO_INST_VOLATILE;
8420 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8421 ins = (MonoInst*)call;
8422 ins->inst_p0 = cmethod;
8423 MONO_ADD_INS (cfg->cbb, ins);
8427 start_new_bblock = 1;
8432 MonoMethodSignature *fsig;
8435 token = read32 (ip + 1);
8439 //GSHAREDVT_FAILURE (*ip);
8444 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8447 if (method->dynamic && fsig->pinvoke) {
8451 * This is a call through a function pointer using a pinvoke
8452 * signature. Have to create a wrapper and call that instead.
8453 * FIXME: This is very slow, need to create a wrapper at JIT time
8454 * instead based on the signature.
8456 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8457 EMIT_NEW_PCONST (cfg, args [1], fsig);
8459 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8462 n = fsig->param_count + fsig->hasthis;
8466 //g_assert (!virtual_ || fsig->hasthis);
8470 inline_costs += 10 * num_calls++;
8473 * Making generic calls out of gsharedvt methods.
8474 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8475 * patching gshared method addresses into a gsharedvt method.
8477 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8479 * We pass the address to the gsharedvt trampoline in the rgctx reg
8481 MonoInst *callee = addr;
8483 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8485 GSHAREDVT_FAILURE (*ip);
8489 GSHAREDVT_FAILURE (*ip);
8491 addr = emit_get_rgctx_sig (cfg, context_used,
8492 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8493 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8497 /* Prevent inlining of methods with indirect calls */
8498 INLINE_FAILURE ("indirect call");
8500 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8501 MonoJumpInfoType info_type;
8505 * Instead of emitting an indirect call, emit a direct call
8506 * with the contents of the aotconst as the patch info.
8508 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8509 info_type = (MonoJumpInfoType)addr->inst_c1;
8510 info_data = addr->inst_p0;
8512 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8513 info_data = addr->inst_right->inst_left;
8516 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8517 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8520 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8521 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8526 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8530 /* End of call, INS should contain the result of the call, if any */
8532 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8534 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8537 CHECK_CFG_EXCEPTION;
8541 constrained_class = NULL;
8545 case CEE_CALLVIRT: {
8546 MonoInst *addr = NULL;
8547 MonoMethodSignature *fsig = NULL;
8549 int virtual_ = *ip == CEE_CALLVIRT;
8550 gboolean pass_imt_from_rgctx = FALSE;
8551 MonoInst *imt_arg = NULL;
8552 MonoInst *keep_this_alive = NULL;
8553 gboolean pass_vtable = FALSE;
8554 gboolean pass_mrgctx = FALSE;
8555 MonoInst *vtable_arg = NULL;
8556 gboolean check_this = FALSE;
8557 gboolean supported_tail_call = FALSE;
8558 gboolean tail_call = FALSE;
8559 gboolean need_seq_point = FALSE;
8560 guint32 call_opcode = *ip;
8561 gboolean emit_widen = TRUE;
8562 gboolean push_res = TRUE;
8563 gboolean skip_ret = FALSE;
8564 gboolean delegate_invoke = FALSE;
8565 gboolean direct_icall = FALSE;
8566 gboolean constrained_partial_call = FALSE;
8567 MonoMethod *cil_method;
8570 token = read32 (ip + 1);
8574 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8577 cil_method = cmethod;
8579 if (constrained_class) {
8580 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8581 if (!mini_is_gsharedvt_klass (constrained_class)) {
8582 g_assert (!cmethod->klass->valuetype);
8583 if (!mini_type_is_reference (&constrained_class->byval_arg))
8584 constrained_partial_call = TRUE;
8588 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8589 if (cfg->verbose_level > 2)
8590 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8591 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8592 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8594 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8598 if (cfg->verbose_level > 2)
8599 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8601 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8603 * This is needed since get_method_constrained can't find
8604 * the method in klass representing a type var.
8605 * The type var is guaranteed to be a reference type in this
8608 if (!mini_is_gsharedvt_klass (constrained_class))
8609 g_assert (!cmethod->klass->valuetype);
8611 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8616 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8617 /* Use the corresponding method from the base type to avoid boxing */
8618 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8619 g_assert (base_type);
8620 constrained_class = mono_class_from_mono_type (base_type);
8621 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8626 if (!dont_verify && !cfg->skip_visibility) {
8627 MonoMethod *target_method = cil_method;
8628 if (method->is_inflated) {
8629 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8632 if (!mono_method_can_access_method (method_definition, target_method) &&
8633 !mono_method_can_access_method (method, cil_method))
8634 emit_method_access_failure (cfg, method, cil_method);
8637 if (mono_security_core_clr_enabled ())
8638 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8640 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8641 /* MS.NET seems to silently convert this to a callvirt */
8646 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8647 * converts to a callvirt.
8649 * tests/bug-515884.il is an example of this behavior
8651 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8652 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8653 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8657 if (!cmethod->klass->inited)
8658 if (!mono_class_init (cmethod->klass))
8659 TYPE_LOAD_ERROR (cmethod->klass);
8661 fsig = mono_method_signature (cmethod);
8664 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8665 mini_class_is_system_array (cmethod->klass)) {
8666 array_rank = cmethod->klass->rank;
8667 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8668 direct_icall = TRUE;
8669 } else if (fsig->pinvoke) {
8670 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8671 fsig = mono_method_signature (wrapper);
8672 } else if (constrained_class) {
8674 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8678 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8679 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8681 /* See code below */
8682 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8683 MonoBasicBlock *tbb;
8685 GET_BBLOCK (cfg, tbb, ip + 5);
8686 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8688 * We want to extend the try block to cover the call, but we can't do it if the
8689 * call is made directly since it's followed by an exception check.
8691 direct_icall = FALSE;
8695 mono_save_token_info (cfg, image, token, cil_method);
8697 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8698 need_seq_point = TRUE;
8700 /* Don't support calls made using type arguments for now */
8702 if (cfg->gsharedvt) {
8703 if (mini_is_gsharedvt_signature (fsig))
8704 GSHAREDVT_FAILURE (*ip);
8708 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8709 g_assert_not_reached ();
8711 n = fsig->param_count + fsig->hasthis;
8713 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8717 g_assert (!mono_method_check_context_used (cmethod));
8721 //g_assert (!virtual_ || fsig->hasthis);
8726 * We have the `constrained.' prefix opcode.
8728 if (constrained_class) {
8729 if (mini_is_gsharedvt_klass (constrained_class)) {
8730 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8731 /* The 'Own method' case below */
8732 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8733 /* 'The type parameter is instantiated as a reference type' case below. */
8735 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8736 CHECK_CFG_EXCEPTION;
8742 if (constrained_partial_call) {
8743 gboolean need_box = TRUE;
8746 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8747 * called method is not known at compile time either. The called method could end up being
8748 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8749 * to box the receiver.
8750 * A simple solution would be to box always and make a normal virtual call, but that would
8751 * be bad performance wise.
8753 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8755 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8760 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8761 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8762 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8763 ins->klass = constrained_class;
8764 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8765 CHECK_CFG_EXCEPTION;
8766 } else if (need_box) {
8768 MonoBasicBlock *is_ref_bb, *end_bb;
8769 MonoInst *nonbox_call;
8772 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8774 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8775 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8777 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8779 NEW_BBLOCK (cfg, is_ref_bb);
8780 NEW_BBLOCK (cfg, end_bb);
8782 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8783 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8784 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8787 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8789 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8792 MONO_START_BB (cfg, is_ref_bb);
8793 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8794 ins->klass = constrained_class;
8795 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8796 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8800 MONO_START_BB (cfg, end_bb);
8803 nonbox_call->dreg = ins->dreg;
8806 g_assert (mono_class_is_interface (cmethod->klass));
8807 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8808 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8811 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8813 * The type parameter is instantiated as a valuetype,
8814 * but that type doesn't override the method we're
8815 * calling, so we need to box `this'.
8817 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8818 ins->klass = constrained_class;
8819 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8820 CHECK_CFG_EXCEPTION;
8821 } else if (!constrained_class->valuetype) {
8822 int dreg = alloc_ireg_ref (cfg);
8825 * The type parameter is instantiated as a reference
8826 * type. We have a managed pointer on the stack, so
8827 * we need to dereference it here.
8829 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8830 ins->type = STACK_OBJ;
8833 if (cmethod->klass->valuetype) {
8836 /* Interface method */
8839 mono_class_setup_vtable (constrained_class);
8840 CHECK_TYPELOAD (constrained_class);
8841 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8843 TYPE_LOAD_ERROR (constrained_class);
8844 slot = mono_method_get_vtable_slot (cmethod);
8846 TYPE_LOAD_ERROR (cmethod->klass);
8847 cmethod = constrained_class->vtable [ioffset + slot];
8849 if (cmethod->klass == mono_defaults.enum_class) {
8850 /* Enum implements some interfaces, so treat this as the first case */
8851 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8852 ins->klass = constrained_class;
8853 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8854 CHECK_CFG_EXCEPTION;
8859 constrained_class = NULL;
8862 if (check_call_signature (cfg, fsig, sp))
8865 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8866 delegate_invoke = TRUE;
8868 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8869 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8870 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8878 * If the callee is a shared method, then its static cctor
8879 * might not get called after the call was patched.
8881 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8882 emit_class_init (cfg, cmethod->klass);
8883 CHECK_TYPELOAD (cmethod->klass);
8886 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8889 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8891 context_used = mini_method_check_context_used (cfg, cmethod);
8893 if (context_used && mono_class_is_interface (cmethod->klass)) {
8894 /* Generic method interface
8895 calls are resolved via a
8896 helper function and don't
8898 if (!cmethod_context || !cmethod_context->method_inst)
8899 pass_imt_from_rgctx = TRUE;
8903 * If a shared method calls another
8904 * shared method then the caller must
8905 * have a generic sharing context
8906 * because the magic trampoline
8907 * requires it. FIXME: We shouldn't
8908 * have to force the vtable/mrgctx
8909 * variable here. Instead there
8910 * should be a flag in the cfg to
8911 * request a generic sharing context.
8914 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8915 mono_get_vtable_var (cfg);
8920 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8922 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8924 CHECK_TYPELOAD (cmethod->klass);
8925 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8930 g_assert (!vtable_arg);
8932 if (!cfg->compile_aot) {
8934 * emit_get_rgctx_method () calls mono_class_vtable () so check
8935 * for type load errors before.
8937 mono_class_setup_vtable (cmethod->klass);
8938 CHECK_TYPELOAD (cmethod->klass);
8941 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8943 /* !marshalbyref is needed to properly handle generic methods + remoting */
8944 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8945 MONO_METHOD_IS_FINAL (cmethod)) &&
8946 !mono_class_is_marshalbyref (cmethod->klass)) {
8953 if (pass_imt_from_rgctx) {
8954 g_assert (!pass_vtable);
8956 imt_arg = emit_get_rgctx_method (cfg, context_used,
8957 cmethod, MONO_RGCTX_INFO_METHOD);
8961 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8963 /* Calling virtual generic methods */
8964 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8965 !(MONO_METHOD_IS_FINAL (cmethod) &&
8966 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8967 fsig->generic_param_count &&
8968 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8970 MonoInst *this_temp, *this_arg_temp, *store;
8971 MonoInst *iargs [4];
8973 g_assert (fsig->is_inflated);
8975 /* Prevent inlining of methods that contain indirect calls */
8976 INLINE_FAILURE ("virtual generic call");
8978 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8979 GSHAREDVT_FAILURE (*ip);
8981 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8982 g_assert (!imt_arg);
8984 g_assert (cmethod->is_inflated);
8985 imt_arg = emit_get_rgctx_method (cfg, context_used,
8986 cmethod, MONO_RGCTX_INFO_METHOD);
8987 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8989 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8990 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8991 MONO_ADD_INS (cfg->cbb, store);
8993 /* FIXME: This should be a managed pointer */
8994 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8996 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8997 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8998 cmethod, MONO_RGCTX_INFO_METHOD);
8999 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9000 addr = mono_emit_jit_icall (cfg,
9001 mono_helper_compile_generic_method, iargs);
9003 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9005 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9012 * Implement a workaround for the inherent races involved in locking:
9018 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9019 * try block, the Exit () won't be executed, see:
9020 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9021 * To work around this, we extend such try blocks to include the last x bytes
9022 * of the Monitor.Enter () call.
9024 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9025 MonoBasicBlock *tbb;
9027 GET_BBLOCK (cfg, tbb, ip + 5);
9029 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9030 * from Monitor.Enter like ArgumentNullException.
9032 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9033 /* Mark this bblock as needing to be extended */
9034 tbb->extend_try_block = TRUE;
9038 /* Conversion to a JIT intrinsic */
9039 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9040 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9041 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9049 if ((cfg->opt & MONO_OPT_INLINE) &&
9050 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9051 mono_method_check_inlining (cfg, cmethod)) {
9053 gboolean always = FALSE;
9055 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9056 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9057 /* Prevent inlining of methods that call wrappers */
9058 INLINE_FAILURE ("wrapper call");
9059 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9063 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9065 cfg->real_offset += 5;
9067 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9068 /* *sp is already set by inline_method */
9073 inline_costs += costs;
9079 /* Tail recursion elimination */
9080 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9081 gboolean has_vtargs = FALSE;
9084 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9085 INLINE_FAILURE ("tail call");
9087 /* keep it simple */
9088 for (i = fsig->param_count - 1; i >= 0; i--) {
9089 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9094 if (need_seq_point) {
9095 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9096 need_seq_point = FALSE;
9098 for (i = 0; i < n; ++i)
9099 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9100 MONO_INST_NEW (cfg, ins, OP_BR);
9101 MONO_ADD_INS (cfg->cbb, ins);
9102 tblock = start_bblock->out_bb [0];
9103 link_bblock (cfg, cfg->cbb, tblock);
9104 ins->inst_target_bb = tblock;
9105 start_new_bblock = 1;
9107 /* skip the CEE_RET, too */
9108 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9115 inline_costs += 10 * num_calls++;
9118 * Synchronized wrappers.
9119 * Its hard to determine where to replace a method with its synchronized
9120 * wrapper without causing an infinite recursion. The current solution is
9121 * to add the synchronized wrapper in the trampolines, and to
9122 * change the called method to a dummy wrapper, and resolve that wrapper
9123 * to the real method in mono_jit_compile_method ().
9125 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9126 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9127 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9128 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9132 * Making generic calls out of gsharedvt methods.
9133 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9134 * patching gshared method addresses into a gsharedvt method.
9136 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9137 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9138 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9139 MonoRgctxInfoType info_type;
9142 //if (mono_class_is_interface (cmethod->klass))
9143 //GSHAREDVT_FAILURE (*ip);
9144 // disable for possible remoting calls
9145 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9146 GSHAREDVT_FAILURE (*ip);
9147 if (fsig->generic_param_count) {
9148 /* virtual generic call */
9149 g_assert (!imt_arg);
9150 /* Same as the virtual generic case above */
9151 imt_arg = emit_get_rgctx_method (cfg, context_used,
9152 cmethod, MONO_RGCTX_INFO_METHOD);
9153 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9155 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9156 /* This can happen when we call a fully instantiated iface method */
9157 imt_arg = emit_get_rgctx_method (cfg, context_used,
9158 cmethod, MONO_RGCTX_INFO_METHOD);
9163 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9164 keep_this_alive = sp [0];
9166 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9167 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9169 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9170 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9172 if (cfg->llvm_only) {
9173 // FIXME: Avoid initializing vtable_arg
9174 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9176 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9181 /* Generic sharing */
9184 * Use this if the callee is gsharedvt sharable too, since
9185 * at runtime we might find an instantiation so the call cannot
9186 * be patched (the 'no_patch' code path in mini-trampolines.c).
9188 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9189 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9190 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9191 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9192 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9193 INLINE_FAILURE ("gshared");
9195 g_assert (cfg->gshared && cmethod);
9199 * We are compiling a call to a
9200 * generic method from shared code,
9201 * which means that we have to look up
9202 * the method in the rgctx and do an
9206 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9208 if (cfg->llvm_only) {
9209 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9210 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9212 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9213 // FIXME: Avoid initializing imt_arg/vtable_arg
9214 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9216 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9217 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9222 /* Direct calls to icalls */
9224 MonoMethod *wrapper;
9227 /* Inline the wrapper */
9228 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9230 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9231 g_assert (costs > 0);
9232 cfg->real_offset += 5;
9234 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9235 /* *sp is already set by inline_method */
9240 inline_costs += costs;
9249 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9250 MonoInst *val = sp [fsig->param_count];
9252 if (val->type == STACK_OBJ) {
9253 MonoInst *iargs [2];
9258 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9261 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9263 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9264 mini_emit_write_barrier (cfg, addr, val);
9265 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9266 GSHAREDVT_FAILURE (*ip);
9267 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9268 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9270 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9271 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9272 if (!cmethod->klass->element_class->valuetype && !readonly)
9273 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9274 CHECK_TYPELOAD (cmethod->klass);
9277 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9280 g_assert_not_reached ();
9287 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9291 /* Tail prefix / tail call optimization */
9293 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9294 /* FIXME: runtime generic context pointer for jumps? */
9295 /* FIXME: handle this for generic sharing eventually */
9296 if ((ins_flag & MONO_INST_TAILCALL) &&
9297 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9298 supported_tail_call = TRUE;
9300 if (supported_tail_call) {
9303 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9304 INLINE_FAILURE ("tail call");
9306 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9308 if (cfg->backend->have_op_tail_call) {
9309 /* Handle tail calls similarly to normal calls */
9312 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9314 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9315 call->tail_call = TRUE;
9316 call->method = cmethod;
9317 call->signature = mono_method_signature (cmethod);
9320 * We implement tail calls by storing the actual arguments into the
9321 * argument variables, then emitting a CEE_JMP.
9323 for (i = 0; i < n; ++i) {
9324 /* Prevent argument from being register allocated */
9325 arg_array [i]->flags |= MONO_INST_VOLATILE;
9326 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9328 ins = (MonoInst*)call;
9329 ins->inst_p0 = cmethod;
9330 ins->inst_p1 = arg_array [0];
9331 MONO_ADD_INS (cfg->cbb, ins);
9332 link_bblock (cfg, cfg->cbb, end_bblock);
9333 start_new_bblock = 1;
9335 // FIXME: Eliminate unreachable epilogs
9338 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9339 * only reachable from this call.
9341 GET_BBLOCK (cfg, tblock, ip + 5);
9342 if (tblock == cfg->cbb || tblock->in_count == 0)
9351 * Virtual calls in llvm-only mode.
9353 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9354 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9359 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9360 INLINE_FAILURE ("call");
9361 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9362 imt_arg, vtable_arg);
9364 if (tail_call && !cfg->llvm_only) {
9365 link_bblock (cfg, cfg->cbb, end_bblock);
9366 start_new_bblock = 1;
9368 // FIXME: Eliminate unreachable epilogs
9371 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9372 * only reachable from this call.
9374 GET_BBLOCK (cfg, tblock, ip + 5);
9375 if (tblock == cfg->cbb || tblock->in_count == 0)
9382 /* End of call, INS should contain the result of the call, if any */
9384 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9387 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9392 if (keep_this_alive) {
9393 MonoInst *dummy_use;
9395 /* See mono_emit_method_call_full () */
9396 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9399 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9401 * Clang can convert these calls to tail calls which screw up the stack
9402 * walk. This happens even when the -fno-optimize-sibling-calls
9403 * option is passed to clang.
9404 * Work around this by emitting a dummy call.
9406 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9409 CHECK_CFG_EXCEPTION;
9413 g_assert (*ip == CEE_RET);
9417 constrained_class = NULL;
9419 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9423 if (cfg->method != method) {
9424 /* return from inlined method */
9426 * If in_count == 0, that means the ret is unreachable due to
9427 * being preceeded by a throw. In that case, inline_method () will
9428 * handle setting the return value
9429 * (test case: test_0_inline_throw ()).
9431 if (return_var && cfg->cbb->in_count) {
9432 MonoType *ret_type = mono_method_signature (method)->ret;
9438 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9441 //g_assert (returnvar != -1);
9442 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9443 cfg->ret_var_set = TRUE;
9446 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9448 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9452 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9454 if (seq_points && !sym_seq_points) {
9456 * Place a seq point here too even through the IL stack is not
9457 * empty, so a step over on
9460 * will work correctly.
9462 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9463 MONO_ADD_INS (cfg->cbb, ins);
9466 g_assert (!return_var);
9470 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9473 emit_setret (cfg, *sp);
9476 if (sp != stack_start)
9478 MONO_INST_NEW (cfg, ins, OP_BR);
9480 ins->inst_target_bb = end_bblock;
9481 MONO_ADD_INS (cfg->cbb, ins);
9482 link_bblock (cfg, cfg->cbb, end_bblock);
9483 start_new_bblock = 1;
9487 MONO_INST_NEW (cfg, ins, OP_BR);
9489 target = ip + 1 + (signed char)(*ip);
9491 GET_BBLOCK (cfg, tblock, target);
9492 link_bblock (cfg, cfg->cbb, tblock);
9493 ins->inst_target_bb = tblock;
9494 if (sp != stack_start) {
9495 handle_stack_args (cfg, stack_start, sp - stack_start);
9497 CHECK_UNVERIFIABLE (cfg);
9499 MONO_ADD_INS (cfg->cbb, ins);
9500 start_new_bblock = 1;
9501 inline_costs += BRANCH_COST;
9515 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9517 target = ip + 1 + *(signed char*)ip;
9523 inline_costs += BRANCH_COST;
9527 MONO_INST_NEW (cfg, ins, OP_BR);
9530 target = ip + 4 + (gint32)read32(ip);
9532 GET_BBLOCK (cfg, tblock, target);
9533 link_bblock (cfg, cfg->cbb, tblock);
9534 ins->inst_target_bb = tblock;
9535 if (sp != stack_start) {
9536 handle_stack_args (cfg, stack_start, sp - stack_start);
9538 CHECK_UNVERIFIABLE (cfg);
9541 MONO_ADD_INS (cfg->cbb, ins);
9543 start_new_bblock = 1;
9544 inline_costs += BRANCH_COST;
9551 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9552 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9553 guint32 opsize = is_short ? 1 : 4;
9555 CHECK_OPSIZE (opsize);
9557 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9560 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9565 GET_BBLOCK (cfg, tblock, target);
9566 link_bblock (cfg, cfg->cbb, tblock);
9567 GET_BBLOCK (cfg, tblock, ip);
9568 link_bblock (cfg, cfg->cbb, tblock);
9570 if (sp != stack_start) {
9571 handle_stack_args (cfg, stack_start, sp - stack_start);
9572 CHECK_UNVERIFIABLE (cfg);
9575 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9576 cmp->sreg1 = sp [0]->dreg;
9577 type_from_op (cfg, cmp, sp [0], NULL);
9580 #if SIZEOF_REGISTER == 4
9581 if (cmp->opcode == OP_LCOMPARE_IMM) {
9582 /* Convert it to OP_LCOMPARE */
9583 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9584 ins->type = STACK_I8;
9585 ins->dreg = alloc_dreg (cfg, STACK_I8);
9587 MONO_ADD_INS (cfg->cbb, ins);
9588 cmp->opcode = OP_LCOMPARE;
9589 cmp->sreg2 = ins->dreg;
9592 MONO_ADD_INS (cfg->cbb, cmp);
9594 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9595 type_from_op (cfg, ins, sp [0], NULL);
9596 MONO_ADD_INS (cfg->cbb, ins);
9597 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9598 GET_BBLOCK (cfg, tblock, target);
9599 ins->inst_true_bb = tblock;
9600 GET_BBLOCK (cfg, tblock, ip);
9601 ins->inst_false_bb = tblock;
9602 start_new_bblock = 2;
9605 inline_costs += BRANCH_COST;
9620 MONO_INST_NEW (cfg, ins, *ip);
9622 target = ip + 4 + (gint32)read32(ip);
9628 inline_costs += BRANCH_COST;
9632 MonoBasicBlock **targets;
9633 MonoBasicBlock *default_bblock;
9634 MonoJumpInfoBBTable *table;
9635 int offset_reg = alloc_preg (cfg);
9636 int target_reg = alloc_preg (cfg);
9637 int table_reg = alloc_preg (cfg);
9638 int sum_reg = alloc_preg (cfg);
9639 gboolean use_op_switch;
9643 n = read32 (ip + 1);
9646 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9650 CHECK_OPSIZE (n * sizeof (guint32));
9651 target = ip + n * sizeof (guint32);
9653 GET_BBLOCK (cfg, default_bblock, target);
9654 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9656 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9657 for (i = 0; i < n; ++i) {
9658 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9659 targets [i] = tblock;
9660 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9664 if (sp != stack_start) {
9666 * Link the current bb with the targets as well, so handle_stack_args
9667 * will set their in_stack correctly.
9669 link_bblock (cfg, cfg->cbb, default_bblock);
9670 for (i = 0; i < n; ++i)
9671 link_bblock (cfg, cfg->cbb, targets [i]);
9673 handle_stack_args (cfg, stack_start, sp - stack_start);
9675 CHECK_UNVERIFIABLE (cfg);
9677 /* Undo the links */
9678 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9679 for (i = 0; i < n; ++i)
9680 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9686 for (i = 0; i < n; ++i)
9687 link_bblock (cfg, cfg->cbb, targets [i]);
9689 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9690 table->table = targets;
9691 table->table_size = n;
9693 use_op_switch = FALSE;
9695 /* ARM implements SWITCH statements differently */
9696 /* FIXME: Make it use the generic implementation */
9697 if (!cfg->compile_aot)
9698 use_op_switch = TRUE;
9701 if (COMPILE_LLVM (cfg))
9702 use_op_switch = TRUE;
9704 cfg->cbb->has_jump_table = 1;
9706 if (use_op_switch) {
9707 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9708 ins->sreg1 = src1->dreg;
9709 ins->inst_p0 = table;
9710 ins->inst_many_bb = targets;
9711 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9712 MONO_ADD_INS (cfg->cbb, ins);
9714 if (sizeof (gpointer) == 8)
9715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9719 #if SIZEOF_REGISTER == 8
9720 /* The upper word might not be zero, and we add it to a 64 bit address later */
9721 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9724 if (cfg->compile_aot) {
9725 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9727 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9728 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9729 ins->inst_p0 = table;
9730 ins->dreg = table_reg;
9731 MONO_ADD_INS (cfg->cbb, ins);
9734 /* FIXME: Use load_memindex */
9735 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9737 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9739 start_new_bblock = 1;
9740 inline_costs += (BRANCH_COST * 2);
9760 dreg = alloc_freg (cfg);
9763 dreg = alloc_lreg (cfg);
9766 dreg = alloc_ireg_ref (cfg);
9769 dreg = alloc_preg (cfg);
9772 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9773 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9774 if (*ip == CEE_LDIND_R4)
9775 ins->type = cfg->r4_stack_type;
9776 ins->flags |= ins_flag;
9777 MONO_ADD_INS (cfg->cbb, ins);
9779 if (ins_flag & MONO_INST_VOLATILE) {
9780 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9781 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9797 if (ins_flag & MONO_INST_VOLATILE) {
9798 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9799 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9802 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9803 ins->flags |= ins_flag;
9806 MONO_ADD_INS (cfg->cbb, ins);
9808 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9809 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9818 MONO_INST_NEW (cfg, ins, (*ip));
9820 ins->sreg1 = sp [0]->dreg;
9821 ins->sreg2 = sp [1]->dreg;
9822 type_from_op (cfg, ins, sp [0], sp [1]);
9824 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9826 /* Use the immediate opcodes if possible */
9827 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9828 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9829 if (imm_opcode != -1) {
9830 ins->opcode = imm_opcode;
9831 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9834 NULLIFY_INS (sp [1]);
9838 MONO_ADD_INS ((cfg)->cbb, (ins));
9840 *sp++ = mono_decompose_opcode (cfg, ins);
9857 MONO_INST_NEW (cfg, ins, (*ip));
9859 ins->sreg1 = sp [0]->dreg;
9860 ins->sreg2 = sp [1]->dreg;
9861 type_from_op (cfg, ins, sp [0], sp [1]);
9863 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9864 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9866 /* FIXME: Pass opcode to is_inst_imm */
9868 /* Use the immediate opcodes if possible */
9869 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9870 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9871 if (imm_opcode != -1) {
9872 ins->opcode = imm_opcode;
9873 if (sp [1]->opcode == OP_I8CONST) {
9874 #if SIZEOF_REGISTER == 8
9875 ins->inst_imm = sp [1]->inst_l;
9877 ins->inst_ls_word = sp [1]->inst_ls_word;
9878 ins->inst_ms_word = sp [1]->inst_ms_word;
9882 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9885 /* Might be followed by an instruction added by add_widen_op */
9886 if (sp [1]->next == NULL)
9887 NULLIFY_INS (sp [1]);
9890 MONO_ADD_INS ((cfg)->cbb, (ins));
9892 *sp++ = mono_decompose_opcode (cfg, ins);
9905 case CEE_CONV_OVF_I8:
9906 case CEE_CONV_OVF_U8:
9910 /* Special case this earlier so we have long constants in the IR */
9911 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9912 int data = sp [-1]->inst_c0;
9913 sp [-1]->opcode = OP_I8CONST;
9914 sp [-1]->type = STACK_I8;
9915 #if SIZEOF_REGISTER == 8
9916 if ((*ip) == CEE_CONV_U8)
9917 sp [-1]->inst_c0 = (guint32)data;
9919 sp [-1]->inst_c0 = data;
9921 sp [-1]->inst_ls_word = data;
9922 if ((*ip) == CEE_CONV_U8)
9923 sp [-1]->inst_ms_word = 0;
9925 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9927 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9934 case CEE_CONV_OVF_I4:
9935 case CEE_CONV_OVF_I1:
9936 case CEE_CONV_OVF_I2:
9937 case CEE_CONV_OVF_I:
9938 case CEE_CONV_OVF_U:
9941 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9942 ADD_UNOP (CEE_CONV_OVF_I8);
9949 case CEE_CONV_OVF_U1:
9950 case CEE_CONV_OVF_U2:
9951 case CEE_CONV_OVF_U4:
9954 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9955 ADD_UNOP (CEE_CONV_OVF_U8);
9962 case CEE_CONV_OVF_I1_UN:
9963 case CEE_CONV_OVF_I2_UN:
9964 case CEE_CONV_OVF_I4_UN:
9965 case CEE_CONV_OVF_I8_UN:
9966 case CEE_CONV_OVF_U1_UN:
9967 case CEE_CONV_OVF_U2_UN:
9968 case CEE_CONV_OVF_U4_UN:
9969 case CEE_CONV_OVF_U8_UN:
9970 case CEE_CONV_OVF_I_UN:
9971 case CEE_CONV_OVF_U_UN:
9978 CHECK_CFG_EXCEPTION;
9982 case CEE_ADD_OVF_UN:
9984 case CEE_MUL_OVF_UN:
9986 case CEE_SUB_OVF_UN:
9992 GSHAREDVT_FAILURE (*ip);
9995 token = read32 (ip + 1);
9996 klass = mini_get_class (method, token, generic_context);
9997 CHECK_TYPELOAD (klass);
9999 if (generic_class_is_reference_type (cfg, klass)) {
10000 MonoInst *store, *load;
10001 int dreg = alloc_ireg_ref (cfg);
10003 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10004 load->flags |= ins_flag;
10005 MONO_ADD_INS (cfg->cbb, load);
10007 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10008 store->flags |= ins_flag;
10009 MONO_ADD_INS (cfg->cbb, store);
10011 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10012 mini_emit_write_barrier (cfg, sp [0], sp [1]);
10014 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10020 int loc_index = -1;
10026 token = read32 (ip + 1);
10027 klass = mini_get_class (method, token, generic_context);
10028 CHECK_TYPELOAD (klass);
10030 /* Optimize the common ldobj+stloc combination */
10033 loc_index = ip [6];
10040 loc_index = ip [5] - CEE_STLOC_0;
10047 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10048 CHECK_LOCAL (loc_index);
10050 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10051 ins->dreg = cfg->locals [loc_index]->dreg;
10052 ins->flags |= ins_flag;
10055 if (ins_flag & MONO_INST_VOLATILE) {
10056 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10057 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10063 /* Optimize the ldobj+stobj combination */
10064 /* The reference case ends up being a load+store anyway */
10065 /* Skip this if the operation is volatile. */
10066 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10071 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10078 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10079 ins->flags |= ins_flag;
10082 if (ins_flag & MONO_INST_VOLATILE) {
10083 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10084 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/*
 * CEE_LDSTR handler (case label outside this excerpt): push a string
 * literal onto the evaluation stack. The strategy depends on how the
 * method is being compiled:
 *   - dynamic-method wrappers embed the string object directly;
 *   - other wrappers hold a C string in the wrapper data and build the
 *     managed string at runtime (AOT uses a patchable literal const);
 *   - MONO_OPT_SHARED resolves the token at runtime via an icall so the
 *     code is domain-neutral;
 *   - out-of-line (cold) bblocks use helper icalls to avoid creating
 *     the string object eagerly;
 *   - otherwise AOT emits a patchable constant, and the JIT interns the
 *     string now and embeds the object pointer.
 */
10093 CHECK_STACK_OVF (1);
10095 n = read32 (ip + 1);
10097 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10098 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10099 ins->type = STACK_OBJ;
10102 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10103 MonoInst *iargs [1];
10104 char *str = (char *)mono_method_get_wrapper_data (method, n);
10106 if (cfg->compile_aot)
10107 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10109 EMIT_NEW_PCONST (cfg, iargs [0], str);
10110 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10112 if (cfg->opt & MONO_OPT_SHARED) {
10113 MonoInst *iargs [3];
10115 if (cfg->compile_aot) {
/* Remember the token so the AOT compiler can emit the literal data. */
10116 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10118 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10119 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10120 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10121 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
/* Interning at compile time surfaces token/OOM errors eagerly;
 * the result itself is discarded here. */
10122 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10125 if (cfg->cbb->out_of_line) {
10126 MonoInst *iargs [2];
10128 if (image == mono_defaults.corlib) {
10130 * Avoid relocations in AOT and save some space by using a
10131 * version of helper_ldstr specialized to mscorlib.
10133 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10134 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10136 /* Avoid creating the string object */
10137 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10138 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10139 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10143 if (cfg->compile_aot) {
10144 NEW_LDSTRCONST (cfg, ins, image, n);
10146 MONO_ADD_INS (cfg->cbb, ins);
/* JIT path: intern the string now and bake the object pointer
 * into the generated code as a PCONST. */
10149 NEW_PCONST (cfg, ins, NULL);
10150 ins->type = STACK_OBJ;
10151 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10155 OUT_OF_MEMORY_FAILURE;
10158 MONO_ADD_INS (cfg->cbb, ins);
/*
 * CEE_NEWOBJ handler (case label outside this excerpt — presumably
 * CEE_NEWOBJ given handle_ctor_call/handle_alloc below): allocate an
 * object and invoke its constructor. Special fast paths exist for:
 *   - corlib exception construction in out-of-line (cold) bblocks,
 *   - multi-dimensional/System.Array construction via array-new icalls,
 *   - string constructors (allocation is done by the ctor itself),
 *   - valuetypes (a stack local is zero-initialized and its address
 *     passed as 'this').
 */
10167 MonoInst *iargs [2];
10168 MonoMethodSignature *fsig;
10171 MonoInst *vtable_arg = NULL;
10174 token = read32 (ip + 1);
10175 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10178 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10181 mono_save_token_info (cfg, image, token, cmethod);
10183 if (!mono_class_init (cmethod->klass))
10184 TYPE_LOAD_ERROR (cmethod->klass);
10186 context_used = mini_method_check_context_used (cfg, cmethod);
10188 if (mono_security_core_clr_enabled ())
10189 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
/* Under generic sharing, a sharable ctor of another ginst class may need
 * its static ctor run explicitly before the allocation. */
10191 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10192 emit_class_init (cfg, cmethod->klass);
10193 CHECK_TYPELOAD (cmethod->klass);
10197 if (cfg->gsharedvt) {
10198 if (mini_is_gsharedvt_variable_signature (sig))
10199 GSHAREDVT_FAILURE (*ip);
10203 n = fsig->param_count;
10207 * Generate smaller code for the common newobj <exception> instruction in
10208 * argument checking code.
10210 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10211 is_exception_class (cmethod->klass) && n <= 2 &&
10212 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10213 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10214 MonoInst *iargs [3];
10218 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
/* Dispatch on ctor arity: 0, 1 or 2 string arguments. */
10221 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10224 iargs [1] = sp [0];
10225 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10228 iargs [1] = sp [0];
10229 iargs [2] = sp [1];
10230 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10233 g_assert_not_reached ();
10241 /* move the args to allow room for 'this' in the first position */
10247 /* check_call_signature () requires sp[0] to be set */
10248 this_ins.type = STACK_OBJ;
10249 sp [0] = &this_ins;
10250 if (check_call_signature (cfg, fsig, sp))
10255 if (mini_class_is_system_array (cmethod->klass)) {
10256 *sp = emit_get_rgctx_method (cfg, context_used,
10257 cmethod, MONO_RGCTX_INFO_METHOD);
10259 /* Avoid varargs in the common case */
10260 if (fsig->param_count == 1)
10261 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10262 else if (fsig->param_count == 2)
10263 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10264 else if (fsig->param_count == 3)
10265 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10266 else if (fsig->param_count == 4)
10267 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10269 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10270 } else if (cmethod->string_ctor) {
10271 g_assert (!context_used);
10272 g_assert (!vtable_arg);
10273 /* we simply pass a null pointer */
10274 EMIT_NEW_PCONST (cfg, *sp, NULL);
10275 /* now call the string ctor */
10276 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10278 if (cmethod->klass->valuetype) {
/* Valuetype newobj: build and zero a stack local, pass its address. */
10279 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10280 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10281 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10286 * The code generated by mini_emit_virtual_call () expects
10287 * iargs [0] to be a boxed instance, but luckily the vcall
10288 * will be transformed into a normal call there.
10290 } else if (context_used) {
10291 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10294 MonoVTable *vtable = NULL;
10296 if (!cfg->compile_aot)
10297 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10298 CHECK_TYPELOAD (cmethod->klass);
10301 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10302 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10303 * As a workaround, we call class cctors before allocating objects.
10305 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10306 emit_class_init (cfg, cmethod->klass);
10307 if (cfg->verbose_level > 2)
10308 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10309 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10312 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10315 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10318 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10320 /* Now call the actual ctor */
10321 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10322 CHECK_CFG_EXCEPTION;
10325 if (alloc == NULL) {
/* Valuetype path: reload the initialized local as the result. */
10327 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10328 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10336 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10337 emit_seq_point (cfg, method, ip, FALSE, TRUE);
/*
 * CEE_CASTCLASS / CEE_ISINST: emit a type-check instruction on the object
 * reference on top of the stack. The OP_CASTCLASS/OP_ISINST opcodes are
 * decomposed into the actual check later; here we only record the target
 * class and mark the cfg as containing type checks.
 */
10340 case CEE_CASTCLASS:
10345 token = read32 (ip + 1);
10346 klass = mini_get_class (method, token, generic_context);
10347 CHECK_TYPELOAD (klass);
10348 if (sp [0]->type != STACK_OBJ)
/* Select the IR opcode from the CIL opcode being decoded. */
10351 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10352 ins->dreg = alloc_preg (cfg);
10353 ins->sreg1 = (*sp)->dreg;
10354 ins->klass = klass;
10355 ins->type = STACK_OBJ;
10356 MONO_ADD_INS (cfg->cbb, ins);
10358 CHECK_CFG_EXCEPTION;
10362 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
/*
 * CEE_UNBOX_ANY: unbox.any behaves as castclass for reference types and
 * as unbox+ldobj for value types. Paths:
 *   - gsharedvt klass: runtime-type-aware unbox helper;
 *   - reference type: null stays null, otherwise a castclass check;
 *   - nullable: convert the boxed object to a Nullable<T> value;
 *   - plain valuetype: compute the data address and load the value.
 */
10365 case CEE_UNBOX_ANY: {
10366 MonoInst *res, *addr;
10371 token = read32 (ip + 1);
10372 klass = mini_get_class (method, token, generic_context);
10373 CHECK_TYPELOAD (klass);
10375 mono_save_token_info (cfg, image, token, klass);
10377 context_used = mini_class_check_context_used (cfg, klass);
10379 if (mini_is_gsharedvt_klass (klass)) {
10380 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10382 } else if (generic_class_is_reference_type (cfg, klass)) {
10383 if (MONO_INS_IS_PCONST_NULL (*sp)) {
/* Constant null needs no check — pass it through typed as object. */
10384 EMIT_NEW_PCONST (cfg, res, NULL);
10385 res->type = STACK_OBJ;
10387 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10388 res->dreg = alloc_preg (cfg);
10389 res->sreg1 = (*sp)->dreg;
10390 res->klass = klass;
10391 res->type = STACK_OBJ;
10392 MONO_ADD_INS (cfg->cbb, res);
10393 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10395 } else if (mono_class_is_nullable (klass)) {
10396 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10398 addr = handle_unbox (cfg, klass, sp, context_used);
10400 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
/*
 * CEE_BOX handler (case label outside this excerpt): box a value type.
 * Reference types are a no-op per ECMA-335. Two pattern-based
 * optimizations are attempted before emitting a real box:
 *   1. box + constrained. + callvirt Enum::HasFlag on the same enum type
 *      is replaced by an inline bitwise test (handle_enum_has_flag);
 *   2. box followed immediately by brtrue/brfalse: a boxed valuetype is
 *      never null, so the branch outcome is known statically and the box
 *      itself can be skipped.
 */
10411 MonoClass *enum_class;
10412 MonoMethod *has_flag;
10418 token = read32 (ip + 1);
10419 klass = mini_get_class (method, token, generic_context);
10420 CHECK_TYPELOAD (klass);
10422 mono_save_token_info (cfg, image, token, klass);
10424 context_used = mini_class_check_context_used (cfg, klass);
10426 if (generic_class_is_reference_type (cfg, klass)) {
10432 if (klass == mono_defaults.void_class)
10434 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10436 /* frequent check in generic code: box (struct), brtrue */
10441 * <push int/long ptr>
10444 * constrained. MyFlags
10445 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10447 * If we find this sequence and the operand types on box and constrained
10448 * are equal, we can emit a specialized instruction sequence instead of
10449 * the very slow HasFlag () call.
10451 if ((cfg->opt & MONO_OPT_INTRINS) &&
10452 /* Cheap checks first. */
10453 ip + 5 + 6 + 5 < end &&
10454 ip [5] == CEE_PREFIX1 &&
10455 ip [6] == CEE_CONSTRAINED_ &&
10456 ip [11] == CEE_CALLVIRT &&
10457 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10458 mono_class_is_enum (klass) &&
10459 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10460 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10461 has_flag->klass == mono_defaults.enum_class &&
10462 !strcmp (has_flag->name, "HasFlag") &&
10463 has_flag->signature->hasthis &&
10464 has_flag->signature->param_count == 1) {
10465 CHECK_TYPELOAD (enum_class);
10467 if (enum_class == klass) {
10468 MonoInst *enum_this, *enum_flag;
10473 enum_this = sp [0];
10474 enum_flag = sp [1];
10476 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10481 // FIXME: LLVM can't handle the inconsistent bb linking
10482 if (!mono_class_is_nullable (klass) &&
10483 !mini_is_gsharedvt_klass (klass) &&
10484 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10485 (ip [5] == CEE_BRTRUE ||
10486 ip [5] == CEE_BRTRUE_S ||
10487 ip [5] == CEE_BRFALSE ||
10488 ip [5] == CEE_BRFALSE_S)) {
10489 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10491 MonoBasicBlock *true_bb, *false_bb;
10495 if (cfg->verbose_level > 3) {
10496 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10497 printf ("<box+brtrue opt>\n");
/* Decode the branch target from the short or long branch form. */
10502 case CEE_BRFALSE_S:
10505 target = ip + 1 + (signed char)(*ip);
10512 target = ip + 4 + (gint)(read32 (ip));
10516 g_assert_not_reached ();
10520 * We need to link both bblocks, since it is needed for handling stack
10521 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10522 * Branching to only one of them would lead to inconsistencies, so
10523 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10525 GET_BBLOCK (cfg, true_bb, target);
10526 GET_BBLOCK (cfg, false_bb, ip);
10528 mono_link_bblock (cfg, cfg->cbb, true_bb);
10529 mono_link_bblock (cfg, cfg->cbb, false_bb);
10531 if (sp != stack_start) {
10532 handle_stack_args (cfg, stack_start, sp - stack_start);
10534 CHECK_UNVERIFIABLE (cfg);
10537 if (COMPILE_LLVM (cfg)) {
10538 dreg = alloc_ireg (cfg);
10539 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10542 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10544 /* The JIT can't eliminate the iconst+compare */
10545 MONO_INST_NEW (cfg, ins, OP_BR);
/* Boxed valuetype is always non-null, so the branch is decided here. */
10546 ins->inst_target_bb = is_true ? true_bb : false_bb;
10547 MONO_ADD_INS (cfg->cbb, ins);
10550 start_new_bblock = 1;
/* No optimization applied: emit the real box operation. */
10554 *sp++ = handle_box (cfg, val, klass, context_used);
10556 CHECK_CFG_EXCEPTION;
/*
 * CEE_UNBOX handler (case label outside this excerpt): push a managed
 * pointer to the value inside a boxed object. Nullable types have no
 * boxed Nullable<T> representation, so for them the value is first
 * reconstructed and the address of that temporary is pushed instead.
 */
10565 token = read32 (ip + 1);
10566 klass = mini_get_class (method, token, generic_context);
10567 CHECK_TYPELOAD (klass);
10569 mono_save_token_info (cfg, image, token, klass);
10571 context_used = mini_class_check_context_used (cfg, klass);
10573 if (mono_class_is_nullable (klass)) {
10576 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10577 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10581 ins = handle_unbox (cfg, klass, sp, context_used);
/*
 * Field-access opcodes (CEE_LDFLD / CEE_LDFLDA / CEE_STFLD and the
 * static variants share this handler): resolve the field token, run
 * access/security checks, then emit the instance-field IR. The static
 * path continues further below in the handler.
 *
 * Instance-case specifics visible here:
 *   - remoting proxies (MarshalByRef/ContextBound) go through marshal
 *     wrappers, inlined when profitable;
 *   - gsharedvt classes fetch the field offset from the runtime generic
 *     context at execution time (stored off-by-one so 0 means "unset");
 *   - reference stores may need a GC write barrier;
 *   - LDFLD on a valuetype already on the stack spills it to a local to
 *     take its address.
 */
10594 MonoClassField *field;
10595 #ifndef DISABLE_REMOTING
10599 gboolean is_instance;
10601 gpointer addr = NULL;
10602 gboolean is_special_static;
10604 MonoInst *store_val = NULL;
10605 MonoInst *thread_ins;
10608 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10610 if (op == CEE_STFLD) {
10613 store_val = sp [1];
10618 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10620 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10623 if (op == CEE_STSFLD) {
10626 store_val = sp [0];
10631 token = read32 (ip + 1);
10632 if (method->wrapper_type != MONO_WRAPPER_NONE) {
/* Wrappers carry a resolved MonoClassField directly in their data. */
10633 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10634 klass = field->parent;
10637 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10640 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10641 FIELD_ACCESS_FAILURE (method, field);
10642 mono_class_init (klass);
10644 /* if the class is Critical then transparent code cannot access it's fields */
10645 if (!is_instance && mono_security_core_clr_enabled ())
10646 ensure_method_is_allowed_to_access_field (cfg, method, field);
10648 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10649 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10650 if (mono_security_core_clr_enabled ())
10651 ensure_method_is_allowed_to_access_field (cfg, method, field);
10654 ftype = mono_field_get_type (field);
10657 * LDFLD etc. is usable on static fields as well, so convert those cases to
10660 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10672 g_assert_not_reached ();
10674 is_instance = FALSE;
10677 context_used = mini_class_check_context_used (cfg, klass);
10679 /* INSTANCE CASE */
/* For valuetypes the field offset is relative to the unboxed data, so
 * subtract the object header size. */
10681 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10682 if (op == CEE_STFLD) {
10683 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10685 #ifndef DISABLE_REMOTING
10686 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10687 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10688 MonoInst *iargs [5];
10690 GSHAREDVT_FAILURE (op);
10692 iargs [0] = sp [0];
10693 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10694 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10695 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10697 iargs [4] = sp [1];
10699 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10700 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10701 iargs, ip, cfg->real_offset, TRUE);
10702 CHECK_CFG_EXCEPTION;
10703 g_assert (costs > 0);
10705 cfg->real_offset += 5;
10707 inline_costs += costs;
10709 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10714 MonoInst *store, *wbarrier_ptr_ins = NULL;
10716 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10718 if (ins_flag & MONO_INST_VOLATILE) {
10719 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10720 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10723 if (mini_is_gsharedvt_klass (klass)) {
10724 MonoInst *offset_ins;
10726 context_used = mini_class_check_context_used (cfg, klass);
10728 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10729 /* The value is offset by 1 */
10730 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10731 dreg = alloc_ireg_mp (cfg);
10732 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10733 wbarrier_ptr_ins = ins;
10734 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10735 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10737 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
/* Stores through a real object reference can fault (null check was a
 * separate ins); LDADDR means a local, which cannot. */
10739 if (sp [0]->opcode != OP_LDADDR)
10740 store->flags |= MONO_INST_FAULT;
10742 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10743 if (mini_is_gsharedvt_klass (klass)) {
10744 g_assert (wbarrier_ptr_ins);
10745 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10747 /* insert call to write barrier */
10751 dreg = alloc_ireg_mp (cfg);
10752 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10753 mini_emit_write_barrier (cfg, ptr, sp [1]);
10757 store->flags |= ins_flag;
10764 #ifndef DISABLE_REMOTING
10765 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10766 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10767 MonoInst *iargs [4];
10769 GSHAREDVT_FAILURE (op);
10771 iargs [0] = sp [0];
10772 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10773 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10774 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10775 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10776 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10777 iargs, ip, cfg->real_offset, TRUE);
10778 CHECK_CFG_EXCEPTION;
10779 g_assert (costs > 0);
10781 cfg->real_offset += 5;
10785 inline_costs += costs;
10787 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10793 if (sp [0]->type == STACK_VTYPE) {
10796 /* Have to compute the address of the variable */
10798 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10800 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10802 g_assert (var->klass == klass);
10804 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10808 if (op == CEE_LDFLDA) {
10809 if (sp [0]->type == STACK_OBJ) {
/* ldflda must raise NullReferenceException itself — the address is
 * returned, not dereferenced, so no later load will fault for us. */
10810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10811 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10814 dreg = alloc_ireg_mp (cfg);
10816 if (mini_is_gsharedvt_klass (klass)) {
10817 MonoInst *offset_ins;
10819 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10820 /* The value is offset by 1 */
10821 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10822 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10824 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10826 ins->klass = mono_class_from_mono_type (field->type);
10827 ins->type = STACK_MP;
10832 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10834 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10835 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10844 if (mini_is_gsharedvt_klass (klass)) {
10845 MonoInst *offset_ins;
10847 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10848 /* The value is offset by 1 */
10849 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10850 dreg = alloc_ireg_mp (cfg);
10851 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10852 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10854 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10856 load->flags |= ins_flag;
10857 if (sp [0]->opcode != OP_LDADDR)
10858 load->flags |= MONO_INST_FAULT;
/*
 * STATIC CASE of the field-access handler (LDSFLD / LDSFLDA / STSFLD,
 * plus LDFLD-family opcodes redirected to it above). Address computation
 * picks one of several strategies:
 *   - special-static (thread/context-static) fields: inline TLS lookup of
 *     MonoInternalThread::static_data when possible, icall otherwise;
 *   - MONO_OPT_SHARED or shared-AOT: resolve via
 *     mono_class_static_field_address at runtime;
 *   - generic sharing: static_data pointer from the runtime generic
 *     context plus the field offset;
 *   - plain JIT: run the cctor if needed and embed the absolute address.
 * Finally the load/store itself is emitted, with ECMA-335 12.6.7
 * acquire/release barriers for volatile accesses, and initonly fields of
 * initialized classes are folded to constants when safe.
 */
10870 context_used = mini_class_check_context_used (cfg, klass);
10872 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10873 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10877 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10878 * to be called here.
10880 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10881 mono_class_vtable (cfg->domain, klass);
10882 CHECK_TYPELOAD (klass);
10884 mono_domain_lock (cfg->domain);
10885 if (cfg->domain->special_static_fields)
10886 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10887 mono_domain_unlock (cfg->domain);
10889 is_special_static = mono_class_field_is_special_static (field);
/* The 0x80000000 bit in addr appears to mark offsets that cannot use the
 * fast TLS path — confirm against mono_get_special_static_data. */
10891 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10892 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10896 /* Generate IR to compute the field address */
10897 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10899 * Fast access to TLS data
10900 * Inline version of get_thread_static_data () in
10904 int idx, static_data_reg, array_reg, dreg;
10906 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10907 GSHAREDVT_FAILURE (op);
10909 static_data_reg = alloc_ireg (cfg);
10910 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10912 if (cfg->compile_aot) {
10913 int offset_reg, offset2_reg, idx_reg;
10915 /* For TLS variables, this will return the TLS offset */
10916 EMIT_NEW_SFLDACONST (cfg, ins, field);
10917 offset_reg = ins->dreg;
/* Decode the packed offset at runtime: low 6 bits index the
 * static_data table, the rest is the byte offset in that chunk —
 * mirroring the constant-folded else branch below. */
10918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10919 idx_reg = alloc_ireg (cfg);
10920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10922 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10923 array_reg = alloc_ireg (cfg);
10924 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10925 offset2_reg = alloc_ireg (cfg);
10926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10928 dreg = alloc_ireg (cfg);
10929 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT path: the packed offset is known now, fold it at compile time. */
10931 offset = (gsize)addr & 0x7fffffff;
10932 idx = offset & 0x3f;
10934 array_reg = alloc_ireg (cfg);
10935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10936 dreg = alloc_ireg (cfg);
10937 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10939 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10940 (cfg->compile_aot && is_special_static) ||
10941 (context_used && is_special_static)) {
10942 MonoInst *iargs [2];
10944 g_assert (field->parent);
10945 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10946 if (context_used) {
10947 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10948 field, MONO_RGCTX_INFO_CLASS_FIELD);
10950 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10952 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10953 } else if (context_used) {
10954 MonoInst *static_data;
10957 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10958 method->klass->name_space, method->klass->name, method->name,
10959 depth, field->offset);
10962 if (mono_class_needs_cctor_run (klass, method))
10963 emit_class_init (cfg, klass);
10966 * The pointer we're computing here is
10968 * super_info.static_data + field->offset
10970 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10971 klass, MONO_RGCTX_INFO_STATIC_DATA);
10973 if (mini_is_gsharedvt_klass (klass)) {
10974 MonoInst *offset_ins;
10976 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10977 /* The value is offset by 1 */
10978 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10979 dreg = alloc_ireg_mp (cfg);
10980 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10981 } else if (field->offset == 0) {
10984 int addr_reg = mono_alloc_preg (cfg);
10985 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10987 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10988 MonoInst *iargs [2];
10990 g_assert (field->parent);
10991 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10992 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10993 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10995 MonoVTable *vtable = NULL;
10997 if (!cfg->compile_aot)
10998 vtable = mono_class_vtable (cfg->domain, klass);
10999 CHECK_TYPELOAD (klass);
11002 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11003 if (!(g_slist_find (class_inits, klass))) {
11004 emit_class_init (cfg, klass);
11005 if (cfg->verbose_level > 2)
11006 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
/* Cache classes already init'ed so we emit at most one init call each. */
11007 class_inits = g_slist_prepend (class_inits, klass);
11010 if (cfg->run_cctors) {
11011 /* This makes so that inline cannot trigger */
11012 /* .cctors: too many apps depend on them */
11013 /* running with a specific order... */
11015 if (! vtable->initialized)
11016 INLINE_FAILURE ("class init");
11017 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11018 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11019 goto exception_exit;
11023 if (cfg->compile_aot)
11024 EMIT_NEW_SFLDACONST (cfg, ins, field);
11027 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11029 EMIT_NEW_PCONST (cfg, ins, addr);
11032 MonoInst *iargs [1];
/* Special-static slow path: resolve the per-thread address via icall. */
11033 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11034 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11038 /* Generate IR to do the actual load/store operation */
11040 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11041 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11042 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11045 if (op == CEE_LDSFLDA) {
11046 ins->klass = mono_class_from_mono_type (ftype);
11047 ins->type = STACK_PTR;
11049 } else if (op == CEE_STSFLD) {
11052 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11053 store->flags |= ins_flag;
11055 gboolean is_const = FALSE;
11056 MonoVTable *vtable = NULL;
11057 gpointer addr = NULL;
11059 if (!context_used) {
11060 vtable = mono_class_vtable (cfg->domain, klass);
11061 CHECK_TYPELOAD (klass);
/* Readonly (initonly) static of an already-initialized class: read the
 * current value at compile time and emit it as a constant. */
11063 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11064 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11065 int ro_type = ftype->type;
11067 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11068 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11069 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11072 GSHAREDVT_FAILURE (op);
11074 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11077 case MONO_TYPE_BOOLEAN:
11079 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11083 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11086 case MONO_TYPE_CHAR:
11088 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11092 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11097 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11101 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11106 case MONO_TYPE_PTR:
11107 case MONO_TYPE_FNPTR:
11108 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11109 type_to_eval_stack_type ((cfg), field->type, *sp);
11112 case MONO_TYPE_STRING:
11113 case MONO_TYPE_OBJECT:
11114 case MONO_TYPE_CLASS:
11115 case MONO_TYPE_SZARRAY:
11116 case MONO_TYPE_ARRAY:
/* A moving GC could relocate the object, so the embedded pointer
 * would go stale — only constant-fold with a non-moving GC. */
11117 if (!mono_gc_is_moving ()) {
11118 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11119 type_to_eval_stack_type ((cfg), field->type, *sp);
11127 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11132 case MONO_TYPE_VALUETYPE:
11142 CHECK_STACK_OVF (1);
11144 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11145 load->flags |= ins_flag;
11151 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11152 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11153 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/*
 * CEE_STOBJ handler (case label outside this excerpt): store a value of
 * the given type through the address at sp [0]. Volatile stores get a
 * release barrier before the store; reference-type stores get a GC write
 * barrier afterwards (skipped inside the write-barrier wrapper itself to
 * avoid recursion, and for constant-null stores).
 */
11164 token = read32 (ip + 1);
11165 klass = mini_get_class (method, token, generic_context);
11166 CHECK_TYPELOAD (klass);
11167 if (ins_flag & MONO_INST_VOLATILE) {
11168 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11169 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11171 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11172 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11173 ins->flags |= ins_flag;
11174 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11175 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11176 /* insert call to write barrier */
11177 mini_emit_write_barrier (cfg, sp [0], sp [1]);
/*
 * CEE_NEWARR handler (case label outside this excerpt): allocate a
 * one-dimensional array of the given element type, length at sp [0].
 * Paths: managed allocator / icall under generic sharing, a runtime
 * icall under MONO_OPT_SHARED, or a decomposable OP_NEWARR otherwise
 * (kept whole so array-bounds-check removal can reason about it).
 * Also recognizes the RuntimeHelpers.InitializeArray pattern and
 * replaces it with a direct memcpy from the metadata RVA blob.
 */
11189 const char *data_ptr;
11191 guint32 field_token;
11197 token = read32 (ip + 1);
11199 klass = mini_get_class (method, token, generic_context);
11200 CHECK_TYPELOAD (klass);
11201 if (klass->byval_arg.type == MONO_TYPE_VOID)
11204 context_used = mini_class_check_context_used (cfg, klass);
/* Normalize a 64-bit/native-int length to a 32-bit count with an
 * overflow check before allocation. */
11206 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11207 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11208 ins->sreg1 = sp [0]->dreg;
11209 ins->type = STACK_I4;
11210 ins->dreg = alloc_ireg (cfg);
11211 MONO_ADD_INS (cfg->cbb, ins);
11212 *sp = mono_decompose_opcode (cfg, ins);
11215 if (context_used) {
11216 MonoInst *args [3];
11217 MonoClass *array_class = mono_array_class_get (klass, 1);
11218 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11220 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11223 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11224 array_class, MONO_RGCTX_INFO_VTABLE);
11229 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11231 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11233 if (cfg->opt & MONO_OPT_SHARED) {
11234 /* Decompose now to avoid problems with references to the domainvar */
11235 MonoInst *iargs [3];
11237 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11238 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11239 iargs [2] = sp [0];
11241 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11243 /* Decompose later since it is needed by abcrem */
11244 MonoClass *array_type = mono_array_class_get (klass, 1);
11245 mono_class_vtable (cfg->domain, array_type);
11246 CHECK_TYPELOAD (array_type);
11248 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11249 ins->dreg = alloc_ireg_ref (cfg);
11250 ins->sreg1 = sp [0]->dreg;
11251 ins->inst_newa_class = klass;
11252 ins->type = STACK_OBJ;
11253 ins->klass = array_type;
11254 MONO_ADD_INS (cfg->cbb, ins);
11255 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11256 cfg->cbb->has_array_access = TRUE;
11258 /* Needed so mono_emit_load_get_addr () gets called */
11259 mono_get_got_var (cfg);
11269 * we inline/optimize the initialization sequence if possible.
11270 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11271 * for small sizes open code the memcpy
11272 * ensure the rva field is big enough
11274 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11275 MonoMethod *memcpy_method = mini_get_memcpy_method ();
11276 MonoInst *iargs [3];
11277 int add_reg = alloc_ireg_mp (cfg);
11279 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11280 if (cfg->compile_aot) {
/* AOT cannot embed the data pointer — emit a patchable RVA constant. */
11281 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11283 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11285 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11286 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * CEE_LDLEN handler (case label outside this excerpt): push the length
 * of the array reference on top of the stack. The OP_LDLEN is decomposed
 * later; MONO_INST_FAULT marks it as able to raise NullReferenceException
 * and is inherited by the decomposed load.
 */
11295 if (sp [0]->type != STACK_OBJ)
11298 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11299 ins->dreg = alloc_preg (cfg);
11300 ins->sreg1 = sp [0]->dreg;
11301 ins->type = STACK_I4;
11302 /* This flag will be inherited by the decomposition */
11303 ins->flags |= MONO_INST_FAULT;
11304 MONO_ADD_INS (cfg->cbb, ins);
11305 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11306 cfg->cbb->has_array_access = TRUE;
11314 if (sp [0]->type != STACK_OBJ)
11317 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11319 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11320 CHECK_TYPELOAD (klass);
11321 /* we need to make sure that this array is exactly the type it needs
11322 * to be for correctness. the wrappers are lax with their usage
11323 * so we need to ignore them here
11325 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11326 MonoClass *array_class = mono_array_class_get (klass, 1);
11327 mini_emit_check_array_type (cfg, sp [0], array_class);
11328 CHECK_TYPELOAD (array_class);
11332 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11337 case CEE_LDELEM_I1:
11338 case CEE_LDELEM_U1:
11339 case CEE_LDELEM_I2:
11340 case CEE_LDELEM_U2:
11341 case CEE_LDELEM_I4:
11342 case CEE_LDELEM_U4:
11343 case CEE_LDELEM_I8:
11345 case CEE_LDELEM_R4:
11346 case CEE_LDELEM_R8:
11347 case CEE_LDELEM_REF: {
11353 if (*ip == CEE_LDELEM) {
11355 token = read32 (ip + 1);
11356 klass = mini_get_class (method, token, generic_context);
11357 CHECK_TYPELOAD (klass);
11358 mono_class_init (klass);
11361 klass = array_access_to_klass (*ip);
11363 if (sp [0]->type != STACK_OBJ)
11366 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11368 if (mini_is_gsharedvt_variable_klass (klass)) {
11369 // FIXME-VT: OP_ICONST optimization
11370 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11371 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11372 ins->opcode = OP_LOADV_MEMBASE;
11373 } else if (sp [1]->opcode == OP_ICONST) {
11374 int array_reg = sp [0]->dreg;
11375 int index_reg = sp [1]->dreg;
11376 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11378 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11379 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11381 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11382 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11384 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11385 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11388 if (*ip == CEE_LDELEM)
11395 case CEE_STELEM_I1:
11396 case CEE_STELEM_I2:
11397 case CEE_STELEM_I4:
11398 case CEE_STELEM_I8:
11399 case CEE_STELEM_R4:
11400 case CEE_STELEM_R8:
11401 case CEE_STELEM_REF:
11406 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11408 if (*ip == CEE_STELEM) {
11410 token = read32 (ip + 1);
11411 klass = mini_get_class (method, token, generic_context);
11412 CHECK_TYPELOAD (klass);
11413 mono_class_init (klass);
11416 klass = array_access_to_klass (*ip);
11418 if (sp [0]->type != STACK_OBJ)
11421 emit_array_store (cfg, klass, sp, TRUE);
11423 if (*ip == CEE_STELEM)
11430 case CEE_CKFINITE: {
11434 if (cfg->llvm_only) {
11435 MonoInst *iargs [1];
11437 iargs [0] = sp [0];
11438 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11440 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11441 ins->sreg1 = sp [0]->dreg;
11442 ins->dreg = alloc_freg (cfg);
11443 ins->type = STACK_R8;
11444 MONO_ADD_INS (cfg->cbb, ins);
11446 *sp++ = mono_decompose_opcode (cfg, ins);
11452 case CEE_REFANYVAL: {
11453 MonoInst *src_var, *src;
11455 int klass_reg = alloc_preg (cfg);
11456 int dreg = alloc_preg (cfg);
11458 GSHAREDVT_FAILURE (*ip);
11461 MONO_INST_NEW (cfg, ins, *ip);
11464 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11465 CHECK_TYPELOAD (klass);
11467 context_used = mini_class_check_context_used (cfg, klass);
11470 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11472 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11473 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11476 if (context_used) {
11477 MonoInst *klass_ins;
11479 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11480 klass, MONO_RGCTX_INFO_KLASS);
11483 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11484 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11486 mini_emit_class_check (cfg, klass_reg, klass);
11488 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11489 ins->type = STACK_MP;
11490 ins->klass = klass;
11495 case CEE_MKREFANY: {
11496 MonoInst *loc, *addr;
11498 GSHAREDVT_FAILURE (*ip);
11501 MONO_INST_NEW (cfg, ins, *ip);
11504 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11505 CHECK_TYPELOAD (klass);
11507 context_used = mini_class_check_context_used (cfg, klass);
11509 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11510 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11512 if (context_used) {
11513 MonoInst *const_ins;
11514 int type_reg = alloc_preg (cfg);
11516 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11517 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11519 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11521 int const_reg = alloc_preg (cfg);
11522 int type_reg = alloc_preg (cfg);
11524 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11525 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11527 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11529 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11531 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11532 ins->type = STACK_VTYPE;
11533 ins->klass = mono_defaults.typed_reference_class;
11538 case CEE_LDTOKEN: {
11540 MonoClass *handle_class;
11542 CHECK_STACK_OVF (1);
11545 n = read32 (ip + 1);
11547 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11548 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11549 handle = mono_method_get_wrapper_data (method, n);
11550 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11551 if (handle_class == mono_defaults.typehandle_class)
11552 handle = &((MonoClass*)handle)->byval_arg;
11555 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11560 mono_class_init (handle_class);
11561 if (cfg->gshared) {
11562 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11563 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11564 /* This case handles ldtoken
11565 of an open type, like for
11568 } else if (handle_class == mono_defaults.typehandle_class) {
11569 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11570 } else if (handle_class == mono_defaults.fieldhandle_class)
11571 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11572 else if (handle_class == mono_defaults.methodhandle_class)
11573 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11575 g_assert_not_reached ();
11578 if ((cfg->opt & MONO_OPT_SHARED) &&
11579 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11580 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11581 MonoInst *addr, *vtvar, *iargs [3];
11582 int method_context_used;
11584 method_context_used = mini_method_check_context_used (cfg, method);
11586 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11588 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11589 EMIT_NEW_ICONST (cfg, iargs [1], n);
11590 if (method_context_used) {
11591 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11592 method, MONO_RGCTX_INFO_METHOD);
11593 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11595 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11596 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11598 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11600 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11602 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11604 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11605 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11606 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11607 (cmethod->klass == mono_defaults.systemtype_class) &&
11608 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11609 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11611 mono_class_init (tclass);
11612 if (context_used) {
11613 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11614 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11615 } else if (cfg->compile_aot) {
11616 if (method->wrapper_type) {
11617 error_init (&error); //got to do it since there are multiple conditionals below
11618 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11619 /* Special case for static synchronized wrappers */
11620 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11622 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11623 /* FIXME: n is not a normal token */
11625 EMIT_NEW_PCONST (cfg, ins, NULL);
11628 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11631 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11633 EMIT_NEW_PCONST (cfg, ins, rt);
11635 ins->type = STACK_OBJ;
11636 ins->klass = cmethod->klass;
11639 MonoInst *addr, *vtvar;
11641 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11643 if (context_used) {
11644 if (handle_class == mono_defaults.typehandle_class) {
11645 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11646 mono_class_from_mono_type ((MonoType *)handle),
11647 MONO_RGCTX_INFO_TYPE);
11648 } else if (handle_class == mono_defaults.methodhandle_class) {
11649 ins = emit_get_rgctx_method (cfg, context_used,
11650 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11651 } else if (handle_class == mono_defaults.fieldhandle_class) {
11652 ins = emit_get_rgctx_field (cfg, context_used,
11653 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11655 g_assert_not_reached ();
11657 } else if (cfg->compile_aot) {
11658 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11660 EMIT_NEW_PCONST (cfg, ins, handle);
11662 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11664 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11674 if (sp [-1]->type != STACK_OBJ)
11677 MONO_INST_NEW (cfg, ins, OP_THROW);
11679 ins->sreg1 = sp [0]->dreg;
11681 cfg->cbb->out_of_line = TRUE;
11682 MONO_ADD_INS (cfg->cbb, ins);
11683 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11684 MONO_ADD_INS (cfg->cbb, ins);
11687 link_bblock (cfg, cfg->cbb, end_bblock);
11688 start_new_bblock = 1;
11689 /* This can complicate code generation for llvm since the return value might not be defined */
11690 if (COMPILE_LLVM (cfg))
11691 INLINE_FAILURE ("throw");
11693 case CEE_ENDFINALLY:
11694 if (!ip_in_finally_clause (cfg, ip - header->code))
11696 /* mono_save_seq_point_info () depends on this */
11697 if (sp != stack_start)
11698 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11699 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11700 MONO_ADD_INS (cfg->cbb, ins);
11702 start_new_bblock = 1;
11705 * Control will leave the method so empty the stack, otherwise
11706 * the next basic block will start with a nonempty stack.
11708 while (sp != stack_start) {
11713 case CEE_LEAVE_S: {
11716 if (*ip == CEE_LEAVE) {
11718 target = ip + 5 + (gint32)read32(ip + 1);
11721 target = ip + 2 + (signed char)(ip [1]);
11724 /* empty the stack */
11725 while (sp != stack_start) {
11730 * If this leave statement is in a catch block, check for a
11731 * pending exception, and rethrow it if necessary.
11732 * We avoid doing this in runtime invoke wrappers, since those are called
11733 * by native code which expects the wrapper to catch all exceptions.
11735 for (i = 0; i < header->num_clauses; ++i) {
11736 MonoExceptionClause *clause = &header->clauses [i];
11739 * Use <= in the final comparison to handle clauses with multiple
11740 * leave statements, like in bug #78024.
11741 * The ordering of the exception clauses guarantees that we find the
11742 * innermost clause.
11744 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11746 MonoBasicBlock *dont_throw;
11751 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11754 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11756 NEW_BBLOCK (cfg, dont_throw);
11759 * Currently, we always rethrow the abort exception, despite the
11760 * fact that this is not correct. See thread6.cs for an example.
11761 * But propagating the abort exception is more important than
11762 * getting the semantics right.
11764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11765 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11766 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11768 MONO_START_BB (cfg, dont_throw);
11773 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11776 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11778 MonoExceptionClause *clause;
11780 for (tmp = handlers; tmp; tmp = tmp->next) {
11781 clause = (MonoExceptionClause *)tmp->data;
11782 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11784 link_bblock (cfg, cfg->cbb, tblock);
11785 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11786 ins->inst_target_bb = tblock;
11787 ins->inst_eh_block = clause;
11788 MONO_ADD_INS (cfg->cbb, ins);
11789 cfg->cbb->has_call_handler = 1;
11790 if (COMPILE_LLVM (cfg)) {
11791 MonoBasicBlock *target_bb;
11794 * Link the finally bblock with the target, since it will
11795 * conceptually branch there.
11797 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11798 GET_BBLOCK (cfg, target_bb, target);
11799 link_bblock (cfg, tblock, target_bb);
11802 g_list_free (handlers);
11805 MONO_INST_NEW (cfg, ins, OP_BR);
11806 MONO_ADD_INS (cfg->cbb, ins);
11807 GET_BBLOCK (cfg, tblock, target);
11808 link_bblock (cfg, cfg->cbb, tblock);
11809 ins->inst_target_bb = tblock;
11811 start_new_bblock = 1;
11813 if (*ip == CEE_LEAVE)
11822 * Mono specific opcodes
11824 case MONO_CUSTOM_PREFIX: {
11826 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11830 case CEE_MONO_ICALL: {
11832 MonoJitICallInfo *info;
11834 token = read32 (ip + 2);
11835 func = mono_method_get_wrapper_data (method, token);
11836 info = mono_find_jit_icall_by_addr (func);
11838 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11841 CHECK_STACK (info->sig->param_count);
11842 sp -= info->sig->param_count;
11844 ins = mono_emit_jit_icall (cfg, info->func, sp);
11845 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11849 inline_costs += 10 * num_calls++;
11853 case CEE_MONO_LDPTR_CARD_TABLE:
11854 case CEE_MONO_LDPTR_NURSERY_START:
11855 case CEE_MONO_LDPTR_NURSERY_BITS:
11856 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11857 CHECK_STACK_OVF (1);
11860 case CEE_MONO_LDPTR_CARD_TABLE:
11861 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11863 case CEE_MONO_LDPTR_NURSERY_START:
11864 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11866 case CEE_MONO_LDPTR_NURSERY_BITS:
11867 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11869 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11870 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11876 inline_costs += 10 * num_calls++;
11879 case CEE_MONO_LDPTR: {
11882 CHECK_STACK_OVF (1);
11884 token = read32 (ip + 2);
11886 ptr = mono_method_get_wrapper_data (method, token);
11887 EMIT_NEW_PCONST (cfg, ins, ptr);
11890 inline_costs += 10 * num_calls++;
11891 /* Can't embed random pointers into AOT code */
11895 case CEE_MONO_JIT_ICALL_ADDR: {
11896 MonoJitICallInfo *callinfo;
11899 CHECK_STACK_OVF (1);
11901 token = read32 (ip + 2);
11903 ptr = mono_method_get_wrapper_data (method, token);
11904 callinfo = mono_find_jit_icall_by_addr (ptr);
11905 g_assert (callinfo);
11906 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11909 inline_costs += 10 * num_calls++;
11912 case CEE_MONO_ICALL_ADDR: {
11913 MonoMethod *cmethod;
11916 CHECK_STACK_OVF (1);
11918 token = read32 (ip + 2);
11920 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11922 if (cfg->compile_aot) {
11923 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11925 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11926 * before the call, it's not needed when using direct pinvoke.
11927 * This is not an optimization, but its used to avoid looking up pinvokes
11928 * on platforms which don't support dlopen ().
11930 EMIT_NEW_PCONST (cfg, ins, NULL);
11932 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11935 ptr = mono_lookup_internal_call (cmethod);
11937 EMIT_NEW_PCONST (cfg, ins, ptr);
11943 case CEE_MONO_VTADDR: {
11944 MonoInst *src_var, *src;
11950 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11951 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11956 case CEE_MONO_NEWOBJ: {
11957 MonoInst *iargs [2];
11959 CHECK_STACK_OVF (1);
11961 token = read32 (ip + 2);
11962 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11963 mono_class_init (klass);
11964 NEW_DOMAINCONST (cfg, iargs [0]);
11965 MONO_ADD_INS (cfg->cbb, iargs [0]);
11966 NEW_CLASSCONST (cfg, iargs [1], klass);
11967 MONO_ADD_INS (cfg->cbb, iargs [1]);
11968 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11970 inline_costs += 10 * num_calls++;
11973 case CEE_MONO_OBJADDR:
11976 MONO_INST_NEW (cfg, ins, OP_MOVE);
11977 ins->dreg = alloc_ireg_mp (cfg);
11978 ins->sreg1 = sp [0]->dreg;
11979 ins->type = STACK_MP;
11980 MONO_ADD_INS (cfg->cbb, ins);
11984 case CEE_MONO_LDNATIVEOBJ:
11986 * Similar to LDOBJ, but instead load the unmanaged
11987 * representation of the vtype to the stack.
11992 token = read32 (ip + 2);
11993 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11994 g_assert (klass->valuetype);
11995 mono_class_init (klass);
11998 MonoInst *src, *dest, *temp;
12001 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12002 temp->backend.is_pinvoke = 1;
12003 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12004 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12006 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12007 dest->type = STACK_VTYPE;
12008 dest->klass = klass;
12014 case CEE_MONO_RETOBJ: {
12016 * Same as RET, but return the native representation of a vtype
12019 g_assert (cfg->ret);
12020 g_assert (mono_method_signature (method)->pinvoke);
12025 token = read32 (ip + 2);
12026 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12028 if (!cfg->vret_addr) {
12029 g_assert (cfg->ret_var_is_local);
12031 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12033 EMIT_NEW_RETLOADA (cfg, ins);
12035 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12037 if (sp != stack_start)
12040 MONO_INST_NEW (cfg, ins, OP_BR);
12041 ins->inst_target_bb = end_bblock;
12042 MONO_ADD_INS (cfg->cbb, ins);
12043 link_bblock (cfg, cfg->cbb, end_bblock);
12044 start_new_bblock = 1;
12048 case CEE_MONO_SAVE_LMF:
12049 case CEE_MONO_RESTORE_LMF:
12052 case CEE_MONO_CLASSCONST:
12053 CHECK_STACK_OVF (1);
12055 token = read32 (ip + 2);
12056 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12059 inline_costs += 10 * num_calls++;
12061 case CEE_MONO_NOT_TAKEN:
12062 cfg->cbb->out_of_line = TRUE;
12065 case CEE_MONO_TLS: {
12068 CHECK_STACK_OVF (1);
12070 key = (MonoTlsKey)read32 (ip + 2);
12071 g_assert (key < TLS_KEY_NUM);
12073 ins = mono_create_tls_get (cfg, key);
12075 ins->type = STACK_PTR;
12080 case CEE_MONO_DYN_CALL: {
12081 MonoCallInst *call;
12083 /* It would be easier to call a trampoline, but that would put an
12084 * extra frame on the stack, confusing exception handling. So
12085 * implement it inline using an opcode for now.
12088 if (!cfg->dyn_call_var) {
12089 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12090 /* prevent it from being register allocated */
12091 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12094 /* Has to use a call inst since the local register allocator expects it */
12095 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12096 ins = (MonoInst*)call;
12098 ins->sreg1 = sp [0]->dreg;
12099 ins->sreg2 = sp [1]->dreg;
12100 MONO_ADD_INS (cfg->cbb, ins);
12102 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12105 inline_costs += 10 * num_calls++;
12109 case CEE_MONO_MEMORY_BARRIER: {
12111 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
12115 case CEE_MONO_ATOMIC_STORE_I4: {
12116 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12122 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12123 ins->dreg = sp [0]->dreg;
12124 ins->sreg1 = sp [1]->dreg;
12125 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12126 MONO_ADD_INS (cfg->cbb, ins);
12131 case CEE_MONO_JIT_ATTACH: {
12132 MonoInst *args [16], *domain_ins;
12133 MonoInst *ad_ins, *jit_tls_ins;
12134 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12136 g_assert (!mono_threads_is_coop_enabled ());
12138 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12140 EMIT_NEW_PCONST (cfg, ins, NULL);
12141 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12143 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12144 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12146 if (ad_ins && jit_tls_ins) {
12147 NEW_BBLOCK (cfg, next_bb);
12148 NEW_BBLOCK (cfg, call_bb);
12150 if (cfg->compile_aot) {
12151 /* AOT code is only used in the root domain */
12152 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12154 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12156 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12163 MONO_START_BB (cfg, call_bb);
12166 /* AOT code is only used in the root domain */
12167 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12168 if (cfg->compile_aot) {
12172 * This is called on unattached threads, so it cannot go through the trampoline
12173 * infrastructure. Use an indirect call through a got slot initialized at load time
12176 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12177 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12179 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12181 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12184 MONO_START_BB (cfg, next_bb);
12189 case CEE_MONO_JIT_DETACH: {
12190 MonoInst *args [16];
12192 /* Restore the original domain */
12193 dreg = alloc_ireg (cfg);
12194 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12195 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12199 case CEE_MONO_CALLI_EXTRA_ARG: {
12201 MonoMethodSignature *fsig;
12205 * This is the same as CEE_CALLI, but passes an additional argument
12206 * to the called method in llvmonly mode.
12207 * This is only used by delegate invoke wrappers to call the
12208 * actual delegate method.
12210 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12213 token = read32 (ip + 2);
12221 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12224 if (cfg->llvm_only)
12225 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12227 n = fsig->param_count + fsig->hasthis + 1;
12234 if (cfg->llvm_only) {
12236 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12237 * cconv. This is set by mono_init_delegate ().
12239 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12240 MonoInst *callee = addr;
12241 MonoInst *call, *localloc_ins;
12242 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12243 int low_bit_reg = alloc_preg (cfg);
12245 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12246 NEW_BBLOCK (cfg, end_bb);
12248 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12250 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12252 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12253 addr = emit_get_rgctx_sig (cfg, context_used,
12254 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12256 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12258 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12259 ins->dreg = alloc_preg (cfg);
12260 ins->inst_imm = 2 * SIZEOF_VOID_P;
12261 MONO_ADD_INS (cfg->cbb, ins);
12262 localloc_ins = ins;
12263 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12264 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12267 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12268 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12270 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12271 MONO_START_BB (cfg, is_gsharedvt_bb);
12272 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12273 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12274 ins->dreg = call->dreg;
12276 MONO_START_BB (cfg, end_bb);
12278 /* Caller uses a normal calling conv */
12280 MonoInst *callee = addr;
12281 MonoInst *call, *localloc_ins;
12282 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12283 int low_bit_reg = alloc_preg (cfg);
12285 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12286 NEW_BBLOCK (cfg, end_bb);
12288 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12289 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12290 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12292 /* Normal case: callee uses a normal cconv, no conversion is needed */
12293 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12294 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12295 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12296 MONO_START_BB (cfg, is_gsharedvt_bb);
12297 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12298 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12299 MONO_ADD_INS (cfg->cbb, addr);
12301 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12303 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12304 ins->dreg = alloc_preg (cfg);
12305 ins->inst_imm = 2 * SIZEOF_VOID_P;
12306 MONO_ADD_INS (cfg->cbb, ins);
12307 localloc_ins = ins;
12308 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12310 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12312 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12313 ins->dreg = call->dreg;
12314 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12316 MONO_START_BB (cfg, end_bb);
12319 /* Same as CEE_CALLI */
12320 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12322 * We pass the address to the gsharedvt trampoline in the rgctx reg
12324 MonoInst *callee = addr;
12326 addr = emit_get_rgctx_sig (cfg, context_used,
12327 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12328 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12330 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12334 if (!MONO_TYPE_IS_VOID (fsig->ret))
12335 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12337 CHECK_CFG_EXCEPTION;
12341 constrained_class = NULL;
12344 case CEE_MONO_LDDOMAIN:
12345 CHECK_STACK_OVF (1);
12346 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12350 case CEE_MONO_GET_LAST_ERROR:
12352 CHECK_STACK_OVF (1);
12354 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12355 ins->dreg = alloc_dreg (cfg, STACK_I4);
12356 ins->type = STACK_I4;
12357 MONO_ADD_INS (cfg->cbb, ins);
12362 case CEE_MONO_GET_RGCTX_ARG:
12364 CHECK_STACK_OVF (1);
12366 mono_create_rgctx_var (cfg);
12368 MONO_INST_NEW (cfg, ins, OP_MOVE);
12369 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12370 ins->sreg1 = cfg->rgctx_var->dreg;
12371 ins->type = STACK_PTR;
12372 MONO_ADD_INS (cfg->cbb, ins);
12378 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12384 case CEE_PREFIX1: {
12387 case CEE_ARGLIST: {
12388 /* somewhat similar to LDTOKEN */
12389 MonoInst *addr, *vtvar;
12390 CHECK_STACK_OVF (1);
12391 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12393 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12394 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12396 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12397 ins->type = STACK_VTYPE;
12398 ins->klass = mono_defaults.argumenthandle_class;
12408 MonoInst *cmp, *arg1, *arg2;
12416 * The following transforms:
12417 * CEE_CEQ into OP_CEQ
12418 * CEE_CGT into OP_CGT
12419 * CEE_CGT_UN into OP_CGT_UN
12420 * CEE_CLT into OP_CLT
12421 * CEE_CLT_UN into OP_CLT_UN
12423 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12425 MONO_INST_NEW (cfg, ins, cmp->opcode);
12426 cmp->sreg1 = arg1->dreg;
12427 cmp->sreg2 = arg2->dreg;
12428 type_from_op (cfg, cmp, arg1, arg2);
12430 add_widen_op (cfg, cmp, &arg1, &arg2);
12431 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12432 cmp->opcode = OP_LCOMPARE;
12433 else if (arg1->type == STACK_R4)
12434 cmp->opcode = OP_RCOMPARE;
12435 else if (arg1->type == STACK_R8)
12436 cmp->opcode = OP_FCOMPARE;
12438 cmp->opcode = OP_ICOMPARE;
12439 MONO_ADD_INS (cfg->cbb, cmp);
12440 ins->type = STACK_I4;
12441 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12442 type_from_op (cfg, ins, arg1, arg2);
12444 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12446 * The backends expect the fceq opcodes to do the
12449 ins->sreg1 = cmp->sreg1;
12450 ins->sreg2 = cmp->sreg2;
12453 MONO_ADD_INS (cfg->cbb, ins);
12459 MonoInst *argconst;
12460 MonoMethod *cil_method;
12462 CHECK_STACK_OVF (1);
12464 n = read32 (ip + 2);
12465 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12468 mono_class_init (cmethod->klass);
12470 mono_save_token_info (cfg, image, n, cmethod);
12472 context_used = mini_method_check_context_used (cfg, cmethod);
12474 cil_method = cmethod;
12475 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12476 emit_method_access_failure (cfg, method, cil_method);
12478 if (mono_security_core_clr_enabled ())
12479 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12482 * Optimize the common case of ldftn+delegate creation
12484 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12485 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12486 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12487 MonoInst *target_ins, *handle_ins;
12488 MonoMethod *invoke;
12489 int invoke_context_used;
12491 invoke = mono_get_delegate_invoke (ctor_method->klass);
12492 if (!invoke || !mono_method_signature (invoke))
12495 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12497 target_ins = sp [-1];
12499 if (mono_security_core_clr_enabled ())
12500 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12502 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12503 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12504 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12506 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12510 /* FIXME: SGEN support */
12511 if (invoke_context_used == 0 || cfg->llvm_only) {
12513 if (cfg->verbose_level > 3)
12514 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12515 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12518 CHECK_CFG_EXCEPTION;
12528 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12529 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12533 inline_costs += 10 * num_calls++;
12536 case CEE_LDVIRTFTN: {
12537 MonoInst *args [2];
12541 n = read32 (ip + 2);
12542 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12545 mono_class_init (cmethod->klass);
12547 context_used = mini_method_check_context_used (cfg, cmethod);
12549 if (mono_security_core_clr_enabled ())
12550 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12553 * Optimize the common case of ldvirtftn+delegate creation
12555 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12556 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12557 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12558 MonoInst *target_ins, *handle_ins;
12559 MonoMethod *invoke;
12560 int invoke_context_used;
12561 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12563 invoke = mono_get_delegate_invoke (ctor_method->klass);
12564 if (!invoke || !mono_method_signature (invoke))
12567 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12569 target_ins = sp [-1];
12571 if (mono_security_core_clr_enabled ())
12572 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12574 /* FIXME: SGEN support */
12575 if (invoke_context_used == 0 || cfg->llvm_only) {
12577 if (cfg->verbose_level > 3)
12578 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12579 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12582 CHECK_CFG_EXCEPTION;
12595 args [1] = emit_get_rgctx_method (cfg, context_used,
12596 cmethod, MONO_RGCTX_INFO_METHOD);
12599 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12601 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12604 inline_costs += 10 * num_calls++;
12608 CHECK_STACK_OVF (1);
12610 n = read16 (ip + 2);
12612 EMIT_NEW_ARGLOAD (cfg, ins, n);
12617 CHECK_STACK_OVF (1);
12619 n = read16 (ip + 2);
12621 NEW_ARGLOADA (cfg, ins, n);
12622 MONO_ADD_INS (cfg->cbb, ins);
12630 n = read16 (ip + 2);
12632 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12634 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12638 CHECK_STACK_OVF (1);
12640 n = read16 (ip + 2);
12642 EMIT_NEW_LOCLOAD (cfg, ins, n);
12647 unsigned char *tmp_ip;
12648 CHECK_STACK_OVF (1);
12650 n = read16 (ip + 2);
12653 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12659 EMIT_NEW_LOCLOADA (cfg, ins, n);
12668 n = read16 (ip + 2);
12670 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12672 emit_stloc_ir (cfg, sp, header, n);
12676 case CEE_LOCALLOC: {
12678 MonoBasicBlock *non_zero_bb, *end_bb;
12679 int alloc_ptr = alloc_preg (cfg);
12681 if (sp != stack_start)
12683 if (cfg->method != method)
12685 * Inlining this into a loop in a parent could lead to
12686 * stack overflows which is different behavior than the
12687 * non-inlined case, thus disable inlining in this case.
12689 INLINE_FAILURE("localloc");
12691 NEW_BBLOCK (cfg, non_zero_bb);
12692 NEW_BBLOCK (cfg, end_bb);
12694 /* if size != zero */
12695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12698 //size is zero, so result is NULL
12699 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12702 MONO_START_BB (cfg, non_zero_bb);
12703 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12704 ins->dreg = alloc_ptr;
12705 ins->sreg1 = sp [0]->dreg;
12706 ins->type = STACK_PTR;
12707 MONO_ADD_INS (cfg->cbb, ins);
12709 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12711 ins->flags |= MONO_INST_INIT;
12713 MONO_START_BB (cfg, end_bb);
12714 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12715 ins->type = STACK_PTR;
12721 case CEE_ENDFILTER: {
12722 MonoExceptionClause *clause, *nearest;
12727 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12729 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12730 ins->sreg1 = (*sp)->dreg;
12731 MONO_ADD_INS (cfg->cbb, ins);
12732 start_new_bblock = 1;
12736 for (cc = 0; cc < header->num_clauses; ++cc) {
12737 clause = &header->clauses [cc];
12738 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12739 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12740 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12743 g_assert (nearest);
12744 if ((ip - header->code) != nearest->handler_offset)
12749 case CEE_UNALIGNED_:
12750 ins_flag |= MONO_INST_UNALIGNED;
12751 /* FIXME: record alignment? we can assume 1 for now */
12755 case CEE_VOLATILE_:
12756 ins_flag |= MONO_INST_VOLATILE;
12760 ins_flag |= MONO_INST_TAILCALL;
12761 cfg->flags |= MONO_CFG_HAS_TAIL;
12762 /* Can't inline tail calls at this time */
12763 inline_costs += 100000;
12770 token = read32 (ip + 2);
12771 klass = mini_get_class (method, token, generic_context);
12772 CHECK_TYPELOAD (klass);
12773 if (generic_class_is_reference_type (cfg, klass))
12774 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12776 mini_emit_initobj (cfg, *sp, NULL, klass);
12780 case CEE_CONSTRAINED_:
12782 token = read32 (ip + 2);
12783 constrained_class = mini_get_class (method, token, generic_context);
12784 CHECK_TYPELOAD (constrained_class);
12788 case CEE_INITBLK: {
12789 MonoInst *iargs [3];
12793 /* Skip optimized paths for volatile operations. */
12794 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12795 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12796 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12797 /* emit_memset only works when val == 0 */
12798 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12801 iargs [0] = sp [0];
12802 iargs [1] = sp [1];
12803 iargs [2] = sp [2];
12804 if (ip [1] == CEE_CPBLK) {
12806 * FIXME: It's unclear whether we should be emitting both the acquire
12807 * and release barriers for cpblk. It is technically both a load and
12808 * store operation, so it seems like that's the sensible thing to do.
12810 * FIXME: We emit full barriers on both sides of the operation for
12811 * simplicity. We should have a separate atomic memcpy method instead.
12813 MonoMethod *memcpy_method = mini_get_memcpy_method ();
12815 if (ins_flag & MONO_INST_VOLATILE)
12816 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12818 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12819 call->flags |= ins_flag;
12821 if (ins_flag & MONO_INST_VOLATILE)
12822 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12824 MonoMethod *memset_method = mini_get_memset_method ();
12825 if (ins_flag & MONO_INST_VOLATILE) {
12826 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12827 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12829 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12830 call->flags |= ins_flag;
12841 ins_flag |= MONO_INST_NOTYPECHECK;
12843 ins_flag |= MONO_INST_NORANGECHECK;
12844 /* we ignore the no-nullcheck for now since we
12845 * really do it explicitly only when doing callvirt->call
12849 case CEE_RETHROW: {
12851 int handler_offset = -1;
12853 for (i = 0; i < header->num_clauses; ++i) {
12854 MonoExceptionClause *clause = &header->clauses [i];
12855 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12856 handler_offset = clause->handler_offset;
12861 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12863 if (handler_offset == -1)
12866 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12867 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12868 ins->sreg1 = load->dreg;
12869 MONO_ADD_INS (cfg->cbb, ins);
12871 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12872 MONO_ADD_INS (cfg->cbb, ins);
12875 link_bblock (cfg, cfg->cbb, end_bblock);
12876 start_new_bblock = 1;
12884 CHECK_STACK_OVF (1);
12886 token = read32 (ip + 2);
12887 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12888 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12891 val = mono_type_size (type, &ialign);
12893 MonoClass *klass = mini_get_class (method, token, generic_context);
12894 CHECK_TYPELOAD (klass);
12896 val = mono_type_size (&klass->byval_arg, &ialign);
12898 if (mini_is_gsharedvt_klass (klass))
12899 GSHAREDVT_FAILURE (*ip);
12901 EMIT_NEW_ICONST (cfg, ins, val);
12906 case CEE_REFANYTYPE: {
12907 MonoInst *src_var, *src;
12909 GSHAREDVT_FAILURE (*ip);
12915 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12917 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12918 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12919 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12924 case CEE_READONLY_:
12937 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12947 g_warning ("opcode 0x%02x not handled", *ip);
12951 if (start_new_bblock != 1)
12954 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12955 if (cfg->cbb->next_bb) {
12956 /* This could already be set because of inlining, #693905 */
12957 MonoBasicBlock *bb = cfg->cbb;
12959 while (bb->next_bb)
12961 bb->next_bb = end_bblock;
12963 cfg->cbb->next_bb = end_bblock;
12966 if (cfg->method == method && cfg->domainvar) {
12968 MonoInst *get_domain;
12970 cfg->cbb = init_localsbb;
12972 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12973 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12974 MONO_ADD_INS (cfg->cbb, store);
12977 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12978 if (cfg->compile_aot)
12979 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12980 mono_get_got_var (cfg);
12983 if (cfg->method == method && cfg->got_var)
12984 mono_emit_load_got_addr (cfg);
12986 if (init_localsbb) {
12987 cfg->cbb = init_localsbb;
12989 for (i = 0; i < header->num_locals; ++i) {
12990 emit_init_local (cfg, i, header->locals [i], init_locals);
12994 if (cfg->init_ref_vars && cfg->method == method) {
12995 /* Emit initialization for ref vars */
12996 // FIXME: Avoid duplication initialization for IL locals.
12997 for (i = 0; i < cfg->num_varinfo; ++i) {
12998 MonoInst *ins = cfg->varinfo [i];
13000 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13001 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13005 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13006 cfg->cbb = init_localsbb;
13007 emit_push_lmf (cfg);
13010 cfg->cbb = init_localsbb;
13011 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13014 MonoBasicBlock *bb;
13017 * Make seq points at backward branch targets interruptable.
13019 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13020 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13021 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13024 /* Add a sequence point for method entry/exit events */
13025 if (seq_points && cfg->gen_sdb_seq_points) {
13026 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13027 MONO_ADD_INS (init_localsbb, ins);
13028 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13029 MONO_ADD_INS (cfg->bb_exit, ins);
13033 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13034 * the code they refer to was dead (#11880).
13036 if (sym_seq_points) {
13037 for (i = 0; i < header->code_size; ++i) {
13038 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13041 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13042 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13049 if (cfg->method == method) {
13050 MonoBasicBlock *bb;
13051 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13052 if (bb == cfg->bb_init)
13055 bb->region = mono_find_block_region (cfg, bb->real_offset);
13057 mono_create_spvar_for_region (cfg, bb->region);
13058 if (cfg->verbose_level > 2)
13059 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13062 MonoBasicBlock *bb;
13063 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13064 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13065 bb->real_offset = inline_offset;
13069 if (inline_costs < 0) {
13072 /* Method is too large */
13073 mname = mono_method_full_name (method, TRUE);
13074 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13078 if ((cfg->verbose_level > 2) && (cfg->method == method))
13079 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13084 g_assert (!mono_error_ok (&cfg->error));
13088 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13092 set_exception_type_from_invalid_il (cfg, method, ip);
13096 g_slist_free (class_inits);
13097 mono_basic_block_free (original_bb);
13098 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13099 if (cfg->exception_type)
13102 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG store opcode to the corresponding
 * *_MEMBASE_IMM form, used when the value being stored turns out to be
 * a constant.  Asserts if OPCODE has no immediate counterpart.
 */
13106 store_membase_reg_to_store_membase_imm (int opcode)
13109 case OP_STORE_MEMBASE_REG:
13110 return OP_STORE_MEMBASE_IMM;
13111 case OP_STOREI1_MEMBASE_REG:
13112 return OP_STOREI1_MEMBASE_IMM;
13113 case OP_STOREI2_MEMBASE_REG:
13114 return OP_STOREI2_MEMBASE_IMM;
13115 case OP_STOREI4_MEMBASE_REG:
13116 return OP_STOREI4_MEMBASE_IMM;
13117 case OP_STOREI8_MEMBASE_REG:
13118 return OP_STOREI8_MEMBASE_IMM;
/* any other opcode is a caller bug: no immediate form exists */
13120 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register operand to the variant taking an
 * immediate operand (e.g. OP_IADD -> OP_IADD_IMM).  Covers 32-bit and
 * 64-bit ALU/shift ops, compares, membase stores, and x86/amd64
 * push/compare forms.  NOTE(review): the fallback return for opcodes
 * without an immediate form is presumably -1 -- confirm against the
 * full switch.
 */
13127 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU and shift ops */
13131 return OP_IADD_IMM;
13133 return OP_ISUB_IMM;
13135 return OP_IDIV_IMM;
13137 return OP_IDIV_UN_IMM;
13139 return OP_IREM_IMM;
13141 return OP_IREM_UN_IMM;
13143 return OP_IMUL_IMM;
13145 return OP_IAND_IMM;
13149 return OP_IXOR_IMM;
13151 return OP_ISHL_IMM;
13153 return OP_ISHR_IMM;
13155 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU and shift ops */
13158 return OP_LADD_IMM;
13160 return OP_LSUB_IMM;
13162 return OP_LAND_IMM;
13166 return OP_LXOR_IMM;
13168 return OP_LSHL_IMM;
13170 return OP_LSHR_IMM;
13172 return OP_LSHR_UN_IMM;
/* long rem only has an immediate form on 64-bit registers */
13173 #if SIZEOF_REGISTER == 8
13175 return OP_LREM_IMM;
/* compares */
13179 return OP_COMPARE_IMM;
13181 return OP_ICOMPARE_IMM;
13183 return OP_LCOMPARE_IMM;
/* membase stores of a constant value */
13185 case OP_STORE_MEMBASE_REG:
13186 return OP_STORE_MEMBASE_IMM;
13187 case OP_STOREI1_MEMBASE_REG:
13188 return OP_STOREI1_MEMBASE_IMM;
13189 case OP_STOREI2_MEMBASE_REG:
13190 return OP_STOREI2_MEMBASE_IMM;
13191 case OP_STOREI4_MEMBASE_REG:
13192 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific push and memory-compare forms */
13194 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13196 return OP_X86_PUSH_IMM;
13197 case OP_X86_COMPARE_MEMBASE_REG:
13198 return OP_X86_COMPARE_MEMBASE_IMM;
13200 #if defined(TARGET_AMD64)
13201 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13202 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect call through a register whose target is a known constant */
13204 case OP_VOIDCALL_REG:
13205 return OP_VOIDCALL;
/* localloc with a constant size */
13213 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL indirect-load opcode to the IR
 * OP_LOAD*_MEMBASE opcode with the matching size and signedness.
 * Asserts on any opcode that is not an ldind.
 */
13220 ldind_to_load_membase (int opcode)
13224 return OP_LOADI1_MEMBASE;
13226 return OP_LOADU1_MEMBASE;
13228 return OP_LOADI2_MEMBASE;
13230 return OP_LOADU2_MEMBASE;
13232 return OP_LOADI4_MEMBASE;
13234 return OP_LOADU4_MEMBASE;
/* native int and object references are both pointer-sized loads */
13236 return OP_LOAD_MEMBASE;
13237 case CEE_LDIND_REF:
13238 return OP_LOAD_MEMBASE;
13240 return OP_LOADI8_MEMBASE;
13242 return OP_LOADR4_MEMBASE;
13244 return OP_LOADR8_MEMBASE;
/* not an ldind opcode */
13246 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL indirect-store opcode to the IR
 * OP_STORE*_MEMBASE_REG opcode of the matching width.  Asserts on any
 * opcode that is not a stind.
 */
13253 stind_to_store_membase (int opcode)
13257 return OP_STOREI1_MEMBASE_REG;
13259 return OP_STOREI2_MEMBASE_REG;
13261 return OP_STOREI4_MEMBASE_REG;
/* native int and object references are both pointer-sized stores */
13263 case CEE_STIND_REF:
13264 return OP_STORE_MEMBASE_REG;
13266 return OP_STOREI8_MEMBASE_REG;
13268 return OP_STORER4_MEMBASE_REG;
13270 return OP_STORER8_MEMBASE_REG;
/* not a stind opcode */
13272 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, map an OP_LOAD*_MEMBASE opcode to the OP_LOAD*_MEM
 * form which loads from an absolute address (used when the base
 * register holds a known constant).  Presumably returns -1 on other
 * targets or for opcodes without a _MEM form -- TODO confirm the
 * fallthrough return.
 */
13279 mono_load_membase_to_load_mem (int opcode)
13281 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13282 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13284 case OP_LOAD_MEMBASE:
13285 return OP_LOAD_MEM;
13286 case OP_LOADU1_MEMBASE:
13287 return OP_LOADU1_MEM;
13288 case OP_LOADU2_MEMBASE:
13289 return OP_LOADU2_MEM;
13290 case OP_LOADI4_MEMBASE:
13291 return OP_LOADI4_MEM;
13292 case OP_LOADU4_MEMBASE:
13293 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64-bit registers */
13294 #if SIZEOF_REGISTER == 8
13295 case OP_LOADI8_MEMBASE:
13296 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Map an ALU opcode to the x86/amd64 read-modify-write form which
 * operates directly on a memory destination, used when the ALU result
 * is immediately stored back through STORE_OPCODE.  The leading guards
 * reject store widths the fused opcodes cannot encode.
 */
13305 op_to_op_dest_membase (int store_opcode, int opcode)
13307 #if defined(TARGET_X86)
/* on x86 only pointer-sized / 32-bit stores can be fused */
13308 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13313 return OP_X86_ADD_MEMBASE_REG;
13315 return OP_X86_SUB_MEMBASE_REG;
13317 return OP_X86_AND_MEMBASE_REG;
13319 return OP_X86_OR_MEMBASE_REG;
13321 return OP_X86_XOR_MEMBASE_REG;
/* immediate-operand variants */
13324 return OP_X86_ADD_MEMBASE_IMM;
13327 return OP_X86_SUB_MEMBASE_IMM;
13330 return OP_X86_AND_MEMBASE_IMM;
13333 return OP_X86_OR_MEMBASE_IMM;
13336 return OP_X86_XOR_MEMBASE_IMM;
13342 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte stores to be fused */
13343 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops share the X86_* opcodes */
13348 return OP_X86_ADD_MEMBASE_REG;
13350 return OP_X86_SUB_MEMBASE_REG;
13352 return OP_X86_AND_MEMBASE_REG;
13354 return OP_X86_OR_MEMBASE_REG;
13356 return OP_X86_XOR_MEMBASE_REG;
13358 return OP_X86_ADD_MEMBASE_IMM;
13360 return OP_X86_SUB_MEMBASE_IMM;
13362 return OP_X86_AND_MEMBASE_IMM;
13364 return OP_X86_OR_MEMBASE_IMM;
13366 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) ops use the AMD64_* opcodes */
13368 return OP_AMD64_ADD_MEMBASE_REG;
13370 return OP_AMD64_SUB_MEMBASE_REG;
13372 return OP_AMD64_AND_MEMBASE_REG;
13374 return OP_AMD64_OR_MEMBASE_REG;
13376 return OP_AMD64_XOR_MEMBASE_REG;
13379 return OP_AMD64_ADD_MEMBASE_IMM;
13382 return OP_AMD64_SUB_MEMBASE_IMM;
13385 return OP_AMD64_AND_MEMBASE_IMM;
13388 return OP_AMD64_OR_MEMBASE_IMM;
13391 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Map a compare-result opcode to an x86/amd64 SETcc-to-memory opcode
 * when the 1-byte boolean result is stored directly through
 * OP_STOREI1_MEMBASE_REG.  NOTE(review): the triggering case labels
 * (presumably OP_CEQ / OP_CNE) precede each check -- confirm against
 * the full switch.
 */
13401 op_to_op_store_membase (int store_opcode, int opcode)
13403 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13406 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13407 return OP_X86_SETEQ_MEMBASE;
13409 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13410 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the first source operand of OPCODE into a
 * memory-operand form of the instruction (push / compare with a
 * membase operand) on x86/amd64.  LOAD_OPCODE is the opcode of the
 * load which produced sreg1; on amd64, cfg->backend->ilp32 (x32)
 * makes OP_LOAD_MEMBASE a 32-bit load.
 */
13418 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13421 /* FIXME: This has sign extension issues */
13423 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13424 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only 32-bit / pointer-sized loads can be folded on x86 */
13427 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13432 return OP_X86_PUSH_MEMBASE;
13433 case OP_COMPARE_IMM:
13434 case OP_ICOMPARE_IMM:
13435 return OP_X86_COMPARE_MEMBASE_IMM;
13438 return OP_X86_COMPARE_MEMBASE_REG;
13442 #ifdef TARGET_AMD64
13443 /* FIXME: This has sign extension issues */
13445 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13446 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push only folds full-width (64-bit) loads */
13451 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13452 return OP_X86_PUSH_MEMBASE;
13454 /* FIXME: This only works for 32 bit immediates
13455 case OP_COMPARE_IMM:
13456 case OP_LCOMPARE_IMM:
13457 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13458 return OP_AMD64_COMPARE_MEMBASE_IMM;
13460 case OP_ICOMPARE_IMM:
13461 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13462 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* choose 32- vs 64-bit register compare based on the load width */
13466 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13467 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13468 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13469 return OP_AMD64_COMPARE_MEMBASE_REG;
13472 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13473 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the second source operand of OPCODE into a
 * reg-membase form of the instruction on x86/amd64.  LOAD_OPCODE is
 * the opcode of the load which produced sreg2; on amd64 the load width
 * selects between the 32-bit (X86_*/ICOMPARE) and 64-bit (AMD64_*)
 * opcode families.
 */
13482 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* only 32-bit / pointer-sized loads can be folded on x86 */
13485 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13491 return OP_X86_COMPARE_REG_MEMBASE;
13493 return OP_X86_ADD_REG_MEMBASE;
13495 return OP_X86_SUB_REG_MEMBASE;
13497 return OP_X86_AND_REG_MEMBASE;
13499 return OP_X86_OR_REG_MEMBASE;
13501 return OP_X86_XOR_REG_MEMBASE;
13505 #ifdef TARGET_AMD64
/* 32-bit loads (including pointer loads under ilp32/x32) */
13506 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13509 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13511 return OP_X86_ADD_REG_MEMBASE;
13513 return OP_X86_SUB_REG_MEMBASE;
13515 return OP_X86_AND_REG_MEMBASE;
13517 return OP_X86_OR_REG_MEMBASE;
13519 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit loads */
13521 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13525 return OP_AMD64_COMPARE_REG_MEMBASE;
13527 return OP_AMD64_ADD_REG_MEMBASE;
13529 return OP_AMD64_SUB_REG_MEMBASE;
13531 return OP_AMD64_AND_REG_MEMBASE;
13533 return OP_AMD64_OR_REG_MEMBASE;
13535 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which
 * are software-emulated on this target (long shifts on 32-bit
 * registers, and mul/div/rem when the arch emulates them), since the
 * emulation helpers need register operands.  Presumably returns -1 for
 * those cases -- TODO confirm the elided case bodies.
 */
13544 mono_op_to_op_imm_noemul (int opcode)
13547 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13553 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13560 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13565 return mono_op_to_op_imm (opcode);
13570 * mono_handle_global_vregs:
13572 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13576 mono_handle_global_vregs (MonoCompile *cfg)
13578 gint32 *vreg_to_bb;
13579 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, block_num + 1 = seen in one bb, -1 = seen in multiple bbs */
/* FIX: element type is gint32 (not gint32*), and the + 1 belongs inside the
 * multiplication; the old "sizeof (gint32*) * cfg->next_vreg + 1" over-allocated
 * on 64-bit and relied on accidental padding on 32-bit. */
13582 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
13584 #ifdef MONO_ARCH_SIMD_INTRINSICS
13585 if (cfg->uses_simd_intrinsics)
13586 mono_simd_simplify_indirection (cfg);
13589 /* Find local vregs used in more than one bb */
13590 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13591 MonoInst *ins = bb->code;
13592 int block_num = bb->block_num;
13594 if (cfg->verbose_level > 2)
13595 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13598 for (; ins; ins = ins->next) {
13599 const char *spec = INS_INFO (ins->opcode);
13600 int regtype = 0, regindex;
13603 if (G_UNLIKELY (cfg->verbose_level > 2))
13604 mono_print_ins (ins);
13606 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of this instruction */
13608 for (regindex = 0; regindex < 4; regindex ++) {
13611 if (regindex == 0) {
13612 regtype = spec [MONO_INST_DEST];
13613 if (regtype == ' ')
13616 } else if (regindex == 1) {
13617 regtype = spec [MONO_INST_SRC1];
13618 if (regtype == ' ')
13621 } else if (regindex == 2) {
13622 regtype = spec [MONO_INST_SRC2];
13623 if (regtype == ' ')
13626 } else if (regindex == 3) {
13627 regtype = spec [MONO_INST_SRC3];
13628 if (regtype == ' ')
13633 #if SIZEOF_REGISTER == 4
13634 /* In the LLVM case, the long opcodes are not decomposed */
13635 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13637 * Since some instructions reference the original long vreg,
13638 * and some reference the two component vregs, it is quite hard
13639 * to determine when it needs to be global. So be conservative.
13641 if (!get_vreg_to_inst (cfg, vreg)) {
13642 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13644 if (cfg->verbose_level > 2)
13645 printf ("LONG VREG R%d made global.\n", vreg);
13649 * Make the component vregs volatile since the optimizations can
13650 * get confused otherwise.
13652 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13653 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13657 g_assert (vreg != -1);
13659 prev_bb = vreg_to_bb [vreg];
13660 if (prev_bb == 0) {
13661 /* 0 is a valid block num */
13662 vreg_to_bb [vreg] = block_num + 1;
13663 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are never turned into variables */
13664 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13667 if (!get_vreg_to_inst (cfg, vreg)) {
13668 if (G_UNLIKELY (cfg->verbose_level > 2))
13669 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable type from the spec regtype / the ins klass */
13673 if (vreg_is_ref (cfg, vreg))
13674 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13676 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13679 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13682 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13686 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13689 g_assert_not_reached ();
13693 /* Flag as having been used in more than one bb */
13694 vreg_to_bb [vreg] = -1;
13700 /* If a variable is used in only one bblock, convert it into a local vreg */
13701 for (i = 0; i < cfg->num_varinfo; i++) {
13702 MonoInst *var = cfg->varinfo [i];
13703 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13705 switch (var->type) {
13711 #if SIZEOF_REGISTER == 8
13714 #if !defined(TARGET_X86)
13715 /* Enabling this screws up the fp stack on x86 */
13718 if (mono_arch_is_soft_float ())
13722 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13726 /* Arguments are implicitly global */
13727 /* Putting R4 vars into registers doesn't work currently */
13728 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13729 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13731 * Make that the variable's liveness interval doesn't contain a call, since
13732 * that would cause the lvreg to be spilled, making the whole optimization
13735 /* This is too slow for JIT compilation */
13737 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13739 int def_index, call_index, ins_index;
13740 gboolean spilled = FALSE;
13745 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13746 const char *spec = INS_INFO (ins->opcode);
13748 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13749 def_index = ins_index;
/* FIX: the second arm of the || previously repeated the SRC1/sreg1
 * check, so uses through the second source operand were never seen
 * and a call inside the live interval could go undetected. */
13751 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13752 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
13753 if (call_index > def_index) {
13759 if (MONO_IS_CALL (ins))
13760 call_index = ins_index;
13770 if (G_UNLIKELY (cfg->verbose_level > 2))
13771 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13772 var->flags |= MONO_INST_IS_DEAD;
13773 cfg->vreg_to_inst [var->dreg] = NULL;
13780 * Compress the varinfo and vars tables so the liveness computation is faster and
13781 * takes up less space.
13784 for (i = 0; i < cfg->num_varinfo; ++i) {
13785 MonoInst *var = cfg->varinfo [i];
13786 if (pos < i && cfg->locals_start == i)
13787 cfg->locals_start = pos;
13788 if (!(var->flags & MONO_INST_IS_DEAD)) {
13790 cfg->varinfo [pos] = cfg->varinfo [i];
13791 cfg->varinfo [pos]->inst_c0 = pos;
13792 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13793 cfg->vars [pos].idx = pos;
13794 #if SIZEOF_REGISTER == 4
13795 if (cfg->varinfo [pos]->type == STACK_I8) {
13796 /* Modify the two component vars too */
13799 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13800 var1->inst_c0 = pos;
13801 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13802 var1->inst_c0 = pos;
13809 cfg->num_varinfo = pos;
13810 if (cfg->locals_start > cfg->num_varinfo)
13811 cfg->locals_start = cfg->num_varinfo;
13815 * mono_allocate_gsharedvt_vars:
13817 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13818 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13821 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* zero-initialized, so 0 in gsharedvt_vreg_to_idx means "no entry" */
13825 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13827 for (i = 0; i < cfg->num_varinfo; ++i) {
13828 MonoInst *ins = cfg->varinfo [i];
13831 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* locals get a runtime-info slot; slot stored as idx + 1 so 0 stays "unmapped" */
13832 if (i >= cfg->locals_start) {
13834 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13835 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13836 ins->opcode = OP_GSHAREDVT_LOCAL;
13837 ins->inst_imm = idx;
/* arguments are marked with -1 and addressed via their register offset */
13840 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13841 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13848 * mono_spill_global_vars:
13850 * Generate spill code for variables which are not allocated to registers,
13851 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13852 * code is generated which could be optimized by the local optimization passes.
13855 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13857 MonoBasicBlock *bb;
13859 int orig_next_vreg;
13860 guint32 *vreg_to_lvreg;
13862 guint32 i, lvregs_len, lvregs_size;
13863 gboolean dest_has_lvreg = FALSE;
13864 MonoStackType stacktypes [128];
13865 MonoInst **live_range_start, **live_range_end;
13866 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13868 *need_local_opts = FALSE;
13870 memset (spec2, 0, sizeof (spec2));
/* Map ins-spec regtype characters ('i'/'l'/'f'/'x') to the stack type used when
 * allocating replacement lvregs for spilled values. */
13872 /* FIXME: Move this function to mini.c */
13873 stacktypes ['i'] = STACK_PTR;
13874 stacktypes ['l'] = STACK_I8;
13875 stacktypes ['f'] = STACK_R8;
13876 #ifdef MONO_ARCH_SIMD_INTRINSICS
13877 stacktypes ['x'] = STACK_VTYPE;
13880 #if SIZEOF_REGISTER == 4
13881 /* Create MonoInsts for longs */
13882 for (i = 0; i < cfg->num_varinfo; i++) {
13883 MonoInst *ins = cfg->varinfo [i];
13885 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13886 switch (ins->type) {
13891 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13894 g_assert (ins->opcode == OP_REGOFFSET);
/* On 32-bit targets a 64-bit var is split into two word-sized component vars
 * addressing the low/high halves of the same stack slot. */
13896 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13898 tree->opcode = OP_REGOFFSET;
13899 tree->inst_basereg = ins->inst_basereg;
13900 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13902 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13904 tree->opcode = OP_REGOFFSET;
13905 tree->inst_basereg = ins->inst_basereg;
13906 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13916 if (cfg->compute_gc_maps) {
13917 /* registers need liveness info even for !non refs */
13918 for (i = 0; i < cfg->num_varinfo; i++) {
13919 MonoInst *ins = cfg->varinfo [i];
13921 if (ins->opcode == OP_REGVAR)
13922 ins->flags |= MONO_INST_GC_TRACK;
13926 /* FIXME: widening and truncation */
13929 * As an optimization, when a variable allocated to the stack is first loaded into
13930 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13931 * the variable again.
13933 orig_next_vreg = cfg->next_vreg;
13934 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13935 lvregs_size = 1024;
13936 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13940 * These arrays contain the first and last instructions accessing a given
13942 * Since we emit bblocks in the same order we process them here, and we
13943 * don't split live ranges, these will precisely describe the live range of
13944 * the variable, i.e. the instruction range where a valid value can be found
13945 * in the variable's location.
13946 * The live range is computed using the liveness info computed by the liveness pass.
13947 * We can't use vmv->range, since that is an abstract live range, and we need
13948 * one which is instruction precise.
13949 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13951 /* FIXME: Only do this if debugging info is requested */
13952 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13953 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13954 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13955 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13957 /* Add spill loads/stores */
13958 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13961 if (cfg->verbose_level > 2)
13962 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: reset the entries touched by the previous block. */
13964 /* Clear vreg_to_lvreg array */
13965 for (i = 0; i < lvregs_len; i++)
13966 vreg_to_lvreg [lvregs [i]] = 0;
13970 MONO_BB_FOR_EACH_INS (bb, ins) {
13971 const char *spec = INS_INFO (ins->opcode);
13972 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13973 gboolean store, no_lvreg;
13974 int sregs [MONO_MAX_SRC_REGS];
13976 if (G_UNLIKELY (cfg->verbose_level > 2))
13977 mono_print_ins (ins);
13979 if (ins->opcode == OP_NOP)
13983 * We handle LDADDR here as well, since it can only be decomposed
13984 * when variable addresses are known.
13986 if (ins->opcode == OP_LDADDR) {
13987 MonoInst *var = (MonoInst *)ins->inst_p0;
13989 if (var->opcode == OP_VTARG_ADDR) {
13990 /* Happens on SPARC/S390 where vtypes are passed by reference */
13991 MonoInst *vtaddr = var->inst_left;
13992 if (vtaddr->opcode == OP_REGVAR) {
13993 ins->opcode = OP_MOVE;
13994 ins->sreg1 = vtaddr->dreg;
13996 else if (var->inst_left->opcode == OP_REGOFFSET) {
13997 ins->opcode = OP_LOAD_MEMBASE;
13998 ins->inst_basereg = vtaddr->inst_basereg;
13999 ins->inst_offset = vtaddr->inst_offset;
/* -1 in gsharedvt_vreg_to_idx marks a gsharedvt argument (see mono_allocate_gsharedvt_vars). */
14002 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14003 /* gsharedvt arg passed by ref */
14004 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14006 ins->opcode = OP_LOAD_MEMBASE;
14007 ins->inst_basereg = var->inst_basereg;
14008 ins->inst_offset = var->inst_offset;
14009 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14010 MonoInst *load, *load2, *load3;
14011 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14012 int reg1, reg2, reg3;
14013 MonoInst *info_var = cfg->gsharedvt_info_var;
14014 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14018 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14021 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14023 g_assert (info_var);
14024 g_assert (locals_var);
14026 /* Mark the instruction used to compute the locals var as used */
14027 cfg->gsharedvt_locals_var_ins = NULL;
14029 /* Load the offset */
14030 if (info_var->opcode == OP_REGOFFSET) {
14031 reg1 = alloc_ireg (cfg);
14032 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14033 } else if (info_var->opcode == OP_REGVAR) {
14035 reg1 = info_var->dreg;
14037 g_assert_not_reached ();
14039 reg2 = alloc_ireg (cfg);
14040 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14041 /* Load the locals area address */
14042 reg3 = alloc_ireg (cfg);
14043 if (locals_var->opcode == OP_REGOFFSET) {
14044 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14045 } else if (locals_var->opcode == OP_REGVAR) {
14046 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14048 g_assert_not_reached ();
14050 /* Compute the address */
14051 ins->opcode = OP_PADD;
14055 mono_bblock_insert_before_ins (bb, ins, load3);
14056 mono_bblock_insert_before_ins (bb, load3, load2);
14058 mono_bblock_insert_before_ins (bb, load2, load);
14060 g_assert (var->opcode == OP_REGOFFSET);
14062 ins->opcode = OP_ADD_IMM;
14063 ins->sreg1 = var->inst_basereg;
14064 ins->inst_imm = var->inst_offset;
/* LDADDR was rewritten into arithmetic/loads the local opt passes can simplify. */
14067 *need_local_opts = TRUE;
14068 spec = INS_INFO (ins->opcode);
/* Opcodes below MONO_CEE_LAST are undecomposed CIL; none should survive to this pass. */
14071 if (ins->opcode < MONO_CEE_LAST) {
14072 mono_print_ins (ins);
14073 g_assert_not_reached ();
14077 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14081 if (MONO_IS_STORE_MEMBASE (ins)) {
14082 tmp_reg = ins->dreg;
14083 ins->dreg = ins->sreg2;
14084 ins->sreg2 = tmp_reg;
/* Build a swapped spec (spec2) matching the swapped regs; restored near the end of the loop. */
14087 spec2 [MONO_INST_DEST] = ' ';
14088 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14089 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14090 spec2 [MONO_INST_SRC3] = ' ';
14092 } else if (MONO_IS_STORE_MEMINDEX (ins))
14093 g_assert_not_reached ();
14098 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14099 printf ("\t %.3s %d", spec, ins->dreg);
14100 num_sregs = mono_inst_get_src_registers (ins, sregs);
14101 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14102 printf (" %d", sregs [srcindex]);
/* DREG: if the destination vreg maps to a stack-allocated variable, either fuse the
 * store into ins (_membase / _imm forms) or emit an explicit spill store after it. */
14109 regtype = spec [MONO_INST_DEST];
14110 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14113 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14114 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14115 MonoInst *store_ins;
14117 MonoInst *def_ins = ins;
14118 int dreg = ins->dreg; /* The original vreg */
14120 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14122 if (var->opcode == OP_REGVAR) {
14123 ins->dreg = var->dreg;
14124 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14126 * Instead of emitting a load+store, use a _membase opcode.
14128 g_assert (var->opcode == OP_REGOFFSET);
14129 if (ins->opcode == OP_MOVE) {
14133 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14134 ins->inst_basereg = var->inst_basereg;
14135 ins->inst_offset = var->inst_offset;
14138 spec = INS_INFO (ins->opcode);
14142 g_assert (var->opcode == OP_REGOFFSET);
14144 prev_dreg = ins->dreg;
14146 /* Invalidate any previous lvreg for this vreg */
14147 vreg_to_lvreg [ins->dreg] = 0;
14151 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14153 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg; the spill store below moves it to the stack slot. */
14156 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14158 #if SIZEOF_REGISTER != 8
14159 if (regtype == 'l') {
14160 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14161 mono_bblock_insert_after_ins (bb, ins, store_ins);
14162 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14163 mono_bblock_insert_after_ins (bb, ins, store_ins);
14164 def_ins = store_ins;
14169 g_assert (store_opcode != OP_STOREV_MEMBASE);
14171 /* Try to fuse the store into the instruction itself */
14172 /* FIXME: Add more instructions */
14173 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14174 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14175 ins->inst_imm = ins->inst_c0;
14176 ins->inst_destbasereg = var->inst_basereg;
14177 ins->inst_offset = var->inst_offset;
14178 spec = INS_INFO (ins->opcode);
14179 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14180 ins->opcode = store_opcode;
14181 ins->inst_destbasereg = var->inst_basereg;
14182 ins->inst_offset = var->inst_offset;
/* ins became a store; swap dreg/sreg2 and rebuild spec2 like the MONO_IS_STORE_MEMBASE case above. */
14186 tmp_reg = ins->dreg;
14187 ins->dreg = ins->sreg2;
14188 ins->sreg2 = tmp_reg;
14191 spec2 [MONO_INST_DEST] = ' ';
14192 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14193 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14194 spec2 [MONO_INST_SRC3] = ' ';
14196 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14197 // FIXME: The backends expect the base reg to be in inst_basereg
14198 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14200 ins->inst_basereg = var->inst_basereg;
14201 ins->inst_offset = var->inst_offset;
14202 spec = INS_INFO (ins->opcode);
14204 /* printf ("INS: "); mono_print_ins (ins); */
14205 /* Create a store instruction */
14206 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14208 /* Insert it after the instruction */
14209 mono_bblock_insert_after_ins (bb, ins, store_ins);
14211 def_ins = store_ins;
14214 * We can't assign ins->dreg to var->dreg here, since the
14215 * sregs could use it. So set a flag, and do it after
14218 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14219 dest_has_lvreg = TRUE;
/* Record the first defining instruction for precise live-range tracking. */
14224 if (def_ins && !live_range_start [dreg]) {
14225 live_range_start [dreg] = def_ins;
14226 live_range_start_bb [dreg] = bb;
14229 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14232 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14233 tmp->inst_c1 = dreg;
14234 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* SREGS: rewrite each source vreg — reuse the hreg, reuse a cached lvreg, fuse a
 * _membase load into ins, or emit a fresh reload before ins. */
14241 num_sregs = mono_inst_get_src_registers (ins, sregs);
14242 for (srcindex = 0; srcindex < 3; ++srcindex) {
14243 regtype = spec [MONO_INST_SRC1 + srcindex];
14244 sreg = sregs [srcindex];
14246 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14247 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14248 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14249 MonoInst *use_ins = ins;
14250 MonoInst *load_ins;
14251 guint32 load_opcode;
14253 if (var->opcode == OP_REGVAR) {
14254 sregs [srcindex] = var->dreg;
14255 //mono_inst_set_src_registers (ins, sregs);
14256 live_range_end [sreg] = use_ins;
14257 live_range_end_bb [sreg] = bb;
14259 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14262 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14263 /* var->dreg is a hreg */
14264 tmp->inst_c1 = sreg;
14265 mono_bblock_insert_after_ins (bb, ins, tmp);
14271 g_assert (var->opcode == OP_REGOFFSET);
14273 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14275 g_assert (load_opcode != OP_LOADV_MEMBASE);
14277 if (vreg_to_lvreg [sreg]) {
14278 g_assert (vreg_to_lvreg [sreg] != -1);
14280 /* The variable is already loaded to an lvreg */
14281 if (G_UNLIKELY (cfg->verbose_level > 2))
14282 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14283 sregs [srcindex] = vreg_to_lvreg [sreg];
14284 //mono_inst_set_src_registers (ins, sregs);
14288 /* Try to fuse the load into the instruction */
14289 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14290 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14291 sregs [0] = var->inst_basereg;
14292 //mono_inst_set_src_registers (ins, sregs);
14293 ins->inst_offset = var->inst_offset;
14294 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14295 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14296 sregs [1] = var->inst_basereg;
14297 //mono_inst_set_src_registers (ins, sregs);
14298 ins->inst_offset = var->inst_offset;
14300 if (MONO_IS_REAL_MOVE (ins)) {
14301 ins->opcode = OP_NOP;
14304 //printf ("%d ", srcindex); mono_print_ins (ins);
14306 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the new lvreg unless the var is volatile/indirect or lives on the x86 fp stack. */
14308 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14309 if (var->dreg == prev_dreg) {
14311 * sreg refers to the value loaded by the load
14312 * emitted below, but we need to use ins->dreg
14313 * since it refers to the store emitted earlier.
14317 g_assert (sreg != -1);
14318 vreg_to_lvreg [var->dreg] = sreg;
/* Grow the undo list geometrically when full (mempool alloc, no free needed). */
14319 if (lvregs_len >= lvregs_size) {
14320 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14321 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14322 lvregs = new_lvregs;
14325 lvregs [lvregs_len ++] = var->dreg;
14329 sregs [srcindex] = sreg;
14330 //mono_inst_set_src_registers (ins, sregs);
14332 #if SIZEOF_REGISTER != 8
14333 if (regtype == 'l') {
14334 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14335 mono_bblock_insert_before_ins (bb, ins, load_ins);
14336 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14337 mono_bblock_insert_before_ins (bb, ins, load_ins);
14338 use_ins = load_ins;
14343 #if SIZEOF_REGISTER == 4
14344 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14346 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14347 mono_bblock_insert_before_ins (bb, ins, load_ins);
14348 use_ins = load_ins;
14352 if (var->dreg < orig_next_vreg) {
14353 live_range_end [var->dreg] = use_ins;
14354 live_range_end_bb [var->dreg] = bb;
14357 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14360 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14361 tmp->inst_c1 = var->dreg;
14362 mono_bblock_insert_after_ins (bb, ins, tmp);
14366 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass above: now that the sregs are final, publish
 * ins->dreg as the cached lvreg for the stored variable. */
14368 if (dest_has_lvreg) {
14369 g_assert (ins->dreg != -1);
14370 vreg_to_lvreg [prev_dreg] = ins->dreg;
14371 if (lvregs_len >= lvregs_size) {
14372 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14373 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14374 lvregs = new_lvregs;
14377 lvregs [lvregs_len ++] = prev_dreg;
14378 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap applied to store opcodes at the top of the loop. */
14382 tmp_reg = ins->dreg;
14383 ins->dreg = ins->sreg2;
14384 ins->sreg2 = tmp_reg;
/* The lvreg cache is flushed at calls. */
14387 if (MONO_IS_CALL (ins)) {
14388 /* Clear vreg_to_lvreg array */
14389 for (i = 0; i < lvregs_len; i++)
14390 vreg_to_lvreg [lvregs [i]] = 0;
14392 } else if (ins->opcode == OP_NOP) {
14394 MONO_INST_NULLIFY_SREGS (ins);
14397 if (cfg->verbose_level > 2)
14398 mono_print_ins_index (1, ins);
14401 /* Extend the live range based on the liveness info */
14402 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14403 for (i = 0; i < cfg->num_varinfo; i ++) {
14404 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14406 if (vreg_is_volatile (cfg, vi->vreg))
14407 /* The liveness info is incomplete */
14410 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14411 /* Live from at least the first ins of this bb */
14412 live_range_start [vi->vreg] = bb->code;
14413 live_range_start_bb [vi->vreg] = bb;
14416 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14417 /* Live at least until the last ins of this bb */
14418 live_range_end [vi->vreg] = bb->last_ins;
14419 live_range_end_bb [vi->vreg] = bb;
14426 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14427 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14429 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14430 for (i = 0; i < cfg->num_varinfo; ++i) {
14431 int vreg = MONO_VARINFO (cfg, i)->vreg;
14434 if (live_range_start [vreg]) {
14435 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14437 ins->inst_c1 = vreg;
14438 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14440 if (live_range_end [vreg]) {
14441 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14443 ins->inst_c1 = vreg;
14444 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14445 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14447 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14452 if (cfg->gsharedvt_locals_var_ins) {
14453 /* Nullify if unused */
14454 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14455 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* live_range_* arrays came from g_new0/g_new, not the mempool, so free explicitly. */
14458 g_free (live_range_start);
14459 g_free (live_range_end);
14460 g_free (live_range_start_bb);
14461 g_free (live_range_end_bb);
14467 * - use 'iadd' instead of 'int_add'
14468 * - handling ovf opcodes: decompose in method_to_ir.
14469 * - unify iregs/fregs
14470 * -> partly done, the missing parts are:
14471 * - a more complete unification would involve unifying the hregs as well, so
14472 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14473 * would no longer map to the machine hregs, so the code generators would need to
14474 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14475 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14476 * fp/non-fp branches speeds it up by about 15%.
14477 * - use sext/zext opcodes instead of shifts
14479 * - get rid of TEMPLOADs if possible and use vregs instead
14480 * - clean up usage of OP_P/OP_ opcodes
14481 * - cleanup usage of DUMMY_USE
14482 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14484 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14485 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14486 * - make sure handle_stack_args () is called before the branch is emitted
14487 * - when the new IR is done, get rid of all unused stuff
14488 * - COMPARE/BEQ as separate instructions or unify them ?
14489 * - keeping them separate allows specialized compare instructions like
14490 * compare_imm, compare_membase
14491 * - most back ends unify fp compare+branch, fp compare+ceq
14492 * - integrate mono_save_args into inline_method
14493 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14494 * - handle long shift opts on 32 bit platforms somehow: they require
14495 * 3 sregs (2 for arg1 and 1 for arg2)
14496 * - make byref a 'normal' type.
14497 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14498 * variable if needed.
14499 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14500 * like inline_method.
14501 * - remove inlining restrictions
14502 * - fix LNEG and enable cfold of INEG
14503 * - generalize x86 optimizations like ldelema as a peephole optimization
14504 * - add store_mem_imm for amd64
14505 * - optimize the loading of the interruption flag in the managed->native wrappers
14506 * - avoid special handling of OP_NOP in passes
14507 * - move code inserting instructions into one function/macro.
14508 * - try a coalescing phase after liveness analysis
14509 * - add float -> vreg conversion + local optimizations on !x86
14510 * - figure out how to handle decomposed branches during optimizations, ie.
14511 * compare+branch, op_jump_table+op_br etc.
14512 * - promote RuntimeXHandles to vregs
14513 * - vtype cleanups:
14514 * - add a NEW_VARLOADA_VREG macro
14515 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14516 * accessing vtype fields.
14517 * - get rid of I8CONST on 64 bit platforms
14518 * - dealing with the increase in code size due to branches created during opcode
14520 * - use extended basic blocks
14521 * - all parts of the JIT
14522 * - handle_global_vregs () && local regalloc
14523 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14524 * - sources of increase in code size:
14527 * - isinst and castclass
14528 * - lvregs not allocated to global registers even if used multiple times
14529 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14531 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14532 * - add all micro optimizations from the old JIT
14533 * - put tree optimizations into the deadce pass
14534 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14535 * specific function.
14536 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14537 * fcompare + branchCC.
14538 * - create a helper function for allocating a stack slot, taking into account
14539 * MONO_CFG_HAS_SPILLUP.
14541 * - merge the ia64 switch changes.
14542 * - optimize mono_regstate2_alloc_int/float.
14543 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14544 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14545 * parts of the tree could be separated by other instructions, killing the tree
14546 * arguments, or stores killing loads etc. Also, should we fold loads into other
14547 * instructions if the result of the load is used multiple times ?
14548 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14549 * - LAST MERGE: 108395.
14550 * - when returning vtypes in registers, generate IR and append it to the end of the
14551 * last bb instead of doing it in the epilog.
14552 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14560 - When to decompose opcodes:
14561 - earlier: this makes some optimizations hard to implement, since the low level IR
14562 no longer contains the necessary information. But it is easier to do.
14563 - later: harder to implement, enables more optimizations.
14564 - Branches inside bblocks:
14565 - created when decomposing complex opcodes.
14566 - branches to another bblock: harmless, but not tracked by the branch
14567 optimizations, so need to branch to a label at the start of the bblock.
14568 - branches to inside the same bblock: very problematic, trips up the local
14569 reg allocator. Can be fixed by splitting the current bblock, but that is a
14570 complex operation, since some local vregs can become global vregs etc.
14571 - Local/global vregs:
14572 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14573 local register allocator.
14574 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14575 structure, created by mono_create_var (). Assigned to hregs or the stack by
14576 the global register allocator.
14577 - When to do optimizations like alu->alu_imm:
14578 - earlier -> saves work later on since the IR will be smaller/simpler
14579 - later -> can work on more instructions
14580 - Handling of valuetypes:
14581 - When a vtype is pushed on the stack, a new temporary is created, an
14582 instruction computing its address (LDADDR) is emitted and pushed on
14583 the stack. Need to optimize cases when the vtype is used immediately as in
14584 argument passing, stloc etc.
14585 - Instead of the to_end stuff in the old JIT, simply call the function handling
14586 the values on the stack before emitting the last instruction of the bb.
14589 #else /* !DISABLE_JIT */
/* Stub kept when the JIT is compiled out, so callers still link. */
14592 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14596 #endif /* !DISABLE_JIT */