3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 inline static MonoInst*
152 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
158 static MonoMethodSignature *helper_sig_jit_thread_attach;
159 static MonoMethodSignature *helper_sig_get_tls_tramp;
160 static MonoMethodSignature *helper_sig_set_tls_tramp;
162 /* type loading helpers */
163 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
164 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
167 * Instruction metadata
175 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
176 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
182 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
187 /* keep in sync with the enum in mini.h */
190 #include "mini-ops.h"
195 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
196 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
198 * This should contain the index of the last sreg + 1. This is not the same
199 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
201 const gint8 ins_sreg_counts[] = {
202 #include "mini-ops.h"
/*
 * Thin exported wrappers over the vreg allocators so other compilation
 * units can allocate virtual registers on a MonoCompile.
 */
208 mono_alloc_ireg (MonoCompile *cfg)
210 return alloc_ireg (cfg);
214 mono_alloc_lreg (MonoCompile *cfg)
216 return alloc_lreg (cfg);
220 mono_alloc_freg (MonoCompile *cfg)
222 return alloc_freg (cfg);
226 mono_alloc_preg (MonoCompile *cfg)
228 return alloc_preg (cfg);
/* alloc_dreg selects the register class from the eval-stack type. */
232 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
234 return alloc_dreg (cfg, stack_type);
238 * mono_alloc_ireg_ref:
240 * Allocate an IREG, and mark it as holding a GC ref.
243 mono_alloc_ireg_ref (MonoCompile *cfg)
245 return alloc_ireg_ref (cfg);
249 * mono_alloc_ireg_mp:
251 * Allocate an IREG, and mark it as holding a managed pointer.
254 mono_alloc_ireg_mp (MonoCompile *cfg)
256 return alloc_ireg_mp (cfg);
260 * mono_alloc_ireg_copy:
262 * Allocate an IREG with the same GC type as VREG.
265 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Preserve VREG's GC tracking class: ref, managed pointer, or plain int. */
267 if (vreg_is_ref (cfg, vreg))
268 return alloc_ireg_ref (cfg);
269 else if (vreg_is_mp (cfg, vreg))
270 return alloc_ireg_mp (cfg);
272 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode used to copy a value of TYPE between two vregs
 * (e.g. OP_RMOVE/OP_FMOVE for floating point).  Enums and generic
 * instances are unwrapped to their underlying type first.
 * NOTE(review): several case labels/return lines are not visible in this
 * excerpt of the file.
 */
276 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 type = mini_get_underlying_type (type);
283 switch (type->type) {
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values kept in single precision use OP_RMOVE, otherwise FMOVE. */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
/* Enums move as their underlying integral type. */
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Non-SIMD generic insts behave like their container class. */
328 type = &type->data.generic_class->container_class->byval_arg;
332 g_assert (cfg->gshared);
333 if (mini_type_var_is_vt (type))
/* Type variables: recurse on the (shared) underlying type. */
336 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
338 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the block number, the IN/OUT edge lists
 * (block number plus dfn) and then every instruction in BB.
 */
344 mono_print_bb (MonoBasicBlock *bb, const char *msg)
348 GString *str = g_string_new ("");
350 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
353 g_string_append_printf (str, ", OUT: ");
354 for (i = 0; i < bb->out_count; ++i)
355 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
356 g_string_append_printf (str, " ]\n");
358 g_print ("%s", str->str);
359 g_string_free (str, TRUE);
/* Walk the instruction list of the block and print each MonoInst. */
361 for (tree = bb->code; tree; tree = tree->next)
362 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Pre-create the icall signatures used by the JIT helpers declared
 * above; the descriptor strings are "<ret> <args...>" as parsed by
 * mono_create_icall_signature ().
 */
366 mono_create_helper_signatures (void)
368 helper_sig_domain_get = mono_create_icall_signature ("ptr");
369 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
370 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
371 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
372 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
373 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/*
 * Debugger hook called when unverifiable IL is encountered; only acts
 * when the --break-on-unverified debug option is set.  NOTE(review):
 * the breakpoint statement itself is not visible in this excerpt.
 */
376 static MONO_NEVER_INLINE void
377 break_on_unverified (void)
379 if (mini_get_debug_options ()->break_on_unverified)
/*
 * Record a FieldAccessException in cfg->error (reported via
 * MONO_EXCEPTION_MONO_ERROR) naming the inaccessible field and the
 * offending method; the temporary name strings are freed here.
 */
383 static MONO_NEVER_INLINE void
384 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
386 char *method_fname = mono_method_full_name (method, TRUE);
387 char *field_fname = mono_field_full_name (field);
388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
389 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
390 g_free (method_fname);
391 g_free (field_fname);
/*
 * Mark the compilation as failed-to-inline (used by INLINE_FAILURE);
 * MSG is only shown at verbosity >= 2.
 */
394 static MONO_NEVER_INLINE void
395 inline_failure (MonoCompile *cfg, const char *msg)
397 if (cfg->verbose_level >= 2)
398 printf ("inline failed: %s\n", msg);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * Abort generic sharing for the current method (used by
 * GENERIC_SHARING_FAILURE).  NOTE(review): the trailing '\' below is a
 * leftover line-continuation from when this was a macro; it merely
 * splices the if onto the printf line, so behavior is unchanged.  The
 * FILE parameter is not used by the visible printf.
 */
402 static MONO_NEVER_INLINE void
403 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 if (cfg->verbose_level > 2) \
406 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
407 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * Abort gsharedvt compilation for the current method (used by
 * GSHAREDVT_FAILURE); stores a diagnostic message on the cfg so it can
 * be reported later.
 */
410 static MONO_NEVER_INLINE void
411 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
413 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
414 if (cfg->verbose_level >= 2)
415 printf ("%s\n", cfg->exception_message);
416 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
420 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
421 * foo<T> (int i) { ldarg.0; box T; }
423 #define UNVERIFIED do { \
424 if (cfg->gsharedvt) { \
425 if (cfg->verbose_level > 2) \
426 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
427 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
428 goto exception_exit; \
430 break_on_unverified (); \
434 #define GET_BBLOCK(cfg,tblock,ip) do { \
435 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
437 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
438 NEW_BBLOCK (cfg, (tblock)); \
439 (tblock)->cil_code = (ip); \
440 ADD_BBLOCK (cfg, (tblock)); \
444 #if defined(TARGET_X86) || defined(TARGET_AMD64)
445 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
446 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
447 (dest)->dreg = alloc_ireg_mp ((cfg)); \
448 (dest)->sreg1 = (sr1); \
449 (dest)->sreg2 = (sr2); \
450 (dest)->inst_imm = (imm); \
451 (dest)->backend.shift_amount = (shift); \
452 MONO_ADD_INS ((cfg)->cbb, (dest)); \
456 /* Emit conversions so both operands of a binary opcode are of the same type */
458 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
460 MonoInst *arg1 = *arg1_ref;
461 MonoInst *arg2 = *arg2_ref;
/* NOTE(review): the opening of this condition is not visible in this excerpt. */
464 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
465 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
468 /* Mixing r4/r8 is allowed by the spec */
/* Widen whichever operand is R4 to R8 with OP_RCONV_TO_R8. */
469 if (arg1->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
473 conv->type = STACK_R8;
477 if (arg2->type == STACK_R4) {
478 int dreg = alloc_freg (cfg);
480 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
481 conv->type = STACK_R8;
/* On 64-bit targets, sign-extend an I4 operand mixed with native int. */
487 #if SIZEOF_REGISTER == 8
488 /* FIXME: Need to add many more cases */
489 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
492 int dr = alloc_preg (cfg);
493 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
494 (ins)->sreg2 = widen->dreg;
499 #define ADD_BINOP(op) do { \
500 MONO_INST_NEW (cfg, ins, (op)); \
502 ins->sreg1 = sp [0]->dreg; \
503 ins->sreg2 = sp [1]->dreg; \
504 type_from_op (cfg, ins, sp [0], sp [1]); \
506 /* Have to insert a widening op */ \
507 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
508 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
509 MONO_ADD_INS ((cfg)->cbb, (ins)); \
510 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
513 #define ADD_UNOP(op) do { \
514 MONO_INST_NEW (cfg, ins, (op)); \
516 ins->sreg1 = sp [0]->dreg; \
517 type_from_op (cfg, ins, sp [0], NULL); \
519 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
520 MONO_ADD_INS ((cfg)->cbb, (ins)); \
521 *sp++ = mono_decompose_opcode (cfg, ins); \
524 #define ADD_BINCOND(next_block) do { \
527 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
528 cmp->sreg1 = sp [0]->dreg; \
529 cmp->sreg2 = sp [1]->dreg; \
530 type_from_op (cfg, cmp, sp [0], sp [1]); \
532 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
533 type_from_op (cfg, ins, sp [0], sp [1]); \
534 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
535 GET_BBLOCK (cfg, tblock, target); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_true_bb = tblock; \
538 if ((next_block)) { \
539 link_bblock (cfg, cfg->cbb, (next_block)); \
540 ins->inst_false_bb = (next_block); \
541 start_new_bblock = 1; \
543 GET_BBLOCK (cfg, tblock, ip); \
544 link_bblock (cfg, cfg->cbb, tblock); \
545 ins->inst_false_bb = tblock; \
546 start_new_bblock = 2; \
548 if (sp != stack_start) { \
549 handle_stack_args (cfg, stack_start, sp - stack_start); \
550 CHECK_UNVERIFIABLE (cfg); \
552 MONO_ADD_INS (cfg->cbb, cmp); \
553 MONO_ADD_INS (cfg->cbb, ins); \
557 * link_bblock: Links two basic blocks
559 * links two basic blocks in the control flow graph, the 'from'
560 * argument is the starting block and the 'to' argument is the block
561 * the control flow ends to after 'from'.
564 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
566 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit have no cil_code). */
570 if (from->cil_code) {
572 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
574 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
577 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
579 printf ("edge from entry to exit\n");
/* Skip if the edge is already present in from's out list. */
584 for (i = 0; i < from->out_count; ++i) {
585 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one; mempool-allocated, so the old array is not freed. */
591 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
592 for (i = 0; i < from->out_count; ++i) {
593 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow logic for to's in list. */
601 for (i = 0; i < to->in_count; ++i) {
602 if (from == to->in_bb [i]) {
608 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
609 for (i = 0; i < to->in_count; ++i) {
610 newa [i] = to->in_bb [i];
/* Exported wrapper over the static link_bblock () above. */
619 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
621 link_bblock (cfg, from, to);
625 * mono_find_block_region:
627 * We mark each basic block with a region ID. We use that to avoid BB
628 * optimizations when blocks are in different regions.
631 * A region token that encodes where this region is, and information
632 * about the clause owner for this block.
634 * The region encodes the try/catch/filter clause that owns this block
635 * as well as the type. -1 is a special value that represents a block
636 * that is in none of try/catch/filter.
639 mono_find_block_region (MonoCompile *cfg, int offset)
641 MonoMethodHeader *header = cfg->header;
642 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter expression or a handler body?
 * Region tokens are ((clause index + 1) << 8) | region kind | clause flags. */
645 for (i = 0; i < header->num_clauses; ++i) {
646 clause = &header->clauses [i];
647 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
648 (offset < (clause->handler_offset)))
649 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
651 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
652 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
653 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
654 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
655 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
657 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: not in any handler, so check the protected (try) ranges. */
660 for (i = 0; i < header->num_clauses; ++i) {
661 clause = &header->clauses [i];
663 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
664 return ((i + 1) << 8) | clause->flags;
/*
 * Return whether the IL OFFSET falls inside the handler body of a
 * finally or fault clause of the current method.
 */
671 ip_in_finally_clause (MonoCompile *cfg, int offset)
673 MonoMethodHeader *header = cfg->header;
674 MonoExceptionClause *clause;
677 for (i = 0; i < header->num_clauses; ++i) {
678 clause = &header->clauses [i];
/* Only finally/fault clauses are of interest; skip catch/filter. */
679 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
682 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * Collect (as a GList) every clause of kind TYPE whose protected range
 * contains IP but not TARGET, i.e. the clauses a branch from IP to
 * TARGET would leave and whose handlers must therefore run.
 */
689 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
691 MonoMethodHeader *header = cfg->header;
692 MonoExceptionClause *clause;
696 for (i = 0; i < header->num_clauses; ++i) {
697 clause = &header->clauses [i];
698 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
699 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
700 if (clause->flags == type)
701 res = g_list_append (res, clause);
/*
 * Lazily create (and cache in cfg->spvars, keyed by region token) the
 * stack-pointer variable for REGION.
 */
708 mono_create_spvar_for_region (MonoCompile *cfg, int region)
712 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
716 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
717 /* prevent it from being register allocated */
718 var->flags |= MONO_INST_VOLATILE;
720 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception variable cached for the handler at OFFSET, if any. */
724 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
726 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Lazily create (and cache in cfg->exvars, keyed by IL offset) the
 * object-typed variable holding the exception for the handler at OFFSET.
 */
730 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
734 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
738 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
739 /* prevent it from being register allocated */
740 var->flags |= MONO_INST_VOLATILE;
742 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
748 * Returns the type used in the eval stack when @type is loaded.
749 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
752 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
756 type = mini_get_underlying_type (type);
/* Sets both inst->type (MonoStackType) and inst->klass as a side effect. */
757 inst->klass = klass = mono_class_from_mono_type (type);
759 inst->type = STACK_MP;
764 switch (type->type) {
766 inst->type = STACK_INV;
774 inst->type = STACK_I4;
779 case MONO_TYPE_FNPTR:
780 inst->type = STACK_PTR;
782 case MONO_TYPE_CLASS:
783 case MONO_TYPE_STRING:
784 case MONO_TYPE_OBJECT:
785 case MONO_TYPE_SZARRAY:
786 case MONO_TYPE_ARRAY:
787 inst->type = STACK_OBJ;
791 inst->type = STACK_I8;
/* R4 may be STACK_R4 or STACK_R8 depending on cfg configuration. */
794 inst->type = cfg->r4_stack_type;
797 inst->type = STACK_R8;
799 case MONO_TYPE_VALUETYPE:
/* Enums are loaded as their underlying integral type. */
800 if (type->data.klass->enumtype) {
801 type = mono_class_enum_basetype (type->data.klass);
805 inst->type = STACK_VTYPE;
808 case MONO_TYPE_TYPEDBYREF:
809 inst->klass = mono_defaults.typed_reference_class;
810 inst->type = STACK_VTYPE;
812 case MONO_TYPE_GENERICINST:
813 type = &type->data.generic_class->container_class->byval_arg;
/* Shared generic code: gsharedvt types are treated as vtypes. */
817 g_assert (cfg->gshared);
818 if (mini_is_gsharedvt_type (type)) {
819 g_assert (cfg->gsharedvt);
820 inst->type = STACK_VTYPE;
822 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
826 g_error ("unknown type 0x%02x in eval stack type", type->type);
831 * The following tables are used to quickly validate the IL code in type_from_op ().
/* All tables below are indexed by MonoStackType ([src1][src2] for the 2-D
 * ones); STACK_INV marks an invalid operand combination.  Some rows carry
 * a 9th column — presumably the R4 stack type appended later; TODO confirm
 * against the STACK_MAX enum order (Inv, i4, i8, ptr, r8, mp, obj, vtype, r4). */
834 bin_num_table [STACK_MAX] [STACK_MAX] = {
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
840 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of a negation, indexed by the operand's stack type. */
848 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
851 /* reduce the size of this table */
853 bin_int_table [STACK_MAX] [STACK_MAX] = {
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Nonzero = comparable; values other than 1 appear to flag special cases
 * (e.g. ptr vs mp/obj) — TODO confirm how type_from_op's '& 1' uses them. */
865 bin_comp_table [STACK_MAX] [STACK_MAX] = {
866 /* Inv i L p F & O vt r4 */
868 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
869 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
870 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
871 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
872 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
873 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
874 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
875 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
878 /* reduce the size of this table */
880 shift_table [STACK_MAX] [STACK_MAX] = {
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
892 * Tables to map from the non-specific opcode to the matching
893 * type-specific opcode.
/* Each entry is the delta added to the generic opcode to obtain the
 * type-specific one (e.g. CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD). */
895 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
897 binops_op_map [STACK_MAX] = {
898 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
901 /* handles from CEE_NEG to CEE_CONV_U8 */
903 unops_op_map [STACK_MAX] = {
904 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
907 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
909 ovfops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
913 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
915 ovf2ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
919 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
921 ovf3ops_op_map [STACK_MAX] = {
922 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
925 /* handles from CEE_BEQ to CEE_BLT_UN */
927 beqops_op_map [STACK_MAX] = {
928 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
931 /* handles from CEE_CEQ to CEE_CLT_UN */
933 ceqops_op_map [STACK_MAX] = {
934 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
938 * Sets ins->type (the type on the eval stack) according to the
939 * type of the opcode and the arguments to it.
940 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
942 * FIXME: this function sets ins->type unconditionally in some cases, but
943 * it should set it to invalid for some types (a conv.x on an object)
946 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
948 switch (ins->opcode) {
/* Binary numeric ops: result type from the table, then specialize the
 * generic CEE_* opcode by adding the per-type delta. */
955 /* FIXME: check unverifiable args for STACK_MP */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
964 ins->type = bin_int_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
970 ins->type = shift_table [src1->type] [src2->type];
971 ins->opcode += binops_op_map [ins->type];
/* Comparisons: choose the compare opcode from the operand width;
 * ptr/obj/mp count as 64-bit when SIZEOF_VOID_P == 8. */
976 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
977 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
978 ins->opcode = OP_LCOMPARE;
979 else if (src1->type == STACK_R4)
980 ins->opcode = OP_RCOMPARE;
981 else if (src1->type == STACK_R8)
982 ins->opcode = OP_FCOMPARE;
984 ins->opcode = OP_ICOMPARE;
986 case OP_ICOMPARE_IMM:
/* Immediate compare: both table indices use src1 since the other
 * operand is an immediate of the same type. */
987 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
988 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
989 ins->opcode = OP_LCOMPARE_IMM;
1001 ins->opcode += beqops_op_map [src1->type];
1004 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
/* '& 1': only combinations marked 1 in bin_comp_table are valid here. */
1011 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1012 ins->opcode += ceqops_op_map [src1->type];
1016 ins->type = neg_table [src1->type];
1017 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are valid. */
1020 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1021 ins->type = src1->type;
1023 ins->type = STACK_INV;
1024 ins->opcode += unops_op_map [ins->type];
1030 ins->type = STACK_I4;
1031 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int to floating point. */
1034 ins->type = STACK_R8;
1035 switch (src1->type) {
1038 ins->opcode = OP_ICONV_TO_R_UN;
1041 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit targets. */
1045 case CEE_CONV_OVF_I1:
1046 case CEE_CONV_OVF_U1:
1047 case CEE_CONV_OVF_I2:
1048 case CEE_CONV_OVF_U2:
1049 case CEE_CONV_OVF_I4:
1050 case CEE_CONV_OVF_U4:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf3ops_op_map [src1->type];
1054 case CEE_CONV_OVF_I_UN:
1055 case CEE_CONV_OVF_U_UN:
1056 ins->type = STACK_PTR;
1057 ins->opcode += ovf2ops_op_map [src1->type];
1059 case CEE_CONV_OVF_I1_UN:
1060 case CEE_CONV_OVF_I2_UN:
1061 case CEE_CONV_OVF_I4_UN:
1062 case CEE_CONV_OVF_U1_UN:
1063 case CEE_CONV_OVF_U2_UN:
1064 case CEE_CONV_OVF_U4_UN:
1065 ins->type = STACK_I4;
1066 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source and word size. */
1069 ins->type = STACK_PTR;
1070 switch (src1->type) {
1072 ins->opcode = OP_ICONV_TO_U;
1076 #if SIZEOF_VOID_P == 8
1077 ins->opcode = OP_LCONV_TO_U;
1079 ins->opcode = OP_MOVE;
1083 ins->opcode = OP_LCONV_TO_U;
1086 ins->opcode = OP_FCONV_TO_U;
1092 ins->type = STACK_I8;
1093 ins->opcode += unops_op_map [src1->type];
1095 case CEE_CONV_OVF_I8:
1096 case CEE_CONV_OVF_U8:
1097 ins->type = STACK_I8;
1098 ins->opcode += ovf3ops_op_map [src1->type];
1100 case CEE_CONV_OVF_U8_UN:
1101 case CEE_CONV_OVF_I8_UN:
1102 ins->type = STACK_I8;
1103 ins->opcode += ovf2ops_op_map [src1->type];
1106 ins->type = cfg->r4_stack_type;
1107 ins->opcode += unops_op_map [src1->type];
1110 ins->type = STACK_R8;
1111 ins->opcode += unops_op_map [src1->type];
1114 ins->type = STACK_R8;
1118 ins->type = STACK_I4;
1119 ins->opcode += ovfops_op_map [src1->type];
1122 case CEE_CONV_OVF_I:
1123 case CEE_CONV_OVF_U:
1124 ins->type = STACK_PTR;
1125 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floating point results are invalid. */
1128 case CEE_ADD_OVF_UN:
1130 case CEE_MUL_OVF_UN:
1132 case CEE_SUB_OVF_UN:
1133 ins->type = bin_num_table [src1->type] [src2->type];
1134 ins->opcode += ovfops_op_map [src1->type];
1135 if (ins->type == STACK_R8)
1136 ins->type = STACK_INV;
1138 case OP_LOAD_MEMBASE:
1139 ins->type = STACK_PTR;
1141 case OP_LOADI1_MEMBASE:
1142 case OP_LOADU1_MEMBASE:
1143 case OP_LOADI2_MEMBASE:
1144 case OP_LOADU2_MEMBASE:
1145 case OP_LOADI4_MEMBASE:
1146 case OP_LOADU4_MEMBASE:
1147 ins->type = STACK_PTR;
1149 case OP_LOADI8_MEMBASE:
1150 ins->type = STACK_I8;
1152 case OP_LOADR4_MEMBASE:
1153 ins->type = cfg->r4_stack_type;
1155 case OP_LOADR8_MEMBASE:
1156 ins->type = STACK_R8;
1159 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a generic klass; callers refine it if needed. */
1163 if (ins->type == STACK_MP)
1164 ins->klass = mono_defaults.object_class;
1169 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1175 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Check that the eval-stack types of ARGS are compatible with SIG
 * (byref-ness, reference vs value, R4/R8).  NOTE(review): large parts of
 * this function are not visible in this excerpt; the return statements
 * are presumably 0 on mismatch / 1 on success — confirm in the full file.
 */
1180 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1185 switch (args->type) {
1195 for (i = 0; i < sig->param_count; ++i) {
1196 switch (args [i].type) {
1200 if (!sig->params [i]->byref)
1204 if (sig->params [i]->byref)
1206 switch (sig->params [i]->type) {
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
/* Floating point stack entries must match an R4 or R8 parameter. */
1218 if (sig->params [i]->byref)
1220 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1229 /*if (!param_table [args [i].type] [sig->params [i]->type])
1237 * When we need a pointer to the current domain many times in a method, we
1238 * call mono_domain_get() once and we store the result in a local variable.
1239 * This function returns the variable that represents the MonoDomain*.
1241 inline static MonoInst *
1242 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created and cached on the MonoCompile. */
1244 if (!cfg->domainvar)
1245 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 return cfg->domainvar;
1250 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful when AOT-compiling on a backend that needs a GOT var;
 * the early-return value on the line after the guard is not visible here
 * (presumably NULL — confirm in the full file). */
1254 mono_get_got_var (MonoCompile *cfg)
1256 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1258 if (!cfg->got_var) {
1259 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 return cfg->got_var;
/* Lazily create the variable holding the runtime generic context. */
1265 mono_create_rgctx_var (MonoCompile *cfg)
1267 if (!cfg->rgctx_var) {
1268 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1269 /* force the var to be stack allocated */
1270 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/*
 * Return the rgctx variable, creating it on first use.  Only valid in
 * gshared compilations (asserted).
 */
1275 mono_get_vtable_var (MonoCompile *cfg)
1277 g_assert (cfg->gshared);
1279 mono_create_rgctx_var (cfg);
1281 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an evaluation-stack type (STACK_*) back to a MonoType*.
 * STACK_VTYPE uses the instruction's klass; unhandled types abort.
 */
1285 type_from_stack_type (MonoInst *ins) {
1286 switch (ins->type) {
1287 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1288 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1289 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1290 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1291 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* NOTE(review): case label elided in excerpt — presumably STACK_MP, returning
 * the managed-pointer form (this_arg) of the instruction's class. */
1293 return &ins->klass->this_arg;
1294 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1295 case STACK_VTYPE: return &ins->klass->byval_arg;
1297 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType* to its evaluation-stack type (STACK_*), after removing
 * enum wrappers via mono_type_get_underlying_type ().  (Excerpt: most
 * return statements elided; visible cases annotated.)
 */
1302 static G_GNUC_UNUSED int
1303 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1305 t = mono_type_get_underlying_type (t);
1317 case MONO_TYPE_FNPTR:
/* Reference types all share one stack type (STACK_OBJ — elided return). */
1319 case MONO_TYPE_CLASS:
1320 case MONO_TYPE_STRING:
1321 case MONO_TYPE_OBJECT:
1322 case MONO_TYPE_SZARRAY:
1323 case MONO_TYPE_ARRAY:
/* R4 stack type depends on the backend's float ABI (cfg->r4_stack_type). */
1329 return cfg->r4_stack_type;
1332 case MONO_TYPE_VALUETYPE:
1333 case MONO_TYPE_TYPEDBYREF:
1335 case MONO_TYPE_GENERICINST:
1336 if (mono_type_generic_inst_is_valuetype (t))
1342 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element MonoClass implied by a CIL ldelem/stelem opcode.
 * (Excerpt: the case labels pairing each opcode to its class are elided;
 * only the LDELEM_REF/STELEM_REF pair is visible.)
 */
1349 array_access_to_klass (int opcode)
1353 return mono_defaults.byte_class;
1355 return mono_defaults.uint16_class;
1358 return mono_defaults.int_class;
1361 return mono_defaults.sbyte_class;
1364 return mono_defaults.int16_class;
1367 return mono_defaults.int32_class;
1369 return mono_defaults.uint32_class;
1372 return mono_defaults.int64_class;
1375 return mono_defaults.single_class;
1378 return mono_defaults.double_class;
1379 case CEE_LDELEM_REF:
1380 case CEE_STELEM_REF:
1381 return mono_defaults.object_class;
1383 g_assert_not_reached ();
1389 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return (sharing when possible) the local variable used to carry stack
 * slot SLOT of type INS->type across basic-block boundaries.  Shared vars
 * are cached in cfg->intvars keyed by (stack type, slot).
 */
1392 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1397 /* inlining can result in deeper stacks */
1398 if (slot >= cfg->header->max_stack)
1399 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index: stack types start at 1, hence the -1. */
1401 pos = ins->type - 1 + slot * STACK_MAX;
1403 switch (ins->type) {
1410 if ((vnum = cfg->intvars [pos]))
1411 return cfg->varinfo [vnum];
1412 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1413 cfg->intvars [pos] = res->inst_c0;
/* Fallback path (elided case): non-shareable types get a fresh var. */
1416 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * During AOT compilation, remember the (image, token) pair that produced
 * KEY so the AOT compiler can emit a token-based reference later.
 */
1422 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1425 * Don't use this if a generic_context is set, since that means AOT can't
1426 * look up the method using just the image+token.
1427 * table == 0 means this is a reference made from a wrapper.
1429 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1430 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1431 jump_info_token->image = image;
1432 jump_info_token->token = token;
1433 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1438 * This function is called to handle items that are left on the evaluation stack
1439 * at basic block boundaries. What happens is that we save the values to local variables
1440 * and we reload them later when first entering the target basic block (with the
1441 * handle_loaded_temps () function).
1442 * A single joint point will use the same variables (stored in the array bb->out_stack or
1443 * bb->in_stack, if the basic block is before or after the joint point).
1445 * This function needs to be called _before_ emitting the last instruction of
1446 * the bb (i.e. before emitting a branch).
1447 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spill the COUNT values left on the evaluation stack (SP) into local
 * variables at a basic-block boundary, so successor blocks can reload
 * them (see the long comment above).  Sets cfg->unverifiable when the
 * out/in stack depths of a join point disagree.
 * (Excerpt: some lines elided; structure annotated where visible.)
 */
1450 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1453 MonoBasicBlock *bb = cfg->cbb;
1454 MonoBasicBlock *outb;
1455 MonoInst *inst, **locals;
1460 if (cfg->verbose_level > 3)
1461 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this bblock's out-stack is computed: try to reuse a
 * successor's in_stack, otherwise allocate fresh variables. */
1462 if (!bb->out_scount) {
1463 bb->out_scount = count;
1464 //printf ("bblock %d has out:", bb->block_num);
1466 for (i = 0; i < bb->out_count; ++i) {
1467 outb = bb->out_bb [i];
1468 /* exception handlers are linked, but they should not be considered for stack args */
1469 if (outb->flags & BB_EXCEPTION_HANDLER)
1471 //printf (" %d", outb->block_num);
1472 if (outb->in_stack) {
1474 bb->out_stack = outb->in_stack;
1480 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1481 for (i = 0; i < count; ++i) {
1483 * try to reuse temps already allocated for this purpouse, if they occupy the same
1484 * stack slot and if they are of the same type.
1485 * This won't cause conflicts since if 'local' is used to
1486 * store one of the values in the in_stack of a bblock, then
1487 * the same variable will be used for the same outgoing stack
1489 * This doesn't work when inlining methods, since the bblocks
1490 * in the inlined methods do not inherit their in_stack from
1491 * the bblock they are inlined to. See bug #58863 for an
1494 if (cfg->inlined_method)
1495 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1497 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this block's out-stack to every successor's in_stack,
 * flagging a depth mismatch as unverifiable IL. */
1502 for (i = 0; i < bb->out_count; ++i) {
1503 outb = bb->out_bb [i];
1504 /* exception handlers are linked, but they should not be considered for stack args */
1505 if (outb->flags & BB_EXCEPTION_HANDLER)
1507 if (outb->in_scount) {
1508 if (outb->in_scount != bb->out_scount) {
1509 cfg->unverifiable = TRUE;
1512 continue; /* check they are the same locals */
1514 outb->in_scount = count;
1515 outb->in_stack = bb->out_stack;
1518 locals = bb->out_stack;
/* Store each stack value into its spill variable and replace the stack
 * entry with the variable so later code reads the spilled copy. */
1520 for (i = 0; i < count; ++i) {
1521 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1522 inst->cil_code = sp [i]->cil_code;
1523 sp [i] = locals [i];
1524 if (cfg->verbose_level > 3)
1525 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1529 * It is possible that the out bblocks already have in_stack assigned, and
1530 * the in_stacks differ. In this case, we will store to all the different
1537 /* Find a bblock which has a different in_stack */
1539 while (bindex < bb->out_count) {
1540 outb = bb->out_bb [bindex];
1541 /* exception handlers are linked, but they should not be considered for stack args */
1542 if (outb->flags & BB_EXCEPTION_HANDLER) {
1546 if (outb->in_stack != locals) {
1547 for (i = 0; i < count; ++i) {
1548 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1549 inst->cil_code = sp [i]->cil_code;
1550 sp [i] = locals [i];
1551 if (cfg->verbose_level > 3)
1552 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1554 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 * Emit IR loading a runtime constant described by (PATCH_TYPE, DATA).
 * Under AOT this becomes a patchable AOT constant; otherwise the patch
 * is resolved immediately and emitted as a plain pointer constant.
 */
1564 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1568 if (cfg->compile_aot) {
1569 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1575 ji.type = patch_type;
1576 ji.data.target = data;
1577 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1578 mono_error_assert_ok (&error);
1580 EMIT_NEW_PCONST (cfg, ins, target);
/* mini_emit_runtime_constant: public wrapper around emit_runtime_constant (). */
1586 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1588 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 * Emit IR that zeroes SIZE bytes at DESTREG+OFFSET (VAL must be 0).
 * Small aligned sizes use immediate stores; larger regions fall back to
 * a register-based store loop, widest stores first, narrowing for the
 * unaligned/odd tail.  (Excerpt: loop headers elided.)
 */
1592 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1596 g_assert (val == 0);
/* Fast path: a single immediate store covers the whole region. */
1601 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1604 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1607 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1610 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1612 #if SIZEOF_REGISTER == 8
1614 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize the value in a register once, then store it. */
1620 val_reg = alloc_preg (cfg);
1622 if (SIZEOF_REGISTER == 8)
1623 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1625 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1628 /* This could be optimized further if neccesary */
1630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* 8-byte stores only when the backend tolerates unaligned access. */
1637 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * as load/store pairs: widest element first (8-byte only with unaligned
 * access support on 64-bit), narrowing to 4-, 2- and 1-byte copies for
 * the tail.  (Excerpt: loop headers and offset updates elided.)
 */
1668 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1675 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1676 g_assert (size < 10000);
1679 /* This could be optimized further if neccesary */
/* Byte-wise copy path (used for small/unaligned cases — header elided). */
1681 cur_reg = alloc_preg (cfg);
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1690 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1692 cur_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1702 cur_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mono_create_fast_tls_getter:
 * Create an OP_TLS_GET instruction reading the TLS slot for KEY directly,
 * when the architecture supports fast TLS and the offset is known.
 * Returns early (elided — presumably NULL) under AOT or when unsupported,
 * so the caller falls back to the slow path.
 */
1728 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1730 int tls_offset = mono_tls_get_tls_offset (key);
1732 if (cfg->compile_aot)
1735 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1737 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1738 ins->dreg = mono_alloc_preg (cfg);
1739 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Counterpart to mono_create_fast_tls_getter: create an OP_TLS_SET storing
 * VALUE into the TLS slot for KEY when fast TLS is available (not AOT).
 */
1746 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1748 int tls_offset = mono_tls_get_tls_offset (key);
1750 if (cfg->compile_aot)
1753 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1755 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1756 ins->sreg1 = value->dreg;
1757 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR reading TLS slot KEY.  Prefers the fast inline getter; falls
 * back to a trampoline call (AOT) or a JIT icall to the TLS getter
 * function (JIT).  Fallback can be forced via the use_fallback_tls
 * debug option.
 */
1765 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1767 MonoInst *fast_tls = NULL;
1769 if (!mini_get_debug_options ()->use_fallback_tls)
1770 fast_tls = mono_create_fast_tls_getter (cfg, key);
1773 MONO_ADD_INS (cfg->cbb, fast_tls);
1777 if (cfg->compile_aot) {
1780 * tls getters are critical pieces of code and we don't want to resolve them
1781 * through the standard plt/tramp mechanism since we might expose ourselves
1782 * to crashes and infinite recursions.
1784 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1785 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1787 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1788 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR writing VALUE into TLS slot KEY.  Mirrors mono_create_tls_get:
 * fast inline setter when possible, otherwise a setter trampoline (AOT)
 * or a JIT icall.
 */
1793 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1795 MonoInst *fast_tls = NULL;
1797 if (!mini_get_debug_options ()->use_fallback_tls)
1798 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1801 MONO_ADD_INS (cfg->cbb, fast_tls);
1805 if (cfg->compile_aot) {
1807 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1808 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1810 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1811 return mono_emit_jit_icall (cfg, setter, &value);
1818 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 * Emit IR linking this frame's LMF (last managed frame) onto the
 * thread's LMF stack, per the pseudocode in the leading comment.
 * (Excerpt: some branch structure elided.)
 */
1821 emit_push_lmf (MonoCompile *cfg)
1824 * Emit IR to push the LMF:
1825 * lmf_addr = <lmf_addr from tls>
1826 * lmf->lmf_addr = lmf_addr
1827 * lmf->prev_lmf = *lmf_addr
1830 MonoInst *ins, *lmf_ins;
1835 int lmf_reg, prev_lmf_reg;
1837 * Store lmf_addr in a variable, so it can be allocated to a global register.
1839 if (!cfg->lmf_addr_var)
1840 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from the jit_tls structure... */
1843 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1845 int jit_tls_dreg = ins->dreg;
1847 lmf_reg = alloc_preg (cfg);
1848 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...the other reads it straight from its own TLS slot. */
1850 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1853 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1855 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1856 lmf_reg = ins->dreg;
1858 prev_lmf_reg = alloc_preg (cfg);
1859 /* Save previous_lmf */
1860 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1861 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this frame's LMF the head of the list. */
1863 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1869 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 * Emit IR unlinking this frame's LMF from the thread's LMF stack:
 * *(lmf->lmf_addr) = lmf->prev_lmf.
 */
1872 emit_pop_lmf (MonoCompile *cfg)
1874 int lmf_reg, lmf_addr_reg;
1880 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1881 lmf_reg = ins->dreg;
1885 * Emit IR to pop the LMF:
1886 * *(lmf->lmf_addr) = lmf->prev_lmf
1888 /* This could be called before emit_push_lmf () */
1889 if (!cfg->lmf_addr_var)
1890 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1891 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1893 prev_lmf_reg = alloc_preg (cfg);
1894 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1895 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall (FUNC) passing the current method.
 * Skipped while inlining and unless ENTER_LEAVE profiling is enabled.
 */
1899 emit_instrumentation_call (MonoCompile *cfg, void *func)
1901 MonoInst *iargs [1];
1904 * Avoid instrumenting inlined methods since it can
1905 * distort profiling results.
1907 if (cfg->method != cfg->current_method)
1910 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1911 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1912 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call opcode family (VOIDCALL/CALL/LCALL/RCALL/FCALL/VCALL)
 * for return type TYPE, picking the _REG variant for indirect calls
 * (CALLI) and _MEMBASE for virtual calls (VIRT).  Enums and generic
 * insts are unwrapped and re-dispatched (goto targets elided).
 */
1917 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1920 type = mini_get_underlying_type (type);
1921 switch (type->type) {
1922 case MONO_TYPE_VOID:
1923 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1930 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1934 case MONO_TYPE_FNPTR:
1935 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1936 case MONO_TYPE_CLASS:
1937 case MONO_TYPE_STRING:
1938 case MONO_TYPE_OBJECT:
1939 case MONO_TYPE_SZARRAY:
1940 case MONO_TYPE_ARRAY:
1941 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1944 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1947 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1949 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1951 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1952 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying integral type. */
1953 if (type->data.klass->enumtype) {
1954 type = mono_class_enum_basetype (type->data.klass);
1957 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1958 case MONO_TYPE_TYPEDBYREF:
1959 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1960 case MONO_TYPE_GENERICINST:
1961 type = &type->data.generic_class->container_class->byval_arg;
1964 case MONO_TYPE_MVAR:
/* gsharedvt return values are handled as vtypes. */
1966 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1968 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/* True when T is a primitive scalar CLR type (boolean..u8, or native
 * int/uint).  As noted below, the byref flag of T is deliberately ignored. */
1973 //XXX this ignores if t is byref
1974 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1977 * target_type_is_incompatible:
1978 * @cfg: MonoCompile context
1980 * Check that the item @arg on the evaluation stack can be stored
1981 * in the target type (can be a local, or field, etc).
1982 * The cfg arg can be used to check if we need verification or just
1985 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 * Return non-zero when the stack value ARG cannot be stored into TARGET
 * (see the descriptive comment above).  (Excerpt: most return statements
 * are elided; the visible checks are annotated.)
 */
1988 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1990 MonoType *simple_type;
1993 if (target->byref) {
1994 /* FIXME: check that the pointed to types match */
1995 if (arg->type == STACK_MP) {
1996 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1997 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1998 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2000 /* if the target is native int& or same type */
2001 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2004 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2005 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2006 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2010 if (arg->type == STACK_PTR)
2015 simple_type = mini_get_underlying_type (target);
2016 switch (simple_type->type) {
2017 case MONO_TYPE_VOID:
2025 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2029 /* STACK_MP is needed when setting pinned locals */
2030 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2035 case MONO_TYPE_FNPTR:
2037 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2038 * in native int. (#688008).
2040 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2043 case MONO_TYPE_CLASS:
2044 case MONO_TYPE_STRING:
2045 case MONO_TYPE_OBJECT:
2046 case MONO_TYPE_SZARRAY:
2047 case MONO_TYPE_ARRAY:
2048 if (arg->type != STACK_OBJ)
2050 /* FIXME: check type compatibility */
2054 if (arg->type != STACK_I8)
2058 if (arg->type != cfg->r4_stack_type)
2062 if (arg->type != STACK_R8)
2065 case MONO_TYPE_VALUETYPE:
2066 if (arg->type != STACK_VTYPE)
2068 klass = mono_class_from_mono_type (simple_type);
2069 if (klass != arg->klass)
2072 case MONO_TYPE_TYPEDBYREF:
2073 if (arg->type != STACK_VTYPE)
2075 klass = mono_class_from_mono_type (simple_type);
2076 if (klass != arg->klass)
2079 case MONO_TYPE_GENERICINST:
2080 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2081 MonoClass *target_class;
2082 if (arg->type != STACK_VTYPE)
2084 klass = mono_class_from_mono_type (simple_type);
2085 target_class = mono_class_from_mono_type (target);
2086 /* The second cases is needed when doing partial sharing */
2087 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2091 if (arg->type != STACK_OBJ)
2093 /* FIXME: check type compatibility */
2097 case MONO_TYPE_MVAR:
/* Type variables only appear here under generic sharing. */
2098 g_assert (cfg->gshared);
2099 if (mini_type_var_is_vt (simple_type)) {
2100 if (arg->type != STACK_VTYPE)
2103 if (arg->type != STACK_OBJ)
2108 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2114 * Prepare arguments for passing to a function call.
2115 * Return a non-zero value if the arguments can't be passed to the given
2117 * The type checks are not yet complete and some conversions may need
2118 * casts on 32 or 64 bit architectures.
2120 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 * Return non-zero when the stack values in ARGS cannot be passed to SIG
 * (see the descriptive comment above).  Byref params accept STACK_MP or
 * STACK_PTR; everything else is matched by stack type.  (Excerpt: return
 * statements elided.)
 */
2123 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2125 MonoType *simple_type;
/* 'this' (args [0]) may be an object, managed pointer or native pointer. */
2129 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2133 for (i = 0; i < sig->param_count; ++i) {
2134 if (sig->params [i]->byref) {
2135 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2139 simple_type = mini_get_underlying_type (sig->params [i]);
2141 switch (simple_type->type) {
2142 case MONO_TYPE_VOID:
2151 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2157 case MONO_TYPE_FNPTR:
2158 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2161 case MONO_TYPE_CLASS:
2162 case MONO_TYPE_STRING:
2163 case MONO_TYPE_OBJECT:
2164 case MONO_TYPE_SZARRAY:
2165 case MONO_TYPE_ARRAY:
2166 if (args [i]->type != STACK_OBJ)
2171 if (args [i]->type != STACK_I8)
2175 if (args [i]->type != cfg->r4_stack_type)
2179 if (args [i]->type != STACK_R8)
2182 case MONO_TYPE_VALUETYPE:
/* Enums are checked against their underlying type (re-dispatch elided). */
2183 if (simple_type->data.klass->enumtype) {
2184 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2187 if (args [i]->type != STACK_VTYPE)
2190 case MONO_TYPE_TYPEDBYREF:
2191 if (args [i]->type != STACK_VTYPE)
2194 case MONO_TYPE_GENERICINST:
2195 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2198 case MONO_TYPE_MVAR:
2200 if (args [i]->type != STACK_VTYPE)
2204 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Convert a *CALL_MEMBASE opcode into its direct-call counterpart.
 * (Excerpt: the return statements between case labels are elided.)
 */
2212 callvirt_to_call (int opcode)
2215 case OP_CALL_MEMBASE:
2217 case OP_VOIDCALL_MEMBASE:
2219 case OP_FCALL_MEMBASE:
2221 case OP_RCALL_MEMBASE:
2223 case OP_VCALL_MEMBASE:
2225 case OP_LCALL_MEMBASE:
2228 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Convert a *CALL_MEMBASE opcode into its indirect (*CALL_REG) counterpart.
 */
2235 callvirt_to_call_reg (int opcode)
2238 case OP_CALL_MEMBASE:
2240 case OP_VOIDCALL_MEMBASE:
2241 return OP_VOIDCALL_REG;
2242 case OP_FCALL_MEMBASE:
2243 return OP_FCALL_REG;
2244 case OP_RCALL_MEMBASE:
2245 return OP_RCALL_REG;
2246 case OP_VCALL_MEMBASE:
2247 return OP_VCALL_REG;
2248 case OP_LCALL_MEMBASE:
2249 return OP_LCALL_REG;
2251 g_assert_not_reached ();
/*
 * emit_imt_argument:
 * Arrange for the IMT argument (either an explicit IMT_ARG value or a
 * constant for METHOD) to be passed to CALL in MONO_ARCH_IMT_REG.
 * The LLVM path records the register on the call instead of adding an
 * outarg directly.
 */
2257 /* Either METHOD or IMT_ARG needs to be set */
2259 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2263 if (COMPILE_LLVM (cfg)) {
2265 method_reg = alloc_preg (cfg);
2266 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2268 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2269 method_reg = ins->dreg;
2273 call->imt_arg_reg = method_reg;
2275 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same logic, always via an outarg register. */
2280 method_reg = alloc_preg (cfg);
2281 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2283 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2284 method_reg = ins->dreg;
2287 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its type and target.
 * (ip/type assignments elided in this excerpt.)
 */
2290 static MonoJumpInfo *
2291 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2293 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2297 ji->data.target = target;
/* mini_class_check_context_used: per-compile wrapper around
 * mono_class_check_context_used () (guard condition elided in excerpt). */
2303 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2306 return mono_class_check_context_used (klass);
/* mini_method_check_context_used: per-compile wrapper around
 * mono_method_check_context_used () (guard condition elided in excerpt). */
2312 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2315 return mono_method_check_context_used (method);
2321 * check_method_sharing:
2323 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * check_method_sharing:
 * Decide whether calling CMETHOD requires passing its vtable
 * (*OUT_PASS_VTABLE) and/or a method RGCTX (*OUT_PASS_MRGCTX), based on
 * whether the target can be compiled generic-shared.
 */
2326 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2328 gboolean pass_vtable = FALSE;
2329 gboolean pass_mrgctx = FALSE;
/* Vtable is only relevant for static/valuetype methods on generic classes. */
2331 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2332 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2333 gboolean sharable = FALSE;
2335 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2339 * Pass vtable iff target method might
2340 * be shared, which means that sharing
2341 * is enabled for its class and its
2342 * context is sharable (and it's not a
2345 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) need an mrgctx instead of a vtable. */
2349 if (mini_method_get_context (cmethod) &&
2350 mini_method_get_context (cmethod)->method_inst) {
2351 g_assert (!pass_vtable);
2353 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2356 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2361 if (out_pass_vtable)
2362 *out_pass_vtable = pass_vtable;
2363 if (out_pass_mrgctx)
2364 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS: choose the opcode (tail call,
 * direct, indirect, virtual), set up the vtype return (via vret_var or
 * OP_OUTARG_VTRETADDR), convert R4 args for soft-float backends, and
 * let the arch/LLVM backend lower the outgoing arguments.
 * (Excerpt: several branch headers elided.)
 */
2367 inline static MonoCallInst *
2368 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2369 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2373 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls count as a method leave for the profiler. */
2381 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2383 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2385 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2388 call->signature = sig;
2389 call->rgctx_reg = rgctx;
2390 sig_ret = mini_get_underlying_type (sig->ret);
2392 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Tail-call path: vtype returns go through cfg->vret_addr. */
2395 if (mini_type_is_vtype (sig_ret)) {
2396 call->vret_var = cfg->vret_addr;
2397 //g_assert_not_reached ();
2399 } else if (mini_type_is_vtype (sig_ret)) {
2400 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2403 temp->backend.is_pinvoke = sig->pinvoke;
2406 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2407 * address of return value to increase optimization opportunities.
2408 * Before vtype decomposition, the dreg of the call ins itself represents the
2409 * fact the call modifies the return value. After decomposition, the call will
2410 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2411 * will be transformed into an LDADDR.
2413 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2414 loada->dreg = alloc_preg (cfg);
2415 loada->inst_p0 = temp;
2416 /* We reference the call too since call->dreg could change during optimization */
2417 loada->inst_p1 = call;
2418 MONO_ADD_INS (cfg->cbb, loada);
2420 call->inst.dreg = temp->dreg;
2422 call->vret_var = loada;
2423 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2424 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2426 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2427 if (COMPILE_SOFT_FLOAT (cfg)) {
2429 * If the call has a float argument, we would need to do an r8->r4 conversion using
2430 * an icall, but that cannot be done during the call sequence since it would clobber
2431 * the call registers + the stack. So we do it before emitting the call.
2433 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2435 MonoInst *in = call->args [i];
2437 if (i >= sig->hasthis)
2438 t = sig->params [i - sig->hasthis];
2440 t = &mono_defaults.int_class->byval_arg;
2441 t = mono_type_get_underlying_type (t);
2443 if (!t->byref && t->type == MONO_TYPE_R4) {
2444 MonoInst *iargs [1];
2448 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2450 /* The result will be in an int vreg */
2451 call->args [i] = conv;
2457 call->need_unbox_trampoline = unbox_trampoline;
2460 if (COMPILE_LLVM (cfg))
2461 mono_llvm_emit_call (cfg, call);
2463 mono_arch_emit_call (cfg, call);
2465 mono_arch_emit_call (cfg, call);
/* Track outgoing-argument area size and mark the method as making calls. */
2468 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2469 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Pass RGCTX_ARG (already copied into RGCTX_REG) to CALL via
 * MONO_ARCH_RGCTX_REG and mark the call/compile as using the rgctx reg.
 */
2475 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2477 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2478 cfg->uses_rgctx_reg = TRUE;
2479 call->rgctx_reg = TRUE;
2481 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG, optionally
 * passing an IMT argument and an rgctx argument.  For pinvoke wrappers
 * with calling-convention checking enabled, brackets the call with
 * OP_GET_SP/OP_SET_SP and throws ExecutionEngineException if the stack
 * pointer changed across the call.
 */
2485 inline static MonoInst*
2486 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2491 gboolean check_sp = FALSE;
2493 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2494 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2496 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Snapshot the rgctx value before argument lowering can clobber it. */
2501 rgctx_reg = mono_alloc_preg (cfg);
2502 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2506 if (!cfg->stack_inbalance_var)
2507 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2509 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2510 ins->dreg = cfg->stack_inbalance_var->dreg;
2511 MONO_ADD_INS (cfg->cbb, ins);
2514 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2516 call->inst.sreg1 = addr->dreg;
2519 emit_imt_argument (cfg, call, NULL, imt_arg);
2521 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Post-call SP check: compare against the saved SP and restore it
 * before raising, so the throw itself doesn't run on a bad stack. */
2526 sp_reg = mono_alloc_preg (cfg);
2528 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2530 MONO_ADD_INS (cfg->cbb, ins);
2532 /* Restore the stack so we don't crash when throwing the exception */
2533 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2534 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2535 MONO_ADD_INS (cfg->cbb, ins);
2537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2538 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2542 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2544 return (MonoInst*)call;
2548 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2551 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2554 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2555 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2557 #ifndef DISABLE_REMOTING
2558 gboolean might_be_remote = FALSE;
2560 gboolean virtual_ = this_ins != NULL;
2561 gboolean enable_for_aot = TRUE;
2564 MonoInst *call_target = NULL;
2566 gboolean need_unbox_trampoline;
2569 sig = mono_method_signature (method);
2571 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2572 g_assert_not_reached ();
2575 rgctx_reg = mono_alloc_preg (cfg);
2576 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2579 if (method->string_ctor) {
2580 /* Create the real signature */
2581 /* FIXME: Cache these */
2582 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2583 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2588 context_used = mini_method_check_context_used (cfg, method);
2590 #ifndef DISABLE_REMOTING
2591 might_be_remote = this_ins && sig->hasthis &&
2592 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2593 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2595 if (might_be_remote && context_used) {
2598 g_assert (cfg->gshared);
2600 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2602 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2606 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2607 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2609 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2611 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2613 #ifndef DISABLE_REMOTING
2614 if (might_be_remote)
2615 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2618 call->method = method;
2619 call->inst.flags |= MONO_INST_HAS_METHOD;
2620 call->inst.inst_left = this_ins;
2621 call->tail_call = tail;
2624 int vtable_reg, slot_reg, this_reg;
2627 this_reg = this_ins->dreg;
2629 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2630 MonoInst *dummy_use;
2632 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2634 /* Make a call to delegate->invoke_impl */
2635 call->inst.inst_basereg = this_reg;
2636 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2637 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2639 /* We must emit a dummy use here because the delegate trampoline will
2640 replace the 'this' argument with the delegate target making this activation
2641 no longer a root for the delegate.
2642 This is an issue for delegates that target collectible code such as dynamic
2643 methods of GC'able assemblies.
2645 For a test case look into #667921.
2647 FIXME: a dummy use is not the best way to do it as the local register allocator
2648 will put it on a caller save register and spil it around the call.
2649 Ideally, we would either put it on a callee save register or only do the store part.
2651 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2653 return (MonoInst*)call;
2656 if ((!cfg->compile_aot || enable_for_aot) &&
2657 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2658 (MONO_METHOD_IS_FINAL (method) &&
2659 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2660 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2662 * the method is not virtual, we just need to ensure this is not null
2663 * and then we can call the method directly.
2665 #ifndef DISABLE_REMOTING
2666 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2668 * The check above ensures method is not gshared, this is needed since
2669 * gshared methods can't have wrappers.
2671 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2675 if (!method->string_ctor)
2676 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2678 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2679 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2681 * the method is virtual, but we can statically dispatch since either
2682 * it's class or the method itself are sealed.
2683 * But first we need to ensure it's not a null reference.
2685 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2687 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2688 } else if (call_target) {
2689 vtable_reg = alloc_preg (cfg);
2690 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2692 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2693 call->inst.sreg1 = call_target->dreg;
2694 call->inst.flags &= !MONO_INST_HAS_METHOD;
2696 vtable_reg = alloc_preg (cfg);
2697 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2698 if (mono_class_is_interface (method->klass)) {
2699 guint32 imt_slot = mono_method_get_imt_slot (method);
2700 emit_imt_argument (cfg, call, call->method, imt_arg);
2701 slot_reg = vtable_reg;
2702 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2704 slot_reg = vtable_reg;
2705 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2706 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2708 g_assert (mono_method_signature (method)->generic_param_count);
2709 emit_imt_argument (cfg, call, call->method, imt_arg);
2713 call->inst.sreg1 = slot_reg;
2714 call->inst.inst_offset = offset;
2715 call->is_virtual = TRUE;
2719 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a plain (non-tail) call to METHOD using the
 * method's own signature, with no imt/rgctx arguments.
 */
2728 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2730 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * All the virtual/tail/rgctx/unbox flags passed to mono_emit_call_args are FALSE.
 */
2734 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2741 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2744 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2746 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C entry point is FUNC. The icall is
 * looked up by address and invoked through its registered wrapper.
 */
2750 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2752 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2756 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2760 * mono_emit_abs_call:
2762 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2764 inline static MonoInst*
2765 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2766 MonoMethodSignature *sig, MonoInst **args)
2768 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2772 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2775 if (cfg->abs_patches == NULL)
2776 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2777 g_hash_table_insert (cfg->abs_patches, ji, ji);
2778 ins = mono_emit_native_call (cfg, ji, sig, args);
2779 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2783 static MonoMethodSignature*
2784 sig_to_rgctx_sig (MonoMethodSignature *sig)
2786 // FIXME: memory allocation
2787 MonoMethodSignature *res;
2790 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2791 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2792 res->param_count = sig->param_count + 1;
2793 for (i = 0; i < sig->param_count; ++i)
2794 res->params [i] = sig->params [i];
2795 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2799 /* Make an indirect call to FSIG passing an additional argument */
2801 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2803 MonoMethodSignature *csig;
/* Small fixed buffer to avoid a mempool allocation for common arities */
2804 MonoInst *args_buf [16];
2806 int i, pindex, tmp_reg;
2808 /* Make a call with an rgctx/extra arg */
/* +2: room for an optional 'this' and for the appended extra argument */
2809 if (fsig->param_count + 2 < 16)
2812 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* Copy 'this' (when present) and the declared parameters into the new arg array */
2815 args [pindex ++] = orig_args [0];
2816 for (i = 0; i < fsig->param_count; ++i)
2817 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Materialize the extra argument from ARG_REG as the last parameter */
2818 tmp_reg = alloc_preg (cfg);
2819 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* csig = fsig plus one trailing pointer-sized parameter */
2820 csig = sig_to_rgctx_sig (fsig);
2821 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2824 /* Emit an indirect call to the function descriptor ADDR */
2826 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2828 int addr_reg, arg_reg;
2829 MonoInst *call_target;
/* Only valid in llvm-only mode, where indirect calls go through <addr, arg> descriptors */
2831 g_assert (cfg->llvm_only);
2834 * addr points to a <addr, arg> pair, load both of them, and
2835 * make a call to addr, passing arg as an extra arg.
/* Load the code address from the first slot of the descriptor */
2837 addr_reg = alloc_preg (cfg);
2838 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
/* Load the extra argument from the second slot */
2839 arg_reg = alloc_preg (cfg);
2840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2842 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly, bypassing their wrappers.
 * Disabled under LLVM (non llvm-only), when emitting sdb sequence points,
 * or when explicitly disabled on CFG.
 */
2846 direct_icalls_enabled (MonoCompile *cfg)
2850 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2852 if (cfg->compile_llvm && !cfg->llvm_only)
2855 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2861 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2864 * Call the jit icall without a wrapper if possible.
2865 * The wrapper is needed for the following reasons:
2866 * - to handle exceptions thrown using mono_raise_exceptions () from the
2867 * icall function. The EH code needs the lmf frame pushed by the
2868 * wrapper to be able to unwind back to managed code.
2869 * - to be able to do stack walks for asynchronously suspended
2870 * threads when debugging.
/* Fast path: icall cannot raise and direct icalls are allowed */
2872 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create the icall wrapper method so it can be inlined below */
2876 if (!info->wrapper_method) {
2877 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2878 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method to other threads before use */
2880 mono_memory_barrier ();
2884 * Inline the wrapper method, which is basically a call to the C icall, and
2885 * an exception check.
2887 costs = inline_method (cfg, info->wrapper_method, NULL,
2888 args, NULL, il_offset, TRUE);
2889 g_assert (costs > 0);
2890 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Slow path: go through the registered wrapper */
2894 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee might
 * return a small integer without the upper bits initialized (pinvoke / LLVM).
 * Returns the (possibly replaced) result instruction.
 */
2899 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2901 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2902 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2906 * Native code might return non register sized integers
2907 * without initializing the upper bits.
/* Pick the sign/zero-extension matching the return type's load opcode */
2909 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2910 case OP_LOADI1_MEMBASE:
2911 widen_op = OP_ICONV_TO_I1;
2913 case OP_LOADU1_MEMBASE:
2914 widen_op = OP_ICONV_TO_U1;
2916 case OP_LOADI2_MEMBASE:
2917 widen_op = OP_ICONV_TO_I2;
2919 case OP_LOADU2_MEMBASE:
2920 widen_op = OP_ICONV_TO_U2;
2926 if (widen_op != -1) {
2927 int dreg = alloc_preg (cfg);
2930 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2931 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit IR which throws a MethodAccessException for CALLER accessing CALLEE.
 * Both methods are passed as rgctx-aware method constants to the icall.
 */
2942 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2944 MonoInst *args [16];
2946 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2947 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2949 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy (3-arg) helper, caching it in a static.
 * NOTE(review): the lazy init of the static is not synchronized — benign only
 * if concurrent initializers compute the same value; confirm callers' threading.
 */
2953 get_memcpy_method (void)
2955 static MonoMethod *memcpy_method = NULL;
2956 if (!memcpy_method) {
2957 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2959 g_error ("Old corlib found. Install a new one");
2961 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Compute in *WB_BITMAP a bitmask with one bit per pointer-sized slot of
 * KLASS (starting at byte OFFSET) that holds a reference, recursing into
 * embedded value types with references.
 */
2965 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2967 MonoClassField *field;
2968 gpointer iter = NULL;
2970 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields occupy no instance slot */
2973 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the MonoObject header; strip it */
2975 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2976 if (mini_type_is_reference (mono_field_get_type (field))) {
/* Reference slots must be pointer aligned for the bitmap to be valid */
2977 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2978 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2980 MonoClass *field_class = mono_class_from_mono_type (field->type);
2981 if (field_class->has_references)
2982 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Picks, in order:
 * the backend's OP_CARD_TABLE_WBARRIER opcode, an inline card-table mark,
 * or a call to the GC's write barrier method. No-op if barriers are disabled.
 */
2988 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2990 int card_table_shift_bits;
2991 gpointer card_table_mask;
2993 MonoInst *dummy_use;
2994 int nursery_shift_bits;
2995 size_t nursery_size;
2997 if (!cfg->gen_write_barriers)
3000 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3002 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Preferred: a single backend opcode hiding the barrier (JIT only, not AOT/LLVM) */
3004 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3007 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3008 wbarrier->sreg1 = ptr->dreg;
3009 wbarrier->sreg2 = value->dreg;
3010 MONO_ADD_INS (cfg->cbb, wbarrier);
3011 } else if (card_table) {
3012 int offset_reg = alloc_preg (cfg);
3017 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
3018 * collector case, so, for the serial collector, it might slightly slow down nursery
3019 * collections. We also expect that the host system and the target system have the same card
3020 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked */
3023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3024 if (card_table_mask)
3025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3027 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3028 * IMM's larger than 32bits.
3030 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3031 card_reg = ins->dreg;
/* Mark the card: card_table [index] = 1 */
3033 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3034 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier */
3036 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3037 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier */
3040 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a KLASS-typed value of
 * SIZE bytes from iargs [1] to iargs [0]. Returns whether the copy was
 * emitted (falls back to the generic path when it returns FALSE).
 */
3044 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3046 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3047 unsigned need_wb = 0;
3052 /*types with references can't have alignment smaller than sizeof(void*) */
3053 if (align < SIZEOF_VOID_P)
3056 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3057 if (size > 32 * SIZEOF_VOID_P)
/* Bit i of need_wb set => slot i holds a reference and needs a barrier */
3060 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3062 /* We don't unroll more than 5 stores to avoid code bloat. */
3063 if (size > 5 * SIZEOF_VOID_P) {
3064 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size */
3065 size += (SIZEOF_VOID_P - 1);
3066 size &= ~(SIZEOF_VOID_P - 1);
3068 EMIT_NEW_ICONST (cfg, iargs [2], size);
3069 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3070 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3074 destreg = iargs [0]->dreg;
3075 srcreg = iargs [1]->dreg;
3078 dest_ptr_reg = alloc_preg (cfg);
3079 tmp_reg = alloc_preg (cfg);
/* Walk a cursor over the destination while unrolling pointer-sized copies */
3082 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3084 while (size >= SIZEOF_VOID_P) {
3085 MonoInst *load_inst;
3086 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3087 load_inst->dreg = tmp_reg;
3088 load_inst->inst_basereg = srcreg;
3089 load_inst->inst_offset = offset;
3090 MONO_ADD_INS (cfg->cbb, load_inst);
3092 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references */
3095 emit_write_barrier (cfg, iargs [0], load_inst);
3097 offset += SIZEOF_VOID_P;
3098 size -= SIZEOF_VOID_P;
3101 /*tmp += sizeof (void*)*/
3102 if (size >= SIZEOF_VOID_P) {
3103 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3104 MONO_ADD_INS (cfg->cbb, iargs [0]);
3108 /* Those cannot be references since size < sizeof (void*) */
3110 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3111 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3117 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3118 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3124 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3125 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3134 * Emit code to copy a valuetype of type @klass whose address is stored in
3135 * @src->dreg to memory whose address is stored at @dest->dreg.
3138 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3140 MonoInst *iargs [4];
3143 MonoMethod *memcpy_method;
3144 MonoInst *size_ins = NULL;
3145 MonoInst *memcpy_ins = NULL;
3149 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3152 * This check breaks with spilled vars... need to handle it during verification anyway.
3153 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are only known at runtime via the rgctx */
3156 if (mini_is_gsharedvt_klass (klass)) {
3158 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3159 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3163 n = mono_class_native_size (klass, &align);
3165 n = mono_class_value_size (klass, &align);
3167 /* if native is true there should be no references in the struct */
3168 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3169 /* Avoid barriers when storing to the stack */
3170 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3171 (dest->opcode == OP_LDADDR))) {
3177 context_used = mini_class_check_context_used (cfg, klass);
3179 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3180 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3182 } else if (context_used) {
3183 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3185 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3186 if (!cfg->compile_aot)
3187 mono_class_compute_gc_descriptor (klass);
3191 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3193 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: small copies are inlined, otherwise call the managed memcpy */
3198 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3199 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3200 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3205 iargs [2] = size_ins;
3207 EMIT_NEW_ICONST (cfg, iargs [2], n);
3209 memcpy_method = get_memcpy_method ();
3211 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3213 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset (3-arg) helper, caching it in a static.
 * NOTE(review): like get_memcpy_method, the lazy init is unsynchronized.
 */
3218 get_memset_method (void)
3220 static MonoMethod *memset_method = NULL;
3221 if (!memset_method) {
3222 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3224 g_error ("Old corlib found. Install a new one");
3226 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the valuetype of type KLASS at DEST->dreg.
 * gsharedvt types use a runtime-size bzero helper; small fixed-size types are
 * inlined, larger ones call the managed memset helper.
 */
3230 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3232 MonoInst *iargs [3];
3235 MonoMethod *memset_method;
3236 MonoInst *size_ins = NULL;
3237 MonoInst *bzero_ins = NULL;
3238 static MonoMethod *bzero_method;
3240 /* FIXME: Optimize this for the case when dest is an LDADDR */
3241 mono_class_init (klass);
/* gsharedvt: size and bzero address come from the runtime-generic context */
3242 if (mini_is_gsharedvt_klass (klass)) {
3243 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3244 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3246 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3247 g_assert (bzero_method);
3249 iargs [1] = size_ins;
3250 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3254 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3256 n = mono_class_value_size (klass, &align);
/* Small values: emit inline stores instead of a call */
3258 if (n <= sizeof (gpointer) * 8) {
3259 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3262 memset_method = get_memset_method ();
3264 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3265 EMIT_NEW_ICONST (cfg, iargs [2], n);
3266 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3273 * Emit IR to return either the this pointer for instance method,
3274 * or the mrgctx for static methods.
3277 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3279 MonoInst *this_ins = NULL;
3281 g_assert (cfg->gshared);
/* Instance methods on reference types: the rgctx is reachable through 'this' */
3283 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3284 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3285 !method->klass->valuetype)
3286 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-level context: load the mrgctx from its dedicated variable */
3288 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3289 MonoInst *mrgctx_loc, *mrgctx_var;
3291 g_assert (!this_ins);
3292 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3294 mrgctx_loc = mono_get_vtable_var (cfg);
3295 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3298 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
3299 MonoInst *mrgctx_loc, *mrgctx_var;
3301 /* Default interface methods need an mrgctx since the vtabke at runtime points at an implementing class */
3302 mrgctx_loc = mono_get_vtable_var (cfg);
3303 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3305 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static methods / valuetype methods: the vtable (or mrgctx) is passed explicitly */
3308 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3309 MonoInst *vtable_loc, *vtable_var;
3311 g_assert (!this_ins);
3313 vtable_loc = mono_get_vtable_var (cfg);
3314 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3316 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3317 MonoInst *mrgctx_var = vtable_var;
/* What was loaded is an mrgctx; dereference it to get the class vtable */
3320 vtable_reg = alloc_preg (cfg);
3321 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3322 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable out of 'this' */
3330 vtable_reg = alloc_preg (cfg);
3331 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3336 static MonoJumpInfoRgctxEntry *
3337 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3339 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3340 res->method = method;
3341 res->in_mrgctx = in_mrgctx;
3342 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3343 res->data->type = patch_type;
3344 res->data->data.target = patch_data;
3345 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch the value of rgctx entry ENTRY from RGCTX,
 * walking the (m)rgctx table chain with a fast path, and falling back to the
 * mono_fill_{method,class}_rgctx icalls when a table or slot is still null.
 */
3350 static inline MonoInst*
3351 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3353 MonoInst *args [16];
3356 // FIXME: No fastpath since the slot is not a compile time constant
3358 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3359 if (entry->in_mrgctx)
3360 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3362 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3366 * FIXME: This can be called during decompose, which is a problem since it creates
3368 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3370 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3372 MonoBasicBlock *is_null_bb, *end_bb;
3373 MonoInst *res, *ins, *call;
/* Decode the slot into (mrgctx?, depth, index) coordinates */
3376 slot = mini_get_rgctx_entry_slot (entry);
3378 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3379 index = MONO_RGCTX_SLOT_INDEX (slot);
3381 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many table levels must be traversed to reach 'index' */
3382 for (depth = 0; ; ++depth) {
3383 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3385 if (index < size - 1)
3390 NEW_BBLOCK (cfg, end_bb);
3391 NEW_BBLOCK (cfg, is_null_bb);
3394 rgctx_reg = rgctx->dreg;
3396 rgctx_reg = alloc_preg (cfg);
3398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3399 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3400 NEW_BBLOCK (cfg, is_null_bb);
3402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3403 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3406 for (i = 0; i < depth; ++i) {
3407 int array_reg = alloc_preg (cfg);
3409 /* load ptr to next array */
3410 if (mrgctx && i == 0)
3411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT)
3413 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3414 rgctx_reg = array_reg;
3415 /* is the ptr null? */
3416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3417 /* if yes, jump to actual trampoline */
3418 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path: the slot is populated, just load it */
3422 val_reg = alloc_preg (cfg);
3423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3424 /* is the slot null? */
3425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3426 /* if yes, jump to actual trampoline */
3427 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3430 res_reg = alloc_preg (cfg);
3431 MONO_INST_NEW (cfg, ins, OP_MOVE);
3432 ins->dreg = res_reg;
3433 ins->sreg1 = val_reg;
3434 MONO_ADD_INS (cfg->cbb, ins);
3436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot via the runtime and merge back into res_reg */
3439 MONO_START_BB (cfg, is_null_bb);
3441 EMIT_NEW_ICONST (cfg, args [1], index);
3443 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3445 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3446 MONO_INST_NEW (cfg, ins, OP_MOVE);
3447 ins->dreg = res_reg;
3448 ins->sreg1 = call->dreg;
3449 MONO_ADD_INS (cfg->cbb, ins);
3450 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3452 MONO_START_BB (cfg, end_bb);
3461 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3464 static inline MonoInst*
3465 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3468 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3470 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE info (vtable, klass, sizes, ...) of KLASS
 * from the runtime generic context of the current method.
 */
3474 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3475 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3477 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3478 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3480 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the RGCTX_TYPE info associated with signature SIG from
 * the runtime generic context.
 */
3484 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3485 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3487 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3488 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3490 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the RGCTX_TYPE info for a gsharedvt call to CMETHOD with
 * signature SIG; the (sig, method) pair is described by a mempool-allocated
 * MonoJumpInfoGSharedVtCall.
 */
3494 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3495 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3497 MonoJumpInfoGSharedVtCall *call_info;
3498 MonoJumpInfoRgctxEntry *entry;
3501 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3502 call_info->sig = sig;
3503 call_info->method = cmethod;
3505 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3506 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3508 return emit_rgctx_fetch (cfg, rgctx, entry);
3512 * emit_get_rgctx_virt_method:
3514 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3517 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3518 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3520 MonoJumpInfoVirtMethod *info;
3521 MonoJumpInfoRgctxEntry *entry;
/* Describe the (receiver klass, virtual method) pair for the rgctx lookup */
3524 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3525 info->klass = klass;
3526 info->method = virt_method;
3528 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3529 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3531 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the gsharedvt info descriptor of CMETHOD from the
 * runtime generic context.
 */
3535 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3536 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3538 MonoJumpInfoRgctxEntry *entry;
3541 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3542 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3544 return emit_rgctx_fetch (cfg, rgctx, entry);
3548 * emit_get_rgctx_method:
3550 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3551 * normal constants, else emit a load from the rgctx.
3554 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3555 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic context involved: the value is a compile-time constant */
3557 if (!context_used) {
3560 switch (rgctx_type) {
3561 case MONO_RGCTX_INFO_METHOD:
3562 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3564 case MONO_RGCTX_INFO_METHOD_RGCTX:
3565 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values have no constant form here */
3568 g_assert_not_reached ();
3571 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3572 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3574 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE info of FIELD from the runtime generic
 * context.
 */
3579 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3580 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3582 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3583 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3585 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (DATA, RGCTX_TYPE) in the compile-time
 * gsharedvt info template, reusing an existing entry when possible and
 * growing the mempool-backed entries array on demand.
 */
3589 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3591 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3592 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse a matching entry (LOCAL_OFFSET entries are never shared) */
3597 for (i = 0; i < info->num_entries; ++i) {
3598 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3600 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16) when full */
3604 if (info->num_entries == info->count_entries) {
3605 MonoRuntimeGenericContextInfoTemplate *new_entries;
3606 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3608 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3610 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3611 info->entries = new_entries;
3612 info->count_entries = new_count_entries;
/* Append the new entry and return its index */
3615 idx = info->num_entries;
3616 template_ = &info->entries [idx];
3617 template_->info_type = rgctx_type;
3618 template_->data = data;
3620 info->num_entries ++;
3626 * emit_get_gsharedvt_info:
3628 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3631 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) the slot for (data, rgctx_type) in the info template */
3636 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3637 /* Load info->entries [idx] */
3638 dreg = alloc_preg (cfg);
3639 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by KLASS's byval type */
3645 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3647 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3651 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR to run the class initializer of KLASS if it has not run yet.
 * Uses the backend's OP_GENERIC_CLASS_INIT opcode when available, otherwise
 * an inline initialized-flag check guarding a call to mono_generic_class_init.
 */
3654 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3656 MonoInst *vtable_arg;
3659 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable must come from the rgctx */
3662 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3663 klass, MONO_RGCTX_INFO_VTABLE);
3665 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3669 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3672 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3676 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3677 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3679 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3680 ins->sreg1 = vtable_arg->dreg;
3681 MONO_ADD_INS (cfg->cbb, ins);
3684 MonoBasicBlock *inited_bb;
3685 MonoInst *args [16];
/* Fast path: skip the icall when vtable->initialized is already set */
3687 inited_reg = alloc_ireg (cfg);
3689 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3691 NEW_BBLOCK (cfg, inited_bb);
3693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3696 args [0] = vtable_arg;
3697 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3699 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at native offset ip - cfg->header->code.
 *   Only done when sequence points are enabled and METHOD is the method being
 *   compiled (i.e. not an inlined callee).
 *   NOTE(review): the guard on 'nonempty_stack' before setting the flag is
 *   elided from this listing — presumably 'if (nonempty_stack)'.
 */
3704 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3708 if (cfg->gen_seq_points && cfg->method == method) {
3709 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3711 ins->flags |= MONO_INST_NONEMPTY_STACK;
3712 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *   When --debug=casts is enabled, record the runtime "from" class of the
 *   object in OBJ_REG and the target KLASS into per-thread JIT TLS
 *   (class_cast_from / class_cast_to) so a failing cast can produce a
 *   detailed error message. A null object skips the recording via is_null_bb.
 */
3717 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3719 if (mini_get_debug_options ()->better_cast_details) {
3720 int vtable_reg = alloc_preg (cfg);
3721 int klass_reg = alloc_preg (cfg);
3722 MonoBasicBlock *is_null_bb = NULL;
3724 int to_klass_reg, context_used;
/* Branch over the stores when the object is null. */
3727 NEW_BBLOCK (cfg, is_null_bb);
3729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3733 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* FIXME(review): stray '.' after the '\n' in this message. */
3735 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* from-class = obj->vtable->klass */
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* to-class: from the rgctx in shared code, as a constant otherwise. */
3744 context_used = mini_class_check_context_used (cfg, klass);
3746 MonoInst *class_ins;
3748 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3749 to_klass_reg = class_ins->dreg;
3751 to_klass_reg = alloc_preg (cfg);
3752 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3757 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *   Clear the TLS cast-details recorded by mini_save_cast_details () once the
 *   cast has succeeded, so stale data is not reported for a later failure.
 */
3762 mini_reset_cast_details (MonoCompile *cfg)
3764 /* Reset the variables holding the cast details */
3765 if (mini_get_debug_options ()->better_cast_details) {
3766 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3767 /* It is enough to reset the from field */
3768 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3773 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit a check that OBJ's runtime type is exactly ARRAY_CLASS, throwing
 *   ArrayTypeMismatchException otherwise (used for stelem-style stores).
 *   The comparison strategy depends on the compilation mode: class pointer
 *   compare (SHARED), rgctx-loaded vtable (shared generic code), AOT vtable
 *   constant, or an immediate vtable compare for plain JIT.
 */
3776 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3778 int vtable_reg = alloc_preg (cfg);
3781 context_used = mini_class_check_context_used (cfg, array_class);
/* Record cast details first so a mismatch produces a useful message. */
3783 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also performs the implicit null check on obj. */
3785 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3787 if (cfg->opt & MONO_OPT_SHARED) {
3788 int class_reg = alloc_preg (cfg);
/* Compare obj->vtable->klass against a runtime-resolved class constant. */
3791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3792 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3793 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3794 } else if (context_used) {
3795 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable from the rgctx. */
3797 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3798 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3800 if (cfg->compile_aot) {
/* NOTE(review): failure handling after a NULL vtable is elided here. */
3804 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3806 vt_reg = alloc_preg (cfg);
3807 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3808 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3811 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
/* Plain JIT: the vtable pointer fits in an immediate. */
3813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3817 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3819 mini_reset_cast_details (cfg);
3823 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3824 * generic code is generated.
/*
 * Implemented by calling the managed Nullable<T>.Unbox () method: either as
 * an indirect call through an rgctx-resolved address (shared code) or as a
 * direct managed call, possibly passing the vtable as an extra argument.
 */
3827 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3829 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3832 MonoInst *rgctx, *addr;
3834 /* FIXME: What if the class is shared? We might not
3835 have to get the address of the method from the
3837 addr = emit_get_rgctx_method (cfg, context_used, method,
3838 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3839 if (cfg->llvm_only) {
/* Record the signature so llvm-only calli thunks can be generated. */
3840 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3841 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3843 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3845 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: a direct call, passing the vtable if the method needs it. */
3848 gboolean pass_vtable, pass_mrgctx;
3849 MonoInst *rgctx_arg = NULL;
3851 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3852 g_assert (!pass_mrgctx);
3855 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3858 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3861 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *   Emit the IL 'unbox' semantics for a non-Nullable value type: verify that
 *   the boxed object's element class matches KLASS (throwing
 *   InvalidCastException otherwise) and return the address of the unboxed
 *   payload, i.e. obj + sizeof (MonoObject).
 */
3866 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3870 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3871 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3872 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3873 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3875 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3876 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3877 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3879 /* FIXME: generics */
3880 g_assert (klass->rank == 0);
/* An array can never unbox to a value type. */
3883 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3884 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against an rgctx-resolved element class. */
3890 MonoInst *element_class;
3892 /* This assertion is from the unboxcast insn */
3893 g_assert (klass->rank == 0);
3895 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3896 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3898 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3899 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: a direct class check, with cast details for debugging. */
3901 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3902 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3903 mini_reset_cast_details (cfg);
/* Result: pointer past the MonoObject header, to the vtype payload. */
3906 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3907 MONO_ADD_INS (cfg->cbb, add);
3908 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *   Unbox when KLASS is a gsharedvt type whose instantiation (ref, vtype or
 *   Nullable) is only known at runtime. Emits a three-way branch on the
 *   MONO_RGCTX_INFO_CLASS_BOX_TYPE info and leaves the unboxed value's
 *   address in addr_reg in every case, then loads the value through it.
 */
3915 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3917 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3918 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3922 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime type check + unbox helper; returns obj on success. */
3928 args [1] = klass_inst;
3931 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3933 NEW_BBLOCK (cfg, is_ref_bb);
3934 NEW_BBLOCK (cfg, is_nullable_bb);
3935 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of the instantiation. */
3936 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3943 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3944 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough: plain vtype — address is obj + sizeof (MonoObject). */
3948 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3949 MONO_ADD_INS (cfg->cbb, addr);
3951 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Instantiated as a reference type. */
3954 MONO_START_BB (cfg, is_ref_bb);
3956 /* Save the ref to a temporary */
3957 dreg = alloc_ireg (cfg);
3958 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
/* Funnel all paths into the same addr_reg. */
3959 addr->dreg = addr_reg;
3960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Instantiated as Nullable<T>: call Nullable unbox through an rgctx address. */
3964 MONO_START_BB (cfg, is_nullable_bb);
3967 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3968 MonoInst *unbox_call;
3969 MonoMethodSignature *unbox_sig;
/* Hand-built object -> T signature, since Nullable<T>.Unbox cannot be
 * constructed as a MonoMethod at JIT time for gsharedvt. */
3971 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3972 unbox_sig->ret = &klass->byval_arg;
3973 unbox_sig->param_count = 1;
3974 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3977 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3979 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3981 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3982 addr->dreg = addr_reg;
3985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3988 MONO_START_BB (cfg, end_bb);
/* Load the value from the common address computed on whichever path ran. */
3991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3997 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit IR which allocates an instance of KLASS. FOR_BOX selects the boxing
 *   variant of the managed allocator. The first half (context_used != 0, per
 *   the elided guard) handles shared generic code; the second half handles
 *   non-shared code, with special cases for MONO_OPT_SHARED, out-of-line AOT
 *   corlib allocations, and the GC's managed allocator fast path.
 */
4000 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4002 MonoInst *iargs [2];
/* --- shared generic code path --- */
4007 MonoRgctxInfoType rgctx_info;
4008 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-variable instance size. */
4009 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4011 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4013 if (cfg->opt & MONO_OPT_SHARED)
4014 rgctx_info = MONO_RGCTX_INFO_KLASS;
4016 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4017 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4019 if (cfg->opt & MONO_OPT_SHARED) {
/* ves_icall_object_new takes (domain, klass). */
4020 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4022 alloc_ftn = ves_icall_object_new;
/* ves_icall_object_new_specific takes the vtable directly. */
4025 alloc_ftn = ves_icall_object_new_specific;
4028 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4029 if (known_instance_size) {
4030 int size = mono_class_instance_size (klass);
/* Sanity check: an object can never be smaller than its header. */
4031 if (size < sizeof (MonoObject))
4032 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4034 EMIT_NEW_ICONST (cfg, iargs [1], size);
4036 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4039 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- non-shared path --- */
4042 if (cfg->opt & MONO_OPT_SHARED) {
4043 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4044 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4046 alloc_ftn = ves_icall_object_new;
4047 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4048 /* This happens often in argument checking code, eg. throw new FooException... */
4049 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4050 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4051 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4053 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4054 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type-load error on the cfg. */
4058 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4059 cfg->exception_ptr = klass;
4063 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4065 if (managed_alloc) {
4066 int size = mono_class_instance_size (klass);
4067 if (size < sizeof (MonoObject))
4068 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4070 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4071 EMIT_NEW_ICONST (cfg, iargs [1], size);
4072 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* pass_lw: some allocators take the length in words as the first argument. */
4074 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4076 guint32 lw = vtable->klass->instance_size;
/* Round the byte size up to whole pointer-sized words. */
4077 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4078 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4079 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4082 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4086 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4090 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *   Emit IR which boxes VAL of type KLASS. Special cases:
 *   - Nullable<T>: call the managed Nullable<T>.Box (), directly or through
 *     an rgctx-resolved address in shared code;
 *   - gsharedvt KLASS: runtime dispatch on the box type (vtype / ref /
 *     Nullable), mirroring handle_unbox_gsharedvt;
 *   - otherwise: allocate and store the value past the object header.
 */
4093 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4095 MonoInst *alloc, *ins;
4097 if (mono_class_is_nullable (klass)) {
4098 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4101 if (cfg->llvm_only && cfg->gsharedvt) {
4102 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4103 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4104 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4106 /* FIXME: What if the class is shared? We might not
4107 have to get the method address from the RGCTX. */
4108 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4109 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4110 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4112 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, optionally passing the vtable. */
4115 gboolean pass_vtable, pass_mrgctx;
4116 MonoInst *rgctx_arg = NULL;
4118 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4119 g_assert (!pass_mrgctx);
4122 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4125 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4128 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4132 if (mini_is_gsharedvt_klass (klass)) {
4133 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4134 MonoInst *res, *is_ref, *src_var, *addr;
/* dreg collects the boxed result from all three branches. */
4137 dreg = alloc_ireg (cfg);
4139 NEW_BBLOCK (cfg, is_ref_bb);
4140 NEW_BBLOCK (cfg, is_nullable_bb);
4141 NEW_BBLOCK (cfg, end_bb);
4142 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: instantiated as a vtype — allocate and copy the value in. */
4150 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4153 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Force a vtype store; the exact size is only known at runtime. */
4154 ins->opcode = OP_STOREV_MEMBASE;
4156 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4157 res->type = STACK_OBJ;
4159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Instantiated as a ref type: "boxing" is just reading the reference. */
4162 MONO_START_BB (cfg, is_ref_bb);
4164 /* val is a vtype, so has to load the value manually */
4165 src_var = get_vreg_to_inst (cfg, val->dreg);
4167 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4168 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4169 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4170 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Instantiated as Nullable<T>: call Nullable box through an rgctx address. */
4173 MONO_START_BB (cfg, is_nullable_bb);
4176 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4177 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4179 MonoMethodSignature *box_sig;
4182 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4183 * construct that method at JIT time, so have to do things by hand.
4185 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4186 box_sig->ret = &mono_defaults.object_class->byval_arg;
4187 box_sig->param_count = 1;
4188 box_sig->params [0] = &klass->byval_arg;
4191 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4193 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4194 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4195 res->type = STACK_OBJ;
4199 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4201 MONO_START_BB (cfg, end_bb);
/* Ordinary (non-gsharedvt) box: allocate, then store the value payload. */
4205 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4209 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls may be called
 * directly (written once with a barrier, then read without locking). */
4214 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *   Return whether CMETHOD's icall can be invoked directly, without going
 *   through a wrapper.
 */
4217 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4219 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4220 if (!direct_icalls_enabled (cfg))
4224 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4225 * Whitelist a few icalls for now.
4227 if (!direct_icall_type_hash) {
4228 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4230 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4231 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4232 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4233 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before making it visible to readers. */
4234 mono_memory_barrier ();
4235 direct_icall_type_hash = h;
4238 if (cmethod->klass == mono_defaults.math_class)
4240 /* No locking needed */
4241 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *   Return whether CMETHOD inspects its caller's stack frame (e.g.
 *   System.Type.GetType ()), in which case it must not be called through
 *   mechanisms that hide the real caller.
 */
4247 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4249 if (cmethod->klass == mono_defaults.systemtype_class) {
4250 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *   Intrinsic expansion of Enum.HasFlag (): loads the enum value pointed to
 *   by ENUM_THIS, computes (value & flag) == flag, and returns the boolean
 *   result as an I4. Uses 32- or 64-bit ops depending on the underlying
 *   enum type (the switch selecting is_i4 is partially elided here).
 */
4256 static G_GNUC_UNUSED MonoInst*
4257 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4259 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4260 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4263 switch (enum_type->type) {
4266 #if SIZEOF_REGISTER == 8
4278 MonoInst *load, *and_, *cmp, *ceq;
4279 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4280 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4281 int dest_reg = alloc_ireg (cfg);
/* value = *enum_this; tmp = value & flag; result = (tmp == flag) */
4283 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4284 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4285 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4286 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4288 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need simpler IR. */
4291 load = mono_decompose_opcode (cfg, load);
4292 and_ = mono_decompose_opcode (cfg, and_);
4293 cmp = mono_decompose_opcode (cfg, cmp);
4294 ceq = mono_decompose_opcode (cfg, ceq);
4302 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *   Emit the equivalent of mono_delegate_ctor () inline: allocate a delegate
 *   of KLASS over METHOD with TARGET as the first argument. VIRTUAL_ selects
 *   a virtual-dispatch delegate. Fills in target, method, method_code and
 *   invoke_impl/method_ptr (llvm-only code uses init icalls instead of
 *   trampolines).
 */
4304 static G_GNUC_UNUSED MonoInst*
4305 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4309 gpointer trampoline;
4310 MonoInst *obj, *method_ins, *tramp_ins;
4314 if (virtual_ && !cfg->llvm_only) {
4315 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out when no virtual invoke impl exists for this signature. */
4318 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4322 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4326 /* Inline the contents of mono_delegate_ctor */
4328 /* Set target field */
4329 /* Optimize away setting of NULL target */
4330 if (!MONO_INS_IS_PCONST_NULL (target)) {
4331 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a fresh object still needs a write barrier
 * for GCs that require it. */
4332 if (cfg->gen_write_barriers) {
4333 dreg = alloc_preg (cfg);
4334 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4335 emit_write_barrier (cfg, ptr, target);
4339 /* Set method field */
4340 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4341 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4344 * To avoid looking up the compiled code belonging to the target method
4345 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4346 * store it, and we fill it after the method has been compiled.
4348 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4349 MonoInst *code_slot_ins;
4352 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or reuse) the per-domain code slot under the domain lock. */
4354 domain = mono_domain_get ();
4355 mono_domain_lock (domain);
4356 if (!domain_jit_info (domain)->method_code_hash)
4357 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4358 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4360 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4361 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4363 mono_domain_unlock (domain);
4365 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines; initialize through runtime icalls instead. */
4370 if (cfg->llvm_only) {
4371 MonoInst *args [16];
4376 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4377 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4380 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4386 if (cfg->compile_aot) {
4387 MonoDelegateClassMethodPair *del_tramp;
/* AOT: describe the trampoline with a patch so it is resolved at load time. */
4389 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4390 del_tramp->klass = klass;
4391 del_tramp->method = context_used ? NULL : method;
4392 del_tramp->is_virtual = virtual_;
4393 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4396 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4398 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4399 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4402 /* Set invoke_impl field */
4404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual trampoline info: copy invoke_impl and method_ptr out of it. */
4406 dreg = alloc_preg (cfg);
4407 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4408 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4410 dreg = alloc_preg (cfg);
4411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4412 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field). */
4415 dreg = alloc_preg (cfg);
4416 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4417 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4419 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit the allocation of a multi-dimensional array of the given RANK via
 *   the vararg mono_array_new_va () icall, with the dimension arguments in SP.
 *   Marks the method as containing varargs and disables LLVM compilation,
 *   since LLVM cannot handle the vararg calling convention used here.
 */
4425 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4427 MonoJitICallInfo *info;
4429 /* Need to register the icall so it gets an icall wrapper */
4430 info = mono_get_array_new_va_icall (rank);
4432 cfg->flags |= MONO_CFG_HAS_VARARGS;
4434 /* mono_array_new_va () needs a vararg calling convention */
4435 cfg->exception_message = g_strdup ("array-new");
4436 cfg->disable_llvm = TRUE;
4438 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4439 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4443 * handle_constrained_gsharedvt_call:
4445 * Handle constrained calls where the receiver is a gsharedvt type.
4446 * Return the instruction representing the call. Set the cfg exception on failure.
4449 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4450 gboolean *ref_emit_widen)
4452 MonoInst *ins = NULL;
4453 gboolean emit_widen = *ref_emit_widen;
4456 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4457 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4458 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Supported shapes only: Object/interface/non-corlib-ref target, a simple
 * return type, and at most one (reference, byref or gsharedvt) argument. */
4460 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4461 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4462 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4463 MonoInst *args [16];
4466 * This case handles calls to
4467 * - object:ToString()/Equals()/GetHashCode(),
4468 * - System.IComparable<T>:CompareTo()
4469 * - System.IEquatable<T>:Equals ()
4470 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = method to call, args [2] = constrained class. */
4474 if (mono_method_check_context_used (cmethod))
4475 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD)
4477 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4478 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4480 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4481 if (fsig->hasthis && fsig->param_count) {
4482 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4483 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4484 ins->dreg = alloc_preg (cfg);
4485 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4486 MONO_ADD_INS (cfg->cbb, ins);
4489 if (mini_is_gsharedvt_type (fsig->params [0])) {
4490 int addr_reg, deref_arg_reg;
4492 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4493 deref_arg_reg = alloc_preg (cfg);
4494 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4495 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
/* Store the address of the vtype argument into the localloc'ed array. */
4497 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4498 addr_reg = ins->dreg;
4499 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4501 EMIT_NEW_ICONST (cfg, args [3], 0);
4502 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments: pass deref_arg = 0 and a NULL argument array. */
4505 EMIT_NEW_ICONST (cfg, args [3], 0);
4506 EMIT_NEW_ICONST (cfg, args [4], 0);
4508 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to match fsig->ret. */
4511 if (mini_is_gsharedvt_type (fsig->ret)) {
4512 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4513 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4517 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4518 MONO_ADD_INS (cfg->cbb, add);
4520 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4521 MONO_ADD_INS (cfg->cbb, ins);
4522 /* ins represents the call result */
/* Unsupported shape: give up on gsharedvt for this call. */
4525 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4528 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *   If a GOT variable exists and has not been materialized yet, emit the
 *   OP_LOAD_GOTADDR instruction at the very start of the entry basic block
 *   and a dummy use in the exit block to keep the variable alive.
 */
4537 mono_emit_load_got_addr (MonoCompile *cfg)
4539 MonoInst *getaddr, *dummy_use;
4541 if (!cfg->got_var || cfg->got_var_allocated)
4544 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4545 getaddr->cil_code = cfg->header->code;
4546 getaddr->dreg = cfg->got_var->dreg;
4548 /* Add it to the start of the first bblock */
4549 if (cfg->bb_entry->code) {
/* Manual list prepend: the instruction must come before everything else. */
4550 getaddr->next = cfg->bb_entry->code;
4551 cfg->bb_entry->code = getaddr;
4554 MONO_ADD_INS (cfg->bb_entry, getaddr);
4556 cfg->got_var_allocated = TRUE;
4559 * Add a dummy use to keep the got_var alive, since real uses might
4560 * only be generated by the back ends.
4561 * Add it to end_bblock, so the variable's lifetime covers the whole
4563 * It would be better to make the usage of the got var explicit in all
4564 * cases when the backend needs it (i.e. calls, throw etc.), so this
4565 * wouldn't be needed.
4567 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4568 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, read once from MONO_INLINELIMIT (default
 * INLINE_LENGTH_LIMIT); inline_limit_inited guards the one-time setup. */
4571 static int inline_limit;
4572 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Rejects: explicit NoInlining, synchronized methods, MarshalByRef
 *   classes, too-deep inline nesting, bodies over the size limit (unless
 *   AggressiveInlining), classes whose .cctor cannot be run or proven run,
 *   soft-float targets with R4 in the signature, and methods on the
 *   cfg->dont_inline list.
 */
4575 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4577 MonoMethodHeaderSummary header;
4579 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4580 MonoMethodSignature *sig = mono_method_signature (method);
4584 if (cfg->disable_inline)
/* Hard cap on inline nesting depth. */
4589 if (cfg->inline_depth > 10)
4592 if (!mono_method_get_header_summary (method, &header))
4595 /*runtime, icall and pinvoke are checked by summary call*/
4596 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4597 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4598 (mono_class_is_marshalbyref (method->klass)) ||
4602 /* also consider num_locals? */
4603 /* Do the size check early to avoid creating vtables */
4604 if (!inline_limit_inited) {
4606 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4607 inline_limit = atoi (inlinelimit);
4608 g_free (inlinelimit);
4610 inline_limit = INLINE_LENGTH_LIMIT;
4611 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
4613 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4617 * if we can initialize the class of the method right away, we do,
4618 * otherwise we don't allow inlining if the class needs initialization,
4619 * since it would mean inserting a call to mono_runtime_class_init()
4620 * inside the inlined code
4622 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4625 if (!(cfg->opt & MONO_OPT_SHARED)) {
4626 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4627 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4628 if (method->klass->has_cctor) {
4629 vtable = mono_class_vtable (cfg->domain, method->klass);
4632 if (!cfg->compile_aot) {
/* Run the cctor now; on failure just refuse to inline. */
4634 if (!mono_runtime_class_init_full (vtable, &error)) {
4635 mono_error_cleanup (&error);
4640 } else if (mono_class_is_before_field_init (method->klass)) {
4641 if (cfg->run_cctors && method->klass->has_cctor) {
4642 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4643 if (!method->klass->runtime_info)
4644 /* No vtable created yet */
4646 vtable = mono_class_vtable (cfg->domain, method->klass);
4649 /* This makes so that inline cannot trigger */
4650 /* .cctors: too many apps depend on them */
4651 /* running with a specific order... */
4652 if (! vtable->initialized)
4655 if (!mono_runtime_class_init_full (vtable, &error)) {
4656 mono_error_cleanup (&error);
4660 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4661 if (!method->klass->runtime_info)
4662 /* No vtable created yet */
4664 vtable = mono_class_vtable (cfg->domain, method->klass);
/* Do not trigger the cctor from the compiler; require it already ran. */
4667 if (!vtable->initialized)
4672 * If we're compiling for shared code
4673 * the cctor will need to be run at aot method load time, for example,
4674 * or at the end of the compilation of the inlining method.
4676 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4680 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 values need fallback handling, so skip inlining. */
4681 if (mono_arch_is_soft_float ()) {
4683 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4685 for (i = 0; i < sig->param_count; ++i)
4686 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4691 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access performed by METHOD on KLASS
 * requires the class constructor (.cctor) to be triggered at runtime.
 * NOTE(review): several original lines are elided in this excerpt (return
 * statements and closing braces); the comments below describe only the
 * checks that are visible here.
 */
4698 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* When JITting (not AOT compiling), an already-initialized vtable means no
 * cctor run can still be pending. */
4700 if (!cfg->compile_aot) {
4702 if (vtable->initialized)
/* BeforeFieldInit classes have relaxed initialization timing; the answer
 * presumably differs when the access is in the method currently being
 * compiled — TODO confirm against the unelided source. */
4706 if (mono_class_is_before_field_init (klass)) {
4707 if (cfg->method == method)
/* Classes that never need a cctor run are exempt. */
4711 if (!mono_class_needs_cctor_run (klass, method))
/* Instance methods of KLASS itself: initialization already happened. */
4714 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4715 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR that computes the address of element INDEX of the
 * one-dimensional array ARR whose element type is KLASS; BCHECK requests a
 * bounds check against MonoArray.max_length. The result instruction has
 * type STACK_MP and its ->klass set to the element class.
 * NOTE(review): some original lines (gsharedvt early path, returns, braces)
 * are elided in this excerpt.
 */
4722 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4726 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size (gsharedvt) element types take a separate path (elided here);
 * their element size comes from the rgctx below instead of a constant. */
4729 if (mini_is_gsharedvt_variable_klass (klass)) {
4732 mono_class_init (klass);
4733 size = mono_class_array_element_size (klass);
4736 mult_reg = alloc_preg (cfg);
4737 array_reg = arr->dreg;
4738 index_reg = index->dreg;
4740 #if SIZEOF_REGISTER == 8
4741 /* The array reg is 64 bits but the index reg is only 32 */
4742 if (COMPILE_LLVM (cfg)) {
4744 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4745 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4746 * of OP_X86_LEA for llvm.
4748 index2_reg = index_reg;
/* Non-LLVM 64-bit path: sign-extend the 32-bit index before address math. */
4750 index2_reg = alloc_preg (cfg);
4751 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: a 64-bit index on the stack is narrowed to 32 bits. */
4754 if (index->type == STACK_I8) {
4755 index2_reg = alloc_preg (cfg);
4756 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4758 index2_reg = index_reg;
4763 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: power-of-two element sizes map to a single LEA. */
4765 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4766 if (size == 1 || size == 2 || size == 4 || size == 8) {
4767 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4769 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4770 ins->klass = mono_class_get_element_class (klass);
4771 ins->type = STACK_MP;
/* Generic path: addr = arr + index * element_size + offsetof(vector). */
4777 add_reg = alloc_ireg_mp (cfg);
/* Under gsharedvt the element size is fetched from the rgctx at runtime. */
4780 MonoInst *rgctx_ins;
4783 g_assert (cfg->gshared);
4784 context_used = mini_class_check_context_used (cfg, klass);
4785 g_assert (context_used);
4786 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4787 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4791 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4792 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4793 ins->klass = mono_class_get_element_class (klass);
4794 ins->type = STACK_MP;
4795 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array ARR of element type KLASS. Each index is adjusted by the
 * dimension's lower bound and range-checked against the corresponding
 * MonoArrayBounds entry; out-of-range indexes raise IndexOutOfRangeException.
 */
4801 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4803 int bounds_reg = alloc_preg (cfg);
4804 int add_reg = alloc_ireg_mp (cfg);
4805 int mult_reg = alloc_preg (cfg);
4806 int mult2_reg = alloc_preg (cfg);
4807 int low1_reg = alloc_preg (cfg);
4808 int low2_reg = alloc_preg (cfg);
4809 int high1_reg = alloc_preg (cfg);
4810 int high2_reg = alloc_preg (cfg);
4811 int realidx1_reg = alloc_preg (cfg);
4812 int realidx2_reg = alloc_preg (cfg);
4813 int sum_reg = alloc_preg (cfg);
4814 int index1, index2, tmpreg;
4818 mono_class_init (klass);
4819 size = mono_class_array_element_size (klass);
4821 index1 = index_ins1->dreg;
4822 index2 = index_ins2->dreg;
4824 #if SIZEOF_REGISTER == 8
4825 /* The array reg is 64 bits but the index reg is only 32 */
4826 if (COMPILE_LLVM (cfg)) {
4829 tmpreg = alloc_preg (cfg);
4830 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4832 tmpreg = alloc_preg (cfg);
4833 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4837 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4841 /* range checking */
4842 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4843 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; must be < length. */
4845 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4846 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4847 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4848 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4849 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4850 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4851 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds entry follows the first. */
4853 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4854 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4855 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4856 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4857 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4858 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4859 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim2_length + realidx2) * size) + offsetof(vector) */
4861 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4862 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4863 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4864 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4865 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4867 ins->type = STACK_MP;
4869 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the array accessor CMETHOD
 * (Get/Set/Address). Rank-1 arrays — and rank-2 arrays when OP_LMUL is
 * available and intrinsics are enabled — take inlined fast paths; otherwise
 * a marshalling helper specialized on rank and element size is called.
 */
4875 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4879 MonoMethod *addr_method;
4881 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the trailing argument is the value being stored, not an index. */
4883 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4886 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4888 /* emit_ldelema_2 depends on OP_LMUL */
4889 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4890 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* Generic fallback: call the generated address helper for this rank/size. */
4893 if (mini_is_gsharedvt_variable_klass (eclass))
4896 element_size = mono_class_array_element_size (eclass);
4897 addr_method = mono_marshal_get_array_address (rank, element_size);
4898 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4903 static MonoBreakPolicy
4904 always_insert_breakpoint (MonoMethod *method)
4906 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy; defaults to inserting every breakpoint.
 * Replaced via mono_set_break_policy (). */
4909 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4912 * mono_set_break_policy:
4913 * \param policy_callback the new callback function
4915 * Allow embedders to decide whether to actually obey breakpoint instructions
4916 * (both break IL instructions and \c Debugger.Break method calls), for example
4917 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4918 * untrusted or semi-trusted code.
4920 * \p policy_callback will be called every time a break point instruction needs to
4921 * be inserted with the method argument being the method that calls \c Debugger.Break
4922 * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4923 * if it wants the breakpoint to not be effective in the given method.
4924 * \c MONO_BREAK_POLICY_ALWAYS is the default.
4927 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4929 if (policy_callback)
4930 break_policy_func = policy_callback;
4932 break_policy_func = always_insert_breakpoint;
4936 should_insert_brekpoint (MonoMethod *method) {
4937 switch (break_policy_func (method)) {
4938 case MONO_BREAK_POLICY_ALWAYS:
4940 case MONO_BREAK_POLICY_NEVER:
4942 case MONO_BREAK_POLICY_ON_DBG:
4943 g_warning ("mdb no longer supported");
4946 g_warning ("Incorrect value returned from break policy callback");
4951 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Get/SetGenericValueImpl icalls as a direct element load or
 * store through the element address; IS_SET selects the store direction.
 * A write barrier is emitted when storing a managed reference.
 */
4953 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4955 MonoInst *addr, *store, *load;
4956 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4958 /* the bounds check is already done by the callers */
4959 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Store path: value is read from args [2] and written into the array slot. */
4961 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4962 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4963 if (mini_type_is_reference (&eklass->byval_arg))
4964 emit_write_barrier (cfg, addr, load);
/* Load path: element is read from the slot and written through args [2]. */
4966 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4967 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4974 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4976 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for a stelem-style store of SP [2] into SP [0][SP [1]] with
 * element type KLASS. Reference-type stores with SAFETY_CHECKS go through
 * the virtual stelemref helper (which performs the array covariance check);
 * value types and unchecked stores are emitted inline, with a constant-index
 * fast path and a write barrier for reference elements.
 */
4980 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Checked reference store (non-null value): call the stelemref helper. */
4982 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4983 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4984 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4985 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4986 MonoInst *iargs [3];
4989 mono_class_setup_vtable (obj_array);
4990 g_assert (helper->slot);
4992 if (sp [0]->type != STACK_OBJ)
4994 if (sp [2]->type != STACK_OBJ)
5001 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size (gsharedvt) elements: store through the element address. */
5005 if (mini_is_gsharedvt_variable_klass (klass)) {
5008 // FIXME-VT: OP_ICONST optimization
5009 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5010 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5011 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset at compile time. */
5012 } else if (sp [1]->opcode == OP_ICONST) {
5013 int array_reg = sp [0]->dreg;
5014 int index_reg = sp [1]->dreg;
5015 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5017 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5018 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5021 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5022 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, and emit a write
 * barrier when the element is a managed reference. */
5024 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5025 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5026 if (generic_class_is_reference_type (cfg, klass))
5027 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Expand the Array.UnsafeStore/UnsafeLoad icalls. The element type comes
 * from the signature (params [2] for stores, the return type for loads) and
 * the element is accessed without a bounds check (FALSE is passed for the
 * safety/bcheck arguments below).
 */
5034 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5039 eklass = mono_class_from_mono_type (fsig->params [2]);
5041 eklass = mono_class_from_mono_type (fsig->ret);
5044 return emit_array_store (cfg, eklass, args, FALSE);
5046 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5047 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5053 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5056 int param_size, return_size;
5058 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5059 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5061 if (cfg->verbose_level > 3)
5062 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5064 //Don't allow mixing reference types with value types
5065 if (param_klass->valuetype != return_klass->valuetype) {
5066 if (cfg->verbose_level > 3)
5067 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5071 if (!param_klass->valuetype) {
5072 if (cfg->verbose_level > 3)
5073 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5078 if (param_klass->has_references || return_klass->has_references)
5081 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5082 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5083 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5084 if (cfg->verbose_level > 3)
5085 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5089 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5090 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5091 if (cfg->verbose_level > 3)
5092 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5096 param_size = mono_class_value_size (param_klass, &align);
5097 return_size = mono_class_value_size (return_klass, &align);
5099 //We can do it if sizes match
5100 if (param_size == return_size) {
5101 if (cfg->verbose_level > 3)
5102 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5106 //No simple way to handle struct if sizes don't match
5107 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5108 if (cfg->verbose_level > 3)
5109 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5114 * Same reg size category.
5115 * A quick note on why we don't require widening here.
5116 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5118 * Since the source value comes from a function argument, the JIT will already have
5119 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5121 if (param_size <= 4 && return_size <= 4) {
5122 if (cfg->verbose_level > 3)
5123 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Expand Array.UnsafeMov<S,R>: when S and R are move-compatible (see
 * is_unsafe_mov_compatible), the call collapses to its argument.
 * NOTE(review): the return statements are elided in this excerpt.
 */
5131 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5133 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5134 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* Variable-size (gsharedvt) return types cannot be moved in registers. */
5136 if (mini_is_gsharedvt_variable_type (fsig->ret))
5139 //Valuetypes that are semantically equivalent or numbers that can be widened
5140 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5143 //Arrays of valuetypes that are semantically equivalent
5144 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace the constructor call CMETHOD with an intrinsic
 * instruction sequence: SIMD intrinsics are attempted first (when the
 * architecture supports them and MONO_OPT_SIMD is enabled), then
 * native-type intrinsics.
 */
5151 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5153 #ifdef MONO_ARCH_SIMD_INTRINSICS
5154 MonoInst *ins = NULL;
5156 if (cfg->opt & MONO_OPT_SIMD) {
5157 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5163 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5167 emit_memory_barrier (MonoCompile *cfg, int kind)
5169 MonoInst *ins = NULL;
5170 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5171 MONO_ADD_INS (cfg->cbb, ins);
5172 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansions used only by the LLVM backend. System.Math
 * Sin/Cos/Sqrt/Abs(double) become single-instruction R8 opcodes, and
 * Min/Max become CMOV-based IMIN/IMAX/LMIN/LMAX (signed and unsigned,
 * 32- and 64-bit) when MONO_OPT_CMOV is enabled.
 * NOTE(review): the opcode assignments for the Sin/Cos/Sqrt/Abs and the
 * signed Min/Max cases are elided in this excerpt.
 */
5178 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5180 MonoInst *ins = NULL;
5183 /* The LLVM backend supports these intrinsics */
5184 if (cmethod->klass == mono_defaults.math_class) {
5185 if (strcmp (cmethod->name, "Sin") == 0) {
5187 } else if (strcmp (cmethod->name, "Cos") == 0) {
5189 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5191 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one opcode reading args [0], producing R8. */
5195 if (opcode && fsig->param_count == 1) {
5196 MONO_INST_NEW (cfg, ins, opcode);
5197 ins->type = STACK_R8;
5198 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5199 ins->sreg1 = args [0]->dreg;
5200 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only pay off when the backend can emit conditional moves. */
5204 if (cfg->opt & MONO_OPT_CMOV) {
5205 if (strcmp (cmethod->name, "Min") == 0) {
5206 if (fsig->params [0]->type == MONO_TYPE_I4)
5208 if (fsig->params [0]->type == MONO_TYPE_U4)
5209 opcode = OP_IMIN_UN;
5210 else if (fsig->params [0]->type == MONO_TYPE_I8)
5212 else if (fsig->params [0]->type == MONO_TYPE_U8)
5213 opcode = OP_LMIN_UN;
5214 } else if (strcmp (cmethod->name, "Max") == 0) {
5215 if (fsig->params [0]->type == MONO_TYPE_I4)
5217 if (fsig->params [0]->type == MONO_TYPE_U4)
5218 opcode = OP_IMAX_UN;
5219 else if (fsig->params [0]->type == MONO_TYPE_I8)
5221 else if (fsig->params [0]->type == MONO_TYPE_U8)
5222 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic over args [0] and args [1]. */
5226 if (opcode && fsig->param_count == 2) {
5227 MONO_INST_NEW (cfg, ins, opcode);
5228 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5229 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5230 ins->sreg1 = args [0]->dreg;
5231 ins->sreg2 = args [1]->dreg;
5232 MONO_ADD_INS (cfg->cbb, ins);
5240 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5242 if (cmethod->klass == mono_defaults.array_class) {
5243 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5244 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5245 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5246 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5247 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5248 return emit_array_unsafe_mov (cfg, fsig, args);
5255 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5257 MonoInst *ins = NULL;
5258 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5260 if (cmethod->klass == mono_defaults.string_class) {
5261 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5262 int dreg = alloc_ireg (cfg);
5263 int index_reg = alloc_preg (cfg);
5264 int add_reg = alloc_preg (cfg);
5266 #if SIZEOF_REGISTER == 8
5267 if (COMPILE_LLVM (cfg)) {
5268 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5270 /* The array reg is 64 bits but the index reg is only 32 */
5271 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5274 index_reg = args [1]->dreg;
5276 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5278 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5279 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5280 add_reg = ins->dreg;
5281 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5284 int mult_reg = alloc_preg (cfg);
5285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5286 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5287 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5288 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5290 type_from_op (cfg, ins, NULL, NULL);
5292 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5293 int dreg = alloc_ireg (cfg);
5294 /* Decompose later to allow more optimizations */
5295 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5296 ins->type = STACK_I4;
5297 ins->flags |= MONO_INST_FAULT;
5298 cfg->cbb->has_array_access = TRUE;
5299 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5304 } else if (cmethod->klass == mono_defaults.object_class) {
5305 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5306 int dreg = alloc_ireg_ref (cfg);
5307 int vt_reg = alloc_preg (cfg);
5308 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5309 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5310 type_from_op (cfg, ins, NULL, NULL);
5313 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5314 int dreg = alloc_ireg (cfg);
5315 int t1 = alloc_ireg (cfg);
5317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5318 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5319 ins->type = STACK_I4;
5322 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5323 MONO_INST_NEW (cfg, ins, OP_NOP);
5324 MONO_ADD_INS (cfg->cbb, ins);
5328 } else if (cmethod->klass == mono_defaults.array_class) {
5329 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5330 return emit_array_generic_access (cfg, fsig, args, FALSE);
5331 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5332 return emit_array_generic_access (cfg, fsig, args, TRUE);
5334 #ifndef MONO_BIG_ARRAYS
5336 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5339 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5340 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5341 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5342 int dreg = alloc_ireg (cfg);
5343 int bounds_reg = alloc_ireg_mp (cfg);
5344 MonoBasicBlock *end_bb, *szarray_bb;
5345 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5347 NEW_BBLOCK (cfg, end_bb);
5348 NEW_BBLOCK (cfg, szarray_bb);
5350 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5351 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5354 /* Non-szarray case */
5356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5357 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5359 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5360 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5362 MONO_START_BB (cfg, szarray_bb);
5365 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5366 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5368 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5369 MONO_START_BB (cfg, end_bb);
5371 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5372 ins->type = STACK_I4;
5378 if (cmethod->name [0] != 'g')
5381 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5382 int dreg = alloc_ireg (cfg);
5383 int vtable_reg = alloc_preg (cfg);
5384 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5385 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5386 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5387 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5388 type_from_op (cfg, ins, NULL, NULL);
5391 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5392 int dreg = alloc_ireg (cfg);
5394 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5395 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5396 type_from_op (cfg, ins, NULL, NULL);
5401 } else if (cmethod->klass == runtime_helpers_class) {
5402 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5403 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5405 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5406 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5408 g_assert (ctx->method_inst);
5409 g_assert (ctx->method_inst->type_argc == 1);
5410 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5411 MonoClass *klass = mono_class_from_mono_type (t);
5415 mono_class_init (klass);
5416 if (MONO_TYPE_IS_REFERENCE (t))
5417 EMIT_NEW_ICONST (cfg, ins, 1);
5418 else if (MONO_TYPE_IS_PRIMITIVE (t))
5419 EMIT_NEW_ICONST (cfg, ins, 0);
5420 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5421 EMIT_NEW_ICONST (cfg, ins, 1);
5422 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5423 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5425 g_assert (cfg->gshared);
5427 int context_used = mini_class_check_context_used (cfg, klass);
5429 /* This returns 1 or 2 */
5430 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5431 int dreg = alloc_ireg (cfg);
5432 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5438 } else if (cmethod->klass == mono_defaults.monitor_class) {
5439 gboolean is_enter = FALSE;
5440 gboolean is_v4 = FALSE;
5442 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5446 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5451 * To make async stack traces work, icalls which can block should have a wrapper.
5452 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5454 MonoBasicBlock *end_bb;
5456 NEW_BBLOCK (cfg, end_bb);
5458 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5461 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5462 MONO_START_BB (cfg, end_bb);
5465 } else if (cmethod->klass == mono_defaults.thread_class) {
5466 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5467 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5468 MONO_ADD_INS (cfg->cbb, ins);
5470 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5471 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5472 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5474 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5476 if (fsig->params [0]->type == MONO_TYPE_I1)
5477 opcode = OP_LOADI1_MEMBASE;
5478 else if (fsig->params [0]->type == MONO_TYPE_U1)
5479 opcode = OP_LOADU1_MEMBASE;
5480 else if (fsig->params [0]->type == MONO_TYPE_I2)
5481 opcode = OP_LOADI2_MEMBASE;
5482 else if (fsig->params [0]->type == MONO_TYPE_U2)
5483 opcode = OP_LOADU2_MEMBASE;
5484 else if (fsig->params [0]->type == MONO_TYPE_I4)
5485 opcode = OP_LOADI4_MEMBASE;
5486 else if (fsig->params [0]->type == MONO_TYPE_U4)
5487 opcode = OP_LOADU4_MEMBASE;
5488 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5489 opcode = OP_LOADI8_MEMBASE;
5490 else if (fsig->params [0]->type == MONO_TYPE_R4)
5491 opcode = OP_LOADR4_MEMBASE;
5492 else if (fsig->params [0]->type == MONO_TYPE_R8)
5493 opcode = OP_LOADR8_MEMBASE;
5494 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5495 opcode = OP_LOAD_MEMBASE;
5498 MONO_INST_NEW (cfg, ins, opcode);
5499 ins->inst_basereg = args [0]->dreg;
5500 ins->inst_offset = 0;
5501 MONO_ADD_INS (cfg->cbb, ins);
5503 switch (fsig->params [0]->type) {
5510 ins->dreg = mono_alloc_ireg (cfg);
5511 ins->type = STACK_I4;
5515 ins->dreg = mono_alloc_lreg (cfg);
5516 ins->type = STACK_I8;
5520 ins->dreg = mono_alloc_ireg (cfg);
5521 #if SIZEOF_REGISTER == 8
5522 ins->type = STACK_I8;
5524 ins->type = STACK_I4;
5529 ins->dreg = mono_alloc_freg (cfg);
5530 ins->type = STACK_R8;
5533 g_assert (mini_type_is_reference (fsig->params [0]));
5534 ins->dreg = mono_alloc_ireg_ref (cfg);
5535 ins->type = STACK_OBJ;
5539 if (opcode == OP_LOADI8_MEMBASE)
5540 ins = mono_decompose_opcode (cfg, ins);
5542 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5546 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5548 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5550 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5551 opcode = OP_STOREI1_MEMBASE_REG;
5552 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5553 opcode = OP_STOREI2_MEMBASE_REG;
5554 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5555 opcode = OP_STOREI4_MEMBASE_REG;
5556 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5557 opcode = OP_STOREI8_MEMBASE_REG;
5558 else if (fsig->params [0]->type == MONO_TYPE_R4)
5559 opcode = OP_STORER4_MEMBASE_REG;
5560 else if (fsig->params [0]->type == MONO_TYPE_R8)
5561 opcode = OP_STORER8_MEMBASE_REG;
5562 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5563 opcode = OP_STORE_MEMBASE_REG;
5566 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5568 MONO_INST_NEW (cfg, ins, opcode);
5569 ins->sreg1 = args [1]->dreg;
5570 ins->inst_destbasereg = args [0]->dreg;
5571 ins->inst_offset = 0;
5572 MONO_ADD_INS (cfg->cbb, ins);
5574 if (opcode == OP_STOREI8_MEMBASE_REG)
5575 ins = mono_decompose_opcode (cfg, ins);
5580 } else if (cmethod->klass->image == mono_defaults.corlib &&
5581 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5582 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5585 #if SIZEOF_REGISTER == 8
5586 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5587 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5588 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5589 ins->dreg = mono_alloc_preg (cfg);
5590 ins->sreg1 = args [0]->dreg;
5591 ins->type = STACK_I8;
5592 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5593 MONO_ADD_INS (cfg->cbb, ins);
5597 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5599 /* 64 bit reads are already atomic */
5600 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5601 load_ins->dreg = mono_alloc_preg (cfg);
5602 load_ins->inst_basereg = args [0]->dreg;
5603 load_ins->inst_offset = 0;
5604 load_ins->type = STACK_I8;
5605 MONO_ADD_INS (cfg->cbb, load_ins);
5607 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5614 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5615 MonoInst *ins_iconst;
5618 if (fsig->params [0]->type == MONO_TYPE_I4) {
5619 opcode = OP_ATOMIC_ADD_I4;
5620 cfg->has_atomic_add_i4 = TRUE;
5622 #if SIZEOF_REGISTER == 8
5623 else if (fsig->params [0]->type == MONO_TYPE_I8)
5624 opcode = OP_ATOMIC_ADD_I8;
5627 if (!mono_arch_opcode_supported (opcode))
5629 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5630 ins_iconst->inst_c0 = 1;
5631 ins_iconst->dreg = mono_alloc_ireg (cfg);
5632 MONO_ADD_INS (cfg->cbb, ins_iconst);
5634 MONO_INST_NEW (cfg, ins, opcode);
5635 ins->dreg = mono_alloc_ireg (cfg);
5636 ins->inst_basereg = args [0]->dreg;
5637 ins->inst_offset = 0;
5638 ins->sreg2 = ins_iconst->dreg;
5639 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5640 MONO_ADD_INS (cfg->cbb, ins);
5642 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5643 MonoInst *ins_iconst;
5646 if (fsig->params [0]->type == MONO_TYPE_I4) {
5647 opcode = OP_ATOMIC_ADD_I4;
5648 cfg->has_atomic_add_i4 = TRUE;
5650 #if SIZEOF_REGISTER == 8
5651 else if (fsig->params [0]->type == MONO_TYPE_I8)
5652 opcode = OP_ATOMIC_ADD_I8;
5655 if (!mono_arch_opcode_supported (opcode))
5657 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5658 ins_iconst->inst_c0 = -1;
5659 ins_iconst->dreg = mono_alloc_ireg (cfg);
5660 MONO_ADD_INS (cfg->cbb, ins_iconst);
5662 MONO_INST_NEW (cfg, ins, opcode);
5663 ins->dreg = mono_alloc_ireg (cfg);
5664 ins->inst_basereg = args [0]->dreg;
5665 ins->inst_offset = 0;
5666 ins->sreg2 = ins_iconst->dreg;
5667 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5668 MONO_ADD_INS (cfg->cbb, ins);
5670 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5673 if (fsig->params [0]->type == MONO_TYPE_I4) {
5674 opcode = OP_ATOMIC_ADD_I4;
5675 cfg->has_atomic_add_i4 = TRUE;
5677 #if SIZEOF_REGISTER == 8
5678 else if (fsig->params [0]->type == MONO_TYPE_I8)
5679 opcode = OP_ATOMIC_ADD_I8;
5682 if (!mono_arch_opcode_supported (opcode))
5684 MONO_INST_NEW (cfg, ins, opcode);
5685 ins->dreg = mono_alloc_ireg (cfg);
5686 ins->inst_basereg = args [0]->dreg;
5687 ins->inst_offset = 0;
5688 ins->sreg2 = args [1]->dreg;
5689 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5690 MONO_ADD_INS (cfg->cbb, ins);
5693 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5694 MonoInst *f2i = NULL, *i2f;
5695 guint32 opcode, f2i_opcode, i2f_opcode;
5696 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5697 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5699 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5700 fsig->params [0]->type == MONO_TYPE_R4) {
5701 opcode = OP_ATOMIC_EXCHANGE_I4;
5702 f2i_opcode = OP_MOVE_F_TO_I4;
5703 i2f_opcode = OP_MOVE_I4_TO_F;
5704 cfg->has_atomic_exchange_i4 = TRUE;
5706 #if SIZEOF_REGISTER == 8
5708 fsig->params [0]->type == MONO_TYPE_I8 ||
5709 fsig->params [0]->type == MONO_TYPE_R8 ||
5710 fsig->params [0]->type == MONO_TYPE_I) {
5711 opcode = OP_ATOMIC_EXCHANGE_I8;
5712 f2i_opcode = OP_MOVE_F_TO_I8;
5713 i2f_opcode = OP_MOVE_I8_TO_F;
5716 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5717 opcode = OP_ATOMIC_EXCHANGE_I4;
5718 cfg->has_atomic_exchange_i4 = TRUE;
5724 if (!mono_arch_opcode_supported (opcode))
5728 /* TODO: Decompose these opcodes instead of bailing here. */
5729 if (COMPILE_SOFT_FLOAT (cfg))
5732 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5733 f2i->dreg = mono_alloc_ireg (cfg);
5734 f2i->sreg1 = args [1]->dreg;
5735 if (f2i_opcode == OP_MOVE_F_TO_I4)
5736 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5737 MONO_ADD_INS (cfg->cbb, f2i);
5740 MONO_INST_NEW (cfg, ins, opcode);
5741 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5742 ins->inst_basereg = args [0]->dreg;
5743 ins->inst_offset = 0;
5744 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5745 MONO_ADD_INS (cfg->cbb, ins);
5747 switch (fsig->params [0]->type) {
5749 ins->type = STACK_I4;
5752 ins->type = STACK_I8;
5755 #if SIZEOF_REGISTER == 8
5756 ins->type = STACK_I8;
5758 ins->type = STACK_I4;
5763 ins->type = STACK_R8;
5766 g_assert (mini_type_is_reference (fsig->params [0]));
5767 ins->type = STACK_OBJ;
5772 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5773 i2f->dreg = mono_alloc_freg (cfg);
5774 i2f->sreg1 = ins->dreg;
5775 i2f->type = STACK_R8;
5776 if (i2f_opcode == OP_MOVE_I4_TO_F)
5777 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5778 MONO_ADD_INS (cfg->cbb, i2f);
5783 if (cfg->gen_write_barriers && is_ref)
5784 emit_write_barrier (cfg, args [0], args [1]);
5786 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5787 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5788 guint32 opcode, f2i_opcode, i2f_opcode;
5789 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5790 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5792 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5793 fsig->params [1]->type == MONO_TYPE_R4) {
5794 opcode = OP_ATOMIC_CAS_I4;
5795 f2i_opcode = OP_MOVE_F_TO_I4;
5796 i2f_opcode = OP_MOVE_I4_TO_F;
5797 cfg->has_atomic_cas_i4 = TRUE;
5799 #if SIZEOF_REGISTER == 8
5801 fsig->params [1]->type == MONO_TYPE_I8 ||
5802 fsig->params [1]->type == MONO_TYPE_R8 ||
5803 fsig->params [1]->type == MONO_TYPE_I) {
5804 opcode = OP_ATOMIC_CAS_I8;
5805 f2i_opcode = OP_MOVE_F_TO_I8;
5806 i2f_opcode = OP_MOVE_I8_TO_F;
5809 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5810 opcode = OP_ATOMIC_CAS_I4;
5811 cfg->has_atomic_cas_i4 = TRUE;
5817 if (!mono_arch_opcode_supported (opcode))
5821 /* TODO: Decompose these opcodes instead of bailing here. */
5822 if (COMPILE_SOFT_FLOAT (cfg))
5825 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5826 f2i_new->dreg = mono_alloc_ireg (cfg);
5827 f2i_new->sreg1 = args [1]->dreg;
5828 if (f2i_opcode == OP_MOVE_F_TO_I4)
5829 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5830 MONO_ADD_INS (cfg->cbb, f2i_new);
5832 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5833 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5834 f2i_cmp->sreg1 = args [2]->dreg;
5835 if (f2i_opcode == OP_MOVE_F_TO_I4)
5836 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5837 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5840 MONO_INST_NEW (cfg, ins, opcode);
5841 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5842 ins->sreg1 = args [0]->dreg;
5843 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5844 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5845 MONO_ADD_INS (cfg->cbb, ins);
5847 switch (fsig->params [1]->type) {
5849 ins->type = STACK_I4;
5852 ins->type = STACK_I8;
5855 #if SIZEOF_REGISTER == 8
5856 ins->type = STACK_I8;
5858 ins->type = STACK_I4;
5862 ins->type = cfg->r4_stack_type;
5865 ins->type = STACK_R8;
5868 g_assert (mini_type_is_reference (fsig->params [1]));
5869 ins->type = STACK_OBJ;
5874 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5875 i2f->dreg = mono_alloc_freg (cfg);
5876 i2f->sreg1 = ins->dreg;
5877 i2f->type = STACK_R8;
5878 if (i2f_opcode == OP_MOVE_I4_TO_F)
5879 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5880 MONO_ADD_INS (cfg->cbb, i2f);
5885 if (cfg->gen_write_barriers && is_ref)
5886 emit_write_barrier (cfg, args [0], args [1]);
5888 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5889 fsig->params [1]->type == MONO_TYPE_I4) {
5890 MonoInst *cmp, *ceq;
5892 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5895 /* int32 r = CAS (location, value, comparand); */
5896 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5897 ins->dreg = alloc_ireg (cfg);
5898 ins->sreg1 = args [0]->dreg;
5899 ins->sreg2 = args [1]->dreg;
5900 ins->sreg3 = args [2]->dreg;
5901 ins->type = STACK_I4;
5902 MONO_ADD_INS (cfg->cbb, ins);
5904 /* bool result = r == comparand; */
5905 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5906 cmp->sreg1 = ins->dreg;
5907 cmp->sreg2 = args [2]->dreg;
5908 cmp->type = STACK_I4;
5909 MONO_ADD_INS (cfg->cbb, cmp);
5911 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5912 ceq->dreg = alloc_ireg (cfg);
5913 ceq->type = STACK_I4;
5914 MONO_ADD_INS (cfg->cbb, ceq);
5916 /* *success = result; */
5917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5919 cfg->has_atomic_cas_i4 = TRUE;
5921 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5922 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5926 } else if (cmethod->klass->image == mono_defaults.corlib &&
5927 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5928 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5931 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5933 MonoType *t = fsig->params [0];
5935 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5937 g_assert (t->byref);
5938 /* t is a byref type, so the reference check is more complicated */
5939 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5940 if (t->type == MONO_TYPE_I1)
5941 opcode = OP_ATOMIC_LOAD_I1;
5942 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5943 opcode = OP_ATOMIC_LOAD_U1;
5944 else if (t->type == MONO_TYPE_I2)
5945 opcode = OP_ATOMIC_LOAD_I2;
5946 else if (t->type == MONO_TYPE_U2)
5947 opcode = OP_ATOMIC_LOAD_U2;
5948 else if (t->type == MONO_TYPE_I4)
5949 opcode = OP_ATOMIC_LOAD_I4;
5950 else if (t->type == MONO_TYPE_U4)
5951 opcode = OP_ATOMIC_LOAD_U4;
5952 else if (t->type == MONO_TYPE_R4)
5953 opcode = OP_ATOMIC_LOAD_R4;
5954 else if (t->type == MONO_TYPE_R8)
5955 opcode = OP_ATOMIC_LOAD_R8;
5956 #if SIZEOF_REGISTER == 8
5957 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5958 opcode = OP_ATOMIC_LOAD_I8;
5959 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5960 opcode = OP_ATOMIC_LOAD_U8;
5962 else if (t->type == MONO_TYPE_I)
5963 opcode = OP_ATOMIC_LOAD_I4;
5964 else if (is_ref || t->type == MONO_TYPE_U)
5965 opcode = OP_ATOMIC_LOAD_U4;
5969 if (!mono_arch_opcode_supported (opcode))
5972 MONO_INST_NEW (cfg, ins, opcode);
5973 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5974 ins->sreg1 = args [0]->dreg;
5975 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5976 MONO_ADD_INS (cfg->cbb, ins);
5979 case MONO_TYPE_BOOLEAN:
5986 ins->type = STACK_I4;
5990 ins->type = STACK_I8;
5994 #if SIZEOF_REGISTER == 8
5995 ins->type = STACK_I8;
5997 ins->type = STACK_I4;
6001 ins->type = cfg->r4_stack_type;
6004 ins->type = STACK_R8;
6008 ins->type = STACK_OBJ;
6014 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6016 MonoType *t = fsig->params [0];
6019 g_assert (t->byref);
6020 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6021 if (t->type == MONO_TYPE_I1)
6022 opcode = OP_ATOMIC_STORE_I1;
6023 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6024 opcode = OP_ATOMIC_STORE_U1;
6025 else if (t->type == MONO_TYPE_I2)
6026 opcode = OP_ATOMIC_STORE_I2;
6027 else if (t->type == MONO_TYPE_U2)
6028 opcode = OP_ATOMIC_STORE_U2;
6029 else if (t->type == MONO_TYPE_I4)
6030 opcode = OP_ATOMIC_STORE_I4;
6031 else if (t->type == MONO_TYPE_U4)
6032 opcode = OP_ATOMIC_STORE_U4;
6033 else if (t->type == MONO_TYPE_R4)
6034 opcode = OP_ATOMIC_STORE_R4;
6035 else if (t->type == MONO_TYPE_R8)
6036 opcode = OP_ATOMIC_STORE_R8;
6037 #if SIZEOF_REGISTER == 8
6038 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6039 opcode = OP_ATOMIC_STORE_I8;
6040 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6041 opcode = OP_ATOMIC_STORE_U8;
6043 else if (t->type == MONO_TYPE_I)
6044 opcode = OP_ATOMIC_STORE_I4;
6045 else if (is_ref || t->type == MONO_TYPE_U)
6046 opcode = OP_ATOMIC_STORE_U4;
6050 if (!mono_arch_opcode_supported (opcode))
6053 MONO_INST_NEW (cfg, ins, opcode);
6054 ins->dreg = args [0]->dreg;
6055 ins->sreg1 = args [1]->dreg;
6056 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6057 MONO_ADD_INS (cfg->cbb, ins);
6059 if (cfg->gen_write_barriers && is_ref)
6060 emit_write_barrier (cfg, args [0], args [1]);
6066 } else if (cmethod->klass->image == mono_defaults.corlib &&
6067 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6068 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6069 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6070 if (should_insert_brekpoint (cfg->method)) {
6071 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6073 MONO_INST_NEW (cfg, ins, OP_NOP);
6074 MONO_ADD_INS (cfg->cbb, ins);
6078 } else if (cmethod->klass->image == mono_defaults.corlib &&
6079 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6080 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6081 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6083 EMIT_NEW_ICONST (cfg, ins, 1);
6085 EMIT_NEW_ICONST (cfg, ins, 0);
6088 } else if (cmethod->klass->image == mono_defaults.corlib &&
6089 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6090 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6091 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6092 /* No stack walks are currently available, so implement this as an intrinsic */
6093 MonoInst *assembly_ins;
6095 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6096 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6099 } else if (cmethod->klass->image == mono_defaults.corlib &&
6100 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6101 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6102 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6103 /* No stack walks are currently available, so implement this as an intrinsic */
6104 MonoInst *method_ins;
6105 MonoMethod *declaring = cfg->method;
6107 /* This returns the declaring generic method */
6108 if (declaring->is_inflated)
6109 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6110 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6111 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6112 cfg->no_inline = TRUE;
6113 if (cfg->method != cfg->current_method)
6114 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6117 } else if (cmethod->klass == mono_defaults.math_class) {
6119 * There is general branchless code for Min/Max, but it does not work for
6121 * http://everything2.com/?node_id=1051618
6123 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6124 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6125 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6126 ins->dreg = alloc_preg (cfg);
6127 ins->type = STACK_I4;
6128 MONO_ADD_INS (cfg->cbb, ins);
6130 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6131 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6132 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6133 !strcmp (cmethod->klass->name, "Selector")) ||
6134 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6135 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6136 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6137 !strcmp (cmethod->klass->name, "Selector"))
6139 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6140 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6141 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6144 MonoJumpInfoToken *ji;
6147 if (args [0]->opcode == OP_GOT_ENTRY) {
6148 pi = (MonoInst *)args [0]->inst_p1;
6149 g_assert (pi->opcode == OP_PATCH_INFO);
6150 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6151 ji = (MonoJumpInfoToken *)pi->inst_p0;
6153 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6154 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6157 NULLIFY_INS (args [0]);
6159 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6160 return_val_if_nok (&cfg->error, NULL);
6162 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6163 ins->dreg = mono_alloc_ireg (cfg);
6166 MONO_ADD_INS (cfg->cbb, ins);
6171 #ifdef MONO_ARCH_SIMD_INTRINSICS
6172 if (cfg->opt & MONO_OPT_SIMD) {
6173 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6179 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6183 if (COMPILE_LLVM (cfg)) {
6184 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6189 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6193 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect certain well-known calls to faster managed implementations.
 * Currently handles String.InternalAllocateStr by calling the managed GC
 * allocator directly (skipped when allocation profiling or shared AOT code
 * is active, where the slow path is required).
 * NOTE(review): elided view — the fallback return path of this function is
 * not visible here.
 */
6196 inline static MonoInst*
6197 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6198 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6200 if (method->klass == mono_defaults.string_class) {
6201 /* managed string allocation support */
6202 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6203 MonoInst *iargs [2];
6204 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6205 MonoMethod *managed_alloc = NULL;
6207 g_assert (vtable); /* Should not fail since it is System.String */
6208 #ifndef MONO_CROSS_COMPILE
6209 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
6213 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6214 iargs [1] = args [0];
6215 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Save the call arguments SP into newly created inline-argument variables
 * (cfg->args [i]) so the inlined callee body can reference them. The first
 * slot is the 'this' argument when SIG has one.
 */
6222 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6224 MonoInst *store, *temp;
6227 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this', derive the type from the stack entry since SIG has no entry for it. */
6228 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6231 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6232 * would be different than the MonoInst's used to represent arguments, and
6233 * the ldelema implementation can't deal with that.
6234 * Solution: When ldelema is used on an inline argument, create a var for
6235 * it, emit ldelema on that var, and emit the saving code below in
6236 * inline_method () if needed.
6238 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6239 cfg->args [i] = temp;
6240 /* This uses cfg->args [i] which is set by the preceding line */
6241 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6242 store->cil_code = sp [0]->cil_code;
6247 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6248 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6250 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Developer/debug filter: when the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable is set, only allow inlining of callees whose full
 * name starts with that prefix. The limit string is cached in a
 * function-local static on first use.
 */
6252 check_inline_called_method_name_limit (MonoMethod *called_method)
6255 static const char *limit = NULL;
6257 if (limit == NULL) {
6258 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6260 if (limit_string != NULL)
6261 limit = limit_string;
/* An empty limit means "no restriction"; only compare when it is non-empty. */
6266 if (limit [0] != '\0') {
6267 char *called_method_name = mono_method_full_name (called_method, TRUE);
6269 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6270 g_free (called_method_name);
/* Accept only an exact prefix match of the full method name. */
6272 //return (strncmp_result <= 0);
6273 return (strncmp_result == 0);
6280 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the CALLER:
 * when MONO_INLINE_CALLER_METHOD_NAME_LIMIT is set, only perform inlining
 * inside methods whose full name starts with that prefix.
 */
6282 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6285 static const char *limit = NULL;
6287 if (limit == NULL) {
6288 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6289 if (limit_string != NULL) {
6290 limit = limit_string;
/* An empty limit means "no restriction"; only compare when it is non-empty. */
6296 if (limit [0] != '\0') {
6297 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6299 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6300 g_free (caller_method_name);
/* Accept only an exact prefix match of the full method name. */
6302 //return (strncmp_result <= 0);
6303 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing the vreg DREG of type RTYPE to its zero value:
 * NULL for pointer/reference types, 0 for integer types, 0.0 for floats
 * (loaded from static constants), and VZERO for value types.
 */
6311 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zero constants referenced by the R4CONST/R8CONST instructions below. */
6313 static double r8_0 = 0.0;
6314 static float r4_0 = 0.0;
6318 rtype = mini_get_underlying_type (rtype);
6322 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6323 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6324 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6325 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6326 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 values stay in single precision, so use a dedicated R4 const. */
6327 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6328 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6329 ins->type = STACK_R4;
6330 ins->inst_p0 = (void*)&r4_0;
6332 MONO_ADD_INS (cfg->cbb, ins);
6333 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6334 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6335 ins->type = STACK_R8;
6336 ins->inst_p0 = (void*)&r8_0;
6338 MONO_ADD_INS (cfg->cbb, ins);
6339 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6340 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6341 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type params known to be valuetypes are also zeroed with VZERO. */
6342 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6343 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6345 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Same type dispatch as emit_init_rvar (), but emit OP_DUMMY_* placeholder
 * initializations (which generate no machine code) instead of real zero
 * stores; this keeps the SSA/IR valid without the cost of an actual init.
 * Falls back to a real init via emit_init_rvar () for unhandled types.
 */
6350 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6354 rtype = mini_get_underlying_type (rtype);
6358 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6359 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6360 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6361 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6362 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6363 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6364 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6365 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6366 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6367 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6368 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6369 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6370 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6371 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* Unhandled type: fall back to a real (non-dummy) initialization. */
6373 emit_init_rvar (cfg, dreg, rtype);
6377 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize the local variable LOCAL of type TYPE. With real init, under
 * soft-float the value is first built in a temporary vreg and then stored
 * into the local via LOCSTORE; otherwise the local's dreg is initialized
 * directly. NOTE(review): elided view — the branch structure between these
 * lines is not fully visible here.
 */
6379 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6381 MonoInst *var = cfg->locals [local];
6382 if (COMPILE_SOFT_FLOAT (cfg)) {
6384 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6385 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cfg->cbb->last_ins) into the local. */
6386 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6389 emit_init_rvar (cfg, var->dreg, type);
6391 emit_dummy_init_rvar (cfg, var->dreg, type);
/* mini_inline_method: public wrapper around the static inline_method () below. */
6396 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6398 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6404 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Inline CMETHOD at the current IL position: compile its body with
 * mono_method_to_ir () into a fresh sbblock..ebblock range, then splice that
 * range into the caller's CFG if the inline was cheap enough (or forced).
 * NOTE(review): elided view — several lines of this function (returns,
 * #endif, braces) are not visible here; the code is kept byte-identical.
 */
6407 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6408 guchar *ip, guint real_offset, gboolean inline_always)
6411 MonoInst *ins, *rvar = NULL;
6412 MonoMethodHeader *cheader;
6413 MonoBasicBlock *ebblock, *sbblock;
6415 MonoMethod *prev_inlined_method;
6416 MonoInst **prev_locals, **prev_args;
6417 MonoType **prev_arg_types;
6418 guint prev_real_offset;
6419 GHashTable *prev_cbb_hash;
6420 MonoBasicBlock **prev_cil_offset_to_bb;
6421 MonoBasicBlock *prev_cbb;
6422 const unsigned char *prev_ip;
6423 unsigned char *prev_cil_start;
6424 guint32 prev_cil_offset_to_bb_len;
6425 MonoMethod *prev_current_method;
6426 MonoGenericContext *prev_generic_context;
6427 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6429 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see the check_* helpers). */
6431 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6432 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6435 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6436 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6441 fsig = mono_method_signature (cmethod);
6443 if (cfg->verbose_level > 2)
6444 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6446 if (!cmethod->inline_info) {
6447 cfg->stat_inlineable_methods++;
6448 cmethod->inline_info = 1;
6451 /* allocate local variables */
6452 cheader = mono_method_get_header_checked (cmethod, &error);
/* Forced inlines must surface header errors; best-effort inlines swallow them. */
6454 if (inline_always) {
6455 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6456 mono_error_move (&cfg->error, &error);
6458 mono_error_cleanup (&error);
6463 /* Must verify before creating locals, as broken IL can cause the JIT to assert. */
6464 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6465 mono_metadata_free_mh (cheader);
6469 /* allocate space to store the return value */
6470 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6471 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6474 prev_locals = cfg->locals;
6475 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6476 for (i = 0; i < cheader->num_locals; ++i)
6477 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6479 /* allocate start and end blocks */
6480 /* This is needed so if the inline is aborted, we can clean up */
6481 NEW_BBLOCK (cfg, sbblock);
6482 sbblock->real_offset = real_offset;
6484 NEW_BBLOCK (cfg, ebblock);
6485 ebblock->block_num = cfg->num_bblocks++;
6486 ebblock->real_offset = real_offset;
/* Save the JIT state that mono_method_to_ir () will clobber while compiling the inlinee. */
6488 prev_args = cfg->args;
6489 prev_arg_types = cfg->arg_types;
6490 prev_inlined_method = cfg->inlined_method;
6491 cfg->inlined_method = cmethod;
6492 cfg->ret_var_set = FALSE;
6493 cfg->inline_depth ++;
6494 prev_real_offset = cfg->real_offset;
6495 prev_cbb_hash = cfg->cbb_hash;
6496 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6497 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6498 prev_cil_start = cfg->cil_start;
6500 prev_cbb = cfg->cbb;
6501 prev_current_method = cfg->current_method;
6502 prev_generic_context = cfg->generic_context;
6503 prev_ret_var_set = cfg->ret_var_set;
6504 prev_disable_inline = cfg->disable_inline;
/* An inlined callvirt on an instance method keeps virtual-call semantics. */
6506 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Compile the inlinee's IL into sbblock..ebblock; COSTS reports the inline cost. */
6509 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6511 ret_var_set = cfg->ret_var_set;
/* Restore the caller's JIT state. */
6513 cfg->inlined_method = prev_inlined_method;
6514 cfg->real_offset = prev_real_offset;
6515 cfg->cbb_hash = prev_cbb_hash;
6516 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6517 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6518 cfg->cil_start = prev_cil_start;
6520 cfg->locals = prev_locals;
6521 cfg->args = prev_args;
6522 cfg->arg_types = prev_arg_types;
6523 cfg->current_method = prev_current_method;
6524 cfg->generic_context = prev_generic_context;
6525 cfg->ret_var_set = prev_ret_var_set;
6526 cfg->disable_inline = prev_disable_inline;
6527 cfg->inline_depth --;
/* Accept the inline when it was cheap enough, forced, or marked AggressiveInlining.
 * NOTE(review): 60 is a magic cost threshold distinct from INLINE_LENGTH_LIMIT — confirm intended. */
6529 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6530 if (cfg->verbose_level > 2)
6531 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6533 cfg->stat_inlined_methods++;
6535 /* always add some code to avoid block split failures */
6536 MONO_INST_NEW (cfg, ins, OP_NOP);
6537 MONO_ADD_INS (prev_cbb, ins);
/* Splice the inlinee's bblock range into the caller's CFG. */
6539 prev_cbb->next_bb = sbblock;
6540 link_bblock (cfg, prev_cbb, sbblock);
6543 * Get rid of the begin and end bblocks if possible to aid local
6546 if (prev_cbb->out_count == 1)
6547 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6549 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6550 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6552 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6553 MonoBasicBlock *prev = ebblock->in_bb [0];
6555 if (prev->next_bb == ebblock) {
6556 mono_merge_basic_blocks (cfg, prev, ebblock);
6558 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6559 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6560 cfg->cbb = prev_cbb;
6563 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6568 * It's possible that the rvar is set in some prev bblock, but not in others.
6574 for (i = 0; i < ebblock->in_count; ++i) {
6575 bb = ebblock->in_bb [i];
6577 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6580 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6590 * If the inlined method contains only a throw, then the ret var is not
6591 * set, so set it to a dummy value.
6594 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Load the return value back onto the evaluation stack for the caller. */
6596 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6599 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: discard any partial state and fall back to a real call. */
6602 if (cfg->verbose_level > 2)
6603 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6604 cfg->exception_type = MONO_EXCEPTION_NONE;
6606 /* This gets rid of the newly added bblocks */
6607 cfg->cbb = prev_cbb;
6609 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6614 * Some of these comments may well be out-of-date.
6615 * Design decisions: we do a single pass over the IL code (and we do bblock
6616 * splitting/merging in the few cases when it's required: a back jump to an IL
6617 * address that was not already seen as bblock starting point).
6618 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6619 * Complex operations are decomposed in simpler ones right away. We need to let the
6620 * arch-specific code peek and poke inside this process somehow (except when the
6621 * optimizations can take advantage of the full semantic info of coarse opcodes).
6622 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6623 * MonoInst->opcode initially is the IL opcode or some simplification of that
6624 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6625 * opcode with value bigger than OP_LAST.
6626 * At this point the IR can be handed over to an interpreter, a dumb code generator
6627 * or to the optimizing code generator that will translate it to SSA form.
6629 * Profiling directed optimizations.
6630 * We may compile by default with few or no optimizations and instrument the code
6631 * or the user may indicate what methods to optimize the most either in a config file
6632 * or through repeated runs where the compiler applies offline the optimizations to
6633 * each method and then decides if it was worth it.
/*
 * Verification helpers used while building IR: each check bails out through
 * UNVERIFIED (or TYPE_LOAD_ERROR) when the IL being compiled is malformed.
 */
6636 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6637 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6638 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6639 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6640 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6641 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6642 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6643 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6645 /* offset from br.s -> br like opcodes */
6646 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP does not start a basic block other
 * than BB (i.e. it is safe to keep emitting into BB at this offset).
 */
6649 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6651 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6653 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL stream in [START, END): decode each opcode, create
 * a MonoBasicBlock (via GET_BBLOCK) at every branch target and at the
 * instruction following a branch, and mark blocks containing a throw as
 * out-of-line. NOTE(review): elided view — several break;/ip-advance lines
 * of the switch are not visible here.
 */
6657 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6659 unsigned char *ip = start;
6660 unsigned char *target;
6663 MonoBasicBlock *bblock;
6664 const MonoOpcode *opcode;
6667 cli_addr = ip - start;
6668 i = mono_opcode_value ((const guint8 **)&ip, end);
6671 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding to find the opcode's length and any branch targets. */
6672 switch (opcode->argument) {
6673 case MonoInlineNone:
6676 case MonoInlineString:
6677 case MonoInlineType:
6678 case MonoInlineField:
6679 case MonoInlineMethod:
6682 case MonoShortInlineR:
6689 case MonoShortInlineVar:
6690 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
6693 case MonoShortInlineBrTarget:
6694 target = start + cli_addr + 2 + (signed char)ip [1];
6695 GET_BBLOCK (cfg, bblock, target);
6698 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement relative to the next instruction. */
6700 case MonoInlineBrTarget:
6701 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6702 GET_BBLOCK (cfg, bblock, target);
6705 GET_BBLOCK (cfg, bblock, ip);
/* switch: N 4-byte targets, all relative to the end of the jump table. */
6707 case MonoInlineSwitch: {
6708 guint32 n = read32 (ip + 1);
6711 cli_addr += 5 + 4 * n;
6712 target = start + cli_addr;
6713 GET_BBLOCK (cfg, bblock, target);
6715 for (j = 0; j < n; ++j) {
6716 target = start + cli_addr + (gint32)read32 (ip);
6717 GET_BBLOCK (cfg, bblock, target);
6727 g_assert_not_reached ();
6730 if (i == CEE_THROW) {
6731 unsigned char *bb_start = ip - 1;
6733 /* Find the start of the bblock containing the throw */
6735 while ((bb_start >= start) && !bblock) {
6736 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: mark them out-of-line for block layout. */
6740 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 * methods the token indexes the wrapper's own data (then the result is
 * inflated with CONTEXT); otherwise the token is looked up in M's image.
 * Open constructed types are allowed; errors are reported via ERROR.
 */
6750 static inline MonoMethod *
6751 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6757 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6758 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6760 method = mono_class_inflate_generic_method_checked (method, context, error);
6763 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared code
 * reject methods whose class is still an open constructed type (recording a
 * bad-image error on CFG). Without a CFG, resolution errors are swallowed.
 */
6769 static inline MonoMethod *
6770 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6773 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6775 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6776 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6780 if (!method && !cfg)
6781 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 * Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT.  Wrapper methods store the class in their wrapper data; other
 * methods resolve the token (which may be a typespec) against their image.
 * The class is initialized before returning.  Errors are swallowed (FIXME).
 */
6786 static inline MonoClass*
6787 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6792 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6793 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6795 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6796 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6799 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6800 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6803 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 * Resolve TOKEN to a MonoMethodSignature in the context of METHOD.  Wrapper
 * methods store the signature in their wrapper data; other methods parse it
 * from metadata.  The signature is then inflated with CONTEXT.  Returns NULL
 * and sets ERROR on failure.
 */
6807 static inline MonoMethodSignature*
6808 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6810 MonoMethodSignature *fsig;
6813 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6814 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6816 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6817 return_val_if_nok (error, NULL);
6820 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 * Return the (cached) System.Security.SecurityManager::ThrowException(Exception)
 * method, looked up lazily on first use.
 */
6826 throw_exception (void)
6828 static MonoMethod *method = NULL;
6831 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6832 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 * Emit IR which throws the pre-constructed exception object EX by calling
 * SecurityManager::ThrowException () with EX as its argument.
 */
6839 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6841 MonoMethod *thrower = throw_exception ();
6844 EMIT_NEW_PCONST (cfg, args [0], ex);
6845 mono_emit_method_call (cfg, thrower, args, NULL);
6849 * Return the original method if a wrapper is specified. We can only access
6850 * the custom attributes from the original method.
6853 get_original_method (MonoMethod *method)
/* Non-wrappers are already the original method. */
6855 if (method->wrapper_type == MONO_WRAPPER_NONE)
6858 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6859 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6862 /* in other cases we need to find the original method */
6863 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 * CoreCLR security check: if CALLER is not allowed to access FIELD, emit IR
 * which throws the security exception at the access site.
 */
6867 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6869 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6870 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6872 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 * CoreCLR security check: if CALLER is not allowed to call CALLEE, emit IR
 * which throws the security exception at the call site.
 */
6876 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6878 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6879 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6881 emit_throw_exception (cfg, ex);
6885 * Check that the IL instructions at ip are the array initialization
6886 * sequence and return the pointer to the data and the size.
6889 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6892 * newarr[System.Int32]
6894 * ldtoken field valuetype ...
6895 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern-match dup; ldtoken <field>; call — ip[5] == 0x4 checks the token's table byte (field token). */
6897 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6899 guint32 token = read32 (ip + 7);
6900 guint32 field_token = read32 (ip + 2);
6901 guint32 field_index = field_token & 0xffffff;
6903 const char *data_ptr;
6905 MonoMethod *cmethod;
6906 MonoClass *dummy_class;
6907 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6911 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6915 *out_field_token = field_token;
6917 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize calls to corlib's RuntimeHelpers.InitializeArray. */
6920 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6922 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6926 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6927 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* The array data must fit inside the RVA field's declared size. */
6944 if (size > mono_type_size (field->type, &dummy_align))
6947 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6948 if (!image_is_dynamic (method->klass->image)) {
6949 field_index = read32 (ip + 2) & 0xffffff;
6950 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6951 data_ptr = mono_image_rva_map (method->klass->image, rva);
6952 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6953 /* for aot code we do the lookup on load */
6954 if (aot && data_ptr)
6955 return (const char *)GUINT_TO_POINTER (rva);
6957 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: read the field data directly. */
6959 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on CFG describing the invalid IL found
 * at IP in METHOD, including a disassembly of the offending instruction when
 * the method body is parseable.  The header is queued on cfg->headers_to_free
 * so it is released with the compile's mempool.
 */
6967 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6970 char *method_fname = mono_method_full_name (method, TRUE);
6972 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6975 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6976 mono_error_cleanup (&error);
6977 } else if (header->code_size == 0)
6978 method_code = g_strdup ("method body is empty.");
6980 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6981 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6982 g_free (method_fname);
6983 g_free (method_code);
6984 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 * Emit a store of the value on top of the stack (*SP) into local variable N.
 * If the value was just produced by an ICONST/I8CONST as the last instruction
 * of the current bblock, retarget that instruction's dreg to the local
 * instead of emitting a separate move.
 */
6988 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6991 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6992 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6993 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6994 /* Optimize reg-reg moves away */
6996 * Can't optimize other opcodes, since sp[0] might point to
6997 * the last ins of a decomposed opcode.
6999 sp [0]->dreg = (cfg)->locals [n]->dreg;
7001 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7006 * ldloca inhibits many optimizations so try to get rid of it in common
7009 static inline unsigned char *
7010 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* SIZE is the ldloca encoding length; read the local index accordingly. */
7020 local = read16 (ip + 2);
/* ldloca followed by initobj in the same bblock: initialize the local directly
 * and skip both instructions, avoiding the address-taken local. */
7024 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7025 /* From the INITOBJ case */
7026 token = read32 (ip + 2);
7027 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7028 CHECK_TYPELOAD (klass);
7029 type = mini_get_underlying_type (&klass->byval_arg);
7030 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 * Emit a virtual/interface call to CMETHOD on the receiver SP [0] for
 * llvm-only mode, where vtable/imt slots hold function descriptors
 * (addr + arg pairs) instead of code addresses or trampolines.
 * Four strategies, from fastest to slowest:
 *   1. plain virtual call: load the descriptor from the vtable slot,
 *      lazily initializing it via an icall when NULL;
 *   2. simple (non-variant, non-generic) interface call through an imt slot;
 *   3. generic-virtual / variant-interface / special-array-interface call
 *      through a lazily-extended imt thunk with a slowpath fallback;
 *   4. fully dynamic resolution via a runtime icall (gsharedvt).
 *
 * NOTE(review): this view of the function is sampled/incomplete; comments
 * describe only the visible lines.
 */
7038 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7040 MonoInst *icall_args [16];
7041 MonoInst *call_target, *ins, *vtable_ins;
7042 int arg_reg, this_reg, vtable_reg;
7043 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7044 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7045 gboolean variant_iface = FALSE;
7048 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7051 * In llvm-only mode, vtables contain function descriptors instead of
7052 * method addresses/trampolines.
7054 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use imt slots, classes use vtable slots. */
7057 slot = mono_method_get_imt_slot (cmethod);
7059 slot = mono_method_get_vtable_index (cmethod);
7061 this_reg = sp [0]->dreg;
7063 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7064 variant_iface = TRUE;
/* Case 1: plain virtual call through an initialized-on-demand vtable slot. */
7066 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7068 * The simplest case, a normal virtual call.
7070 int slot_reg = alloc_preg (cfg);
7071 int addr_reg = alloc_preg (cfg);
7072 int arg_reg = alloc_preg (cfg);
7073 MonoBasicBlock *non_null_bb;
7075 vtable_reg = alloc_preg (cfg);
7076 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7077 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7079 /* Load the vtable slot, which contains a function descriptor. */
7080 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7082 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-NULL). */
7084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7085 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7089 // FIXME: Make the wrapper use the preserveall cconv
7090 // FIXME: Use one icall per slot for small slot numbers ?
7091 icall_args [0] = vtable_ins;
7092 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7093 /* Make the icall return the vtable slot value to save some code space */
7094 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7095 ins->dreg = slot_reg;
7096 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7099 MONO_START_BB (cfg, non_null_bb);
7100 /* Load the address + arg from the vtable slot */
7101 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7104 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple interface call through an imt slot. */
7107 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7109 * A simple interface call
7111 * We make a call through an imt slot to obtain the function descriptor we need to call.
7112 * The imt slot contains a function descriptor for a runtime function + arg.
7114 int slot_reg = alloc_preg (cfg);
7115 int addr_reg = alloc_preg (cfg);
7116 int arg_reg = alloc_preg (cfg);
7117 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7119 vtable_reg = alloc_preg (cfg);
7120 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* imt slots live below the vtable start. */
7121 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7124 * The slot is already initialized when the vtable is created so there is no need
7128 /* Load the imt slot, which contains a function descriptor. */
7129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7131 /* Load the address + arg of the imt thunk from the imt slot */
7132 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7133 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7135 * IMT thunks in llvm-only mode are C functions which take an info argument
7136 * plus the imt method and return the ftndesc to call.
7138 icall_args [0] = thunk_arg_ins;
7139 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7140 cmethod, MONO_RGCTX_INFO_METHOD);
7141 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7143 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant interface / special array interface. */
7146 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7148 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7149 * dynamically extended as more instantiations are discovered.
7150 * This handles generic virtual methods both on classes and interfaces.
7152 int slot_reg = alloc_preg (cfg);
7153 int addr_reg = alloc_preg (cfg);
7154 int arg_reg = alloc_preg (cfg);
7155 int ftndesc_reg = alloc_preg (cfg);
7156 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7157 MonoBasicBlock *slowpath_bb, *end_bb;
7159 NEW_BBLOCK (cfg, slowpath_bb);
7160 NEW_BBLOCK (cfg, end_bb);
7162 vtable_reg = alloc_preg (cfg);
7163 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7165 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7167 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7169 /* Load the slot, which contains a function descriptor. */
7170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7172 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7173 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7174 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7178 /* Same as with iface calls */
7179 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7180 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7181 icall_args [0] = thunk_arg_ins;
7182 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7183 cmethod, MONO_RGCTX_INFO_METHOD);
7184 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7185 ftndesc_ins->dreg = ftndesc_reg;
7187 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7188 * they don't know about yet. Fall back to the slowpath in that case.
7190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7193 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: resolve the target through a runtime icall. */
7196 MONO_START_BB (cfg, slowpath_bb);
7197 icall_args [0] = vtable_ins;
7198 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7199 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7200 cmethod, MONO_RGCTX_INFO_METHOD);
7202 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7204 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7205 ftndesc_ins->dreg = ftndesc_reg;
7206 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7209 MONO_START_BB (cfg, end_bb);
7210 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: fully dynamic resolution (gsharedvt). */
7214 * Non-optimized cases
7216 icall_args [0] = sp [0];
7217 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7219 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7220 cmethod, MONO_RGCTX_INFO_METHOD);
/* The icall returns the extra argument through this out-param. */
7222 arg_reg = alloc_preg (cfg);
7223 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7224 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7226 g_assert (is_gsharedvt);
7228 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7230 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7233 * Pass the extra argument even if the callee doesn't receive it, most
7234 * calling conventions allow this.
7236 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 * Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain.
 */
7240 is_exception_class (MonoClass *klass)
7243 if (klass == mono_defaults.exception_class)
7245 klass = klass->parent;
7251 * is_jit_optimizer_disabled:
7253 * Determine whether M's assembly has a DebuggableAttribute with the
7254 * IsJITOptimizerDisabled flag set.
7257 is_jit_optimizer_disabled (MonoMethod *m)
7260 MonoAssembly *ass = m->klass->image->assembly;
7261 MonoCustomAttrInfo* attrs;
7264 gboolean val = FALSE;
/* Fast path: result already computed and published for this assembly. */
7267 if (ass->jit_optimizer_disabled_inited)
7268 return ass->jit_optimizer_disabled;
7270 klass = mono_class_try_get_debuggable_attribute_class ();
/* No DebuggableAttribute class available: cache FALSE.  The barrier orders
 * the value store before the inited-flag store for concurrent readers. */
7274 ass->jit_optimizer_disabled = FALSE;
7275 mono_memory_barrier ();
7276 ass->jit_optimizer_disabled_inited = TRUE;
7280 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7281 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7283 for (i = 0; i < attrs->num_attrs; ++i) {
7284 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7286 MonoMethodSignature *sig;
7288 if (!attr->ctor || attr->ctor->klass != klass)
7290 /* Decode the attribute. See reflection.c */
7291 p = (const char*)attr->data;
7292 g_assert (read16 (p) == 0x0001);
7295 // FIXME: Support named parameters
7296 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor is handled. */
7297 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7299 /* Two boolean arguments */
7303 mono_custom_attrs_free (attrs);
7306 ass->jit_optimizer_disabled = val;
7307 mono_memory_barrier ();
7308 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 * Decide whether the call from METHOD to CMETHOD (signature FSIG, made with
 * CALL_OPCODE) can be compiled as a tail call.  Starts from the arch-specific
 * answer and then vetoes cases where callee arguments could point into the
 * caller's soon-to-be-destroyed stack frame, or where the runtime needs the
 * caller frame to stay live (LMF, pinvoke, wrappers, non-CEE_CALL opcodes).
 */
7314 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7316 gboolean supported_tail_call;
7319 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7321 for (i = 0; i < fsig->param_count; ++i) {
7322 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7323 /* These can point to the current method's stack */
7324 supported_tail_call = FALSE;
7326 if (fsig->hasthis && cmethod->klass->valuetype)
7327 /* this might point to the current method's stack */
7328 supported_tail_call = FALSE;
7329 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7330 supported_tail_call = FALSE;
7331 if (cfg->method->save_lmf)
7332 supported_tail_call = FALSE;
7333 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7334 supported_tail_call = FALSE;
7335 if (call_opcode != CEE_CALL)
7336 supported_tail_call = FALSE;
7338 /* Debugging support */
7340 if (supported_tail_call) {
7341 if (!mono_debug_count ())
7342 supported_tail_call = FALSE;
7346 return supported_tail_call;
7352 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * NOTE(review): this view of the function is sampled/incomplete; comments
 * describe only the visible lines.
 */
7355 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7356 MonoInst **sp, guint8 *ip, int *inline_costs)
7358 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an rgctx/vtable argument. */
7360 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7361 mono_method_is_generic_sharable (cmethod, TRUE)) {
7362 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7363 mono_class_vtable (cfg->domain, cmethod->klass);
7364 CHECK_TYPELOAD (cmethod->klass);
7366 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7367 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7370 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7371 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7373 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7375 CHECK_TYPELOAD (cmethod->klass);
7376 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7381 /* Avoid virtual calls to ctors if possible */
7382 if (mono_class_is_marshalbyref (cmethod->klass))
7383 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first. */
7385 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7386 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7387 CHECK_CFG_EXCEPTION;
/* Then try inlining the ctor body (not for exception subclasses). */
7388 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7389 mono_method_check_inlining (cfg, cmethod) &&
7390 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7393 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7394 cfg->real_offset += 5;
/* Account for the inlined body, minus the call we avoided. */
7396 *inline_costs += costs - 5;
7398 INLINE_FAILURE ("inline failure");
7399 // FIXME-VT: Clean this up
7400 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7401 GSHAREDVT_FAILURE(*ip);
7402 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call through an out trampoline. */
7404 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7407 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7409 if (cfg->llvm_only) {
7410 // FIXME: Avoid initializing vtable_arg
7411 emit_llvmonly_calli (cfg, fsig, sp, addr);
7413 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7415 } else if (context_used &&
7416 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7417 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7418 MonoInst *cmethod_addr;
7420 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7422 if (cfg->llvm_only) {
7423 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7424 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7425 emit_llvmonly_calli (cfg, fsig, sp, addr);
7427 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7428 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7430 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: a plain direct (or virtual for MBR) call to the ctor. */
7433 INLINE_FAILURE ("ctor call");
7434 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7435 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 * Emit IR which stores VAL as the return value of the current method.
 * Valuetype returns (CEE_STOBJ) are stored either into the return variable
 * or through the hidden vret address argument; on soft-float targets an R4
 * return is converted via an icall before the arch-specific setret.
 */
7442 emit_setret (MonoCompile *cfg, MonoInst *val)
7444 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7447 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7450 if (!cfg->vret_addr) {
7451 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7453 EMIT_NEW_RETLOADA (cfg, ret_addr);
7455 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7456 ins->klass = mono_class_from_mono_type (ret_type);
7459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7460 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7461 MonoInst *iargs [1];
7465 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7466 mono_arch_emit_setret (cfg, cfg->method, conv);
7468 mono_arch_emit_setret (cfg, cfg->method, val);
7471 mono_arch_emit_setret (cfg, cfg->method, val);
7477 * mono_method_to_ir:
7479 * Translate the .net IL into linear IR.
7481 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7482 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7483 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7484 * @inline_args: if not NULL, contains the arguments to the inline call
7485 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7486 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7488 * This method is used to turn ECMA IL into Mono's internal Linear IR
7489 * reprensetation. It is used both for entire methods, as well as
7490 * inlining existing methods. In the former case, the @start_bblock,
7491 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7492 * inline_offset is set to zero.
7494 * Returns: the inline cost, or -1 if there was an error processing this method.
7497 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7498 MonoInst *return_var, MonoInst **inline_args,
7499 guint inline_offset, gboolean is_virtual_call)
7502 MonoInst *ins, **sp, **stack_start;
7503 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7504 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7505 MonoMethod *cmethod, *method_definition;
7506 MonoInst **arg_array;
7507 MonoMethodHeader *header;
7509 guint32 token, ins_flag;
7511 MonoClass *constrained_class = NULL;
7512 unsigned char *ip, *end, *target, *err_pos;
7513 MonoMethodSignature *sig;
7514 MonoGenericContext *generic_context = NULL;
7515 MonoGenericContainer *generic_container = NULL;
7516 MonoType **param_types;
7517 int i, n, start_new_bblock, dreg;
7518 int num_calls = 0, inline_costs = 0;
7519 int breakpoint_id = 0;
7521 GSList *class_inits = NULL;
7522 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7524 gboolean init_locals, seq_points, skip_dead_blocks;
7525 gboolean sym_seq_points = FALSE;
7526 MonoDebugMethodInfo *minfo;
7527 MonoBitSet *seq_point_locs = NULL;
7528 MonoBitSet *seq_point_set_locs = NULL;
7530 cfg->disable_inline = is_jit_optimizer_disabled (method);
7532 /* serialization and xdomain stuff may need access to private fields and methods */
7533 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7534 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7535 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7536 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7537 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7538 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7540 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7541 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7542 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7543 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7544 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7546 image = method->klass->image;
7547 header = mono_method_get_header_checked (method, &cfg->error);
7549 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7550 goto exception_exit;
7552 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7555 generic_container = mono_method_get_generic_container (method);
7556 sig = mono_method_signature (method);
7557 num_args = sig->hasthis + sig->param_count;
7558 ip = (unsigned char*)header->code;
7559 cfg->cil_start = ip;
7560 end = ip + header->code_size;
7561 cfg->stat_cil_code_size += header->code_size;
7563 seq_points = cfg->gen_seq_points && cfg->method == method;
7565 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7566 /* We could hit a seq point before attaching to the JIT (#8338) */
7570 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7571 minfo = mono_debug_lookup_method (method);
7573 MonoSymSeqPoint *sps;
7574 int i, n_il_offsets;
7576 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7577 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7578 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7579 sym_seq_points = TRUE;
7580 for (i = 0; i < n_il_offsets; ++i) {
7581 if (sps [i].il_offset < header->code_size)
7582 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7586 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7588 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7590 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7591 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7593 mono_debug_free_method_async_debug_info (asyncMethod);
7595 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7596 /* Methods without line number info like auto-generated property accessors */
7597 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7598 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7599 sym_seq_points = TRUE;
7604 * Methods without init_locals set could cause asserts in various passes
7605 * (#497220). To work around this, we emit dummy initialization opcodes
7606 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7607 * on some platforms.
7609 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7610 init_locals = header->init_locals;
7614 method_definition = method;
7615 while (method_definition->is_inflated) {
7616 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7617 method_definition = imethod->declaring;
7620 /* SkipVerification is not allowed if core-clr is enabled */
7621 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7623 dont_verify_stloc = TRUE;
7626 if (sig->is_inflated)
7627 generic_context = mono_method_get_context (method);
7628 else if (generic_container)
7629 generic_context = &generic_container->context;
7630 cfg->generic_context = generic_context;
7633 g_assert (!sig->has_type_parameters);
7635 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7636 g_assert (method->is_inflated);
7637 g_assert (mono_method_get_context (method)->method_inst);
7639 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7640 g_assert (sig->generic_param_count);
7642 if (cfg->method == method) {
7643 cfg->real_offset = 0;
7645 cfg->real_offset = inline_offset;
7648 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7649 cfg->cil_offset_to_bb_len = header->code_size;
7651 cfg->current_method = method;
7653 if (cfg->verbose_level > 2)
7654 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7656 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7658 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7659 for (n = 0; n < sig->param_count; ++n)
7660 param_types [n + sig->hasthis] = sig->params [n];
7661 cfg->arg_types = param_types;
7663 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7664 if (cfg->method == method) {
7666 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7667 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7670 NEW_BBLOCK (cfg, start_bblock);
7671 cfg->bb_entry = start_bblock;
7672 start_bblock->cil_code = NULL;
7673 start_bblock->cil_length = 0;
7676 NEW_BBLOCK (cfg, end_bblock);
7677 cfg->bb_exit = end_bblock;
7678 end_bblock->cil_code = NULL;
7679 end_bblock->cil_length = 0;
7680 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7681 g_assert (cfg->num_bblocks == 2);
7683 arg_array = cfg->args;
7685 if (header->num_clauses) {
7686 cfg->spvars = g_hash_table_new (NULL, NULL);
7687 cfg->exvars = g_hash_table_new (NULL, NULL);
7689 /* handle exception clauses */
7690 for (i = 0; i < header->num_clauses; ++i) {
7691 MonoBasicBlock *try_bb;
7692 MonoExceptionClause *clause = &header->clauses [i];
7693 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7695 try_bb->real_offset = clause->try_offset;
7696 try_bb->try_start = TRUE;
7697 try_bb->region = ((i + 1) << 8) | clause->flags;
7698 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7699 tblock->real_offset = clause->handler_offset;
7700 tblock->flags |= BB_EXCEPTION_HANDLER;
7703 * Linking the try block with the EH block hinders inlining as we won't be able to
7704 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7706 if (COMPILE_LLVM (cfg))
7707 link_bblock (cfg, try_bb, tblock);
7709 if (*(ip + clause->handler_offset) == CEE_POP)
7710 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7712 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7713 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7714 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7715 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7716 MONO_ADD_INS (tblock, ins);
7718 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7719 /* finally clauses already have a seq point */
7720 /* seq points for filter clauses are emitted below */
7721 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7722 MONO_ADD_INS (tblock, ins);
7725 /* todo: is a fault block unsafe to optimize? */
7726 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7727 tblock->flags |= BB_EXCEPTION_UNSAFE;
7730 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7732 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7734 /* catch and filter blocks get the exception object on the stack */
7735 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7736 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7738 /* mostly like handle_stack_args (), but just sets the input args */
7739 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7740 tblock->in_scount = 1;
7741 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7742 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7746 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7747 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7748 if (!cfg->compile_llvm) {
7749 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7750 ins->dreg = tblock->in_stack [0]->dreg;
7751 MONO_ADD_INS (tblock, ins);
7754 MonoInst *dummy_use;
7757 * Add a dummy use for the exvar so its liveness info will be
7760 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7763 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7764 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7765 MONO_ADD_INS (tblock, ins);
7768 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7769 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7770 tblock->flags |= BB_EXCEPTION_HANDLER;
7771 tblock->real_offset = clause->data.filter_offset;
7772 tblock->in_scount = 1;
7773 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7774 /* The filter block shares the exvar with the handler block */
7775 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7776 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7777 MONO_ADD_INS (tblock, ins);
7781 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7782 clause->data.catch_class &&
7784 mono_class_check_context_used (clause->data.catch_class)) {
7786 * In shared generic code with catch
7787 * clauses containing type variables
7788 * the exception handling code has to
7789 * be able to get to the rgctx.
7790 * Therefore we have to make sure that
7791 * the vtable/mrgctx argument (for
7792 * static or generic methods) or the
7793 * "this" argument (for non-static
7794 * methods) are live.
7796 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7797 mini_method_get_context (method)->method_inst ||
7798 method->klass->valuetype) {
7799 mono_get_vtable_var (cfg);
7801 MonoInst *dummy_use;
7803 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7808 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7809 cfg->cbb = start_bblock;
7810 cfg->args = arg_array;
7811 mono_save_args (cfg, sig, inline_args);
7814 /* FIRST CODE BLOCK */
7815 NEW_BBLOCK (cfg, tblock);
7816 tblock->cil_code = ip;
7820 ADD_BBLOCK (cfg, tblock);
7822 if (cfg->method == method) {
7823 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7824 if (breakpoint_id) {
7825 MONO_INST_NEW (cfg, ins, OP_BREAK);
7826 MONO_ADD_INS (cfg->cbb, ins);
7830 /* we use a separate basic block for the initialization code */
7831 NEW_BBLOCK (cfg, init_localsbb);
7832 if (cfg->method == method)
7833 cfg->bb_init = init_localsbb;
7834 init_localsbb->real_offset = cfg->real_offset;
7835 start_bblock->next_bb = init_localsbb;
7836 init_localsbb->next_bb = cfg->cbb;
7837 link_bblock (cfg, start_bblock, init_localsbb);
7838 link_bblock (cfg, init_localsbb, cfg->cbb);
7840 cfg->cbb = init_localsbb;
7842 if (cfg->gsharedvt && cfg->method == method) {
7843 MonoGSharedVtMethodInfo *info;
7844 MonoInst *var, *locals_var;
7847 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7848 info->method = cfg->method;
7849 info->count_entries = 16;
7850 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7851 cfg->gsharedvt_info = info;
7853 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7854 /* prevent it from being register allocated */
7855 //var->flags |= MONO_INST_VOLATILE;
7856 cfg->gsharedvt_info_var = var;
7858 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7859 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7861 /* Allocate locals */
7862 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7863 /* prevent it from being register allocated */
7864 //locals_var->flags |= MONO_INST_VOLATILE;
7865 cfg->gsharedvt_locals_var = locals_var;
7867 dreg = alloc_ireg (cfg);
7868 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7870 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7871 ins->dreg = locals_var->dreg;
7873 MONO_ADD_INS (cfg->cbb, ins);
7874 cfg->gsharedvt_locals_var_ins = ins;
7876 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7879 ins->flags |= MONO_INST_INIT;
7883 if (mono_security_core_clr_enabled ()) {
7884 /* check if this is native code, e.g. an icall or a p/invoke */
7885 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7886 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7888 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7889 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7891 /* if this is a native call then it can only be JITted from platform code */
7892 if ((icall || pinvk) && method->klass && method->klass->image) {
7893 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7894 MonoException *ex = icall ? mono_get_exception_security () :
7895 mono_get_exception_method_access ();
7896 emit_throw_exception (cfg, ex);
7903 CHECK_CFG_EXCEPTION;
7905 if (header->code_size == 0)
7908 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7913 if (cfg->method == method)
7914 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7916 for (n = 0; n < header->num_locals; ++n) {
7917 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7922 /* We force the vtable variable here for all shared methods
7923 for the possibility that they might show up in a stack
7924 trace where their exact instantiation is needed. */
7925 if (cfg->gshared && method == cfg->method) {
7926 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7927 mini_method_get_context (method)->method_inst ||
7928 method->klass->valuetype) {
7929 mono_get_vtable_var (cfg);
7931 /* FIXME: Is there a better way to do this?
7932 We need the variable live for the duration
7933 of the whole method. */
7934 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7938 /* add a check for this != NULL to inlined methods */
7939 if (is_virtual_call) {
7942 NEW_ARGLOAD (cfg, arg_ins, 0);
7943 MONO_ADD_INS (cfg->cbb, arg_ins);
7944 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7947 skip_dead_blocks = !dont_verify;
7948 if (skip_dead_blocks) {
7949 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7954 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7955 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7958 start_new_bblock = 0;
7960 if (cfg->method == method)
7961 cfg->real_offset = ip - header->code;
7963 cfg->real_offset = inline_offset;
7968 if (start_new_bblock) {
7969 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7970 if (start_new_bblock == 2) {
7971 g_assert (ip == tblock->cil_code);
7973 GET_BBLOCK (cfg, tblock, ip);
7975 cfg->cbb->next_bb = tblock;
7977 start_new_bblock = 0;
7978 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7979 if (cfg->verbose_level > 3)
7980 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7981 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7985 g_slist_free (class_inits);
7988 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7989 link_bblock (cfg, cfg->cbb, tblock);
7990 if (sp != stack_start) {
7991 handle_stack_args (cfg, stack_start, sp - stack_start);
7993 CHECK_UNVERIFIABLE (cfg);
7995 cfg->cbb->next_bb = tblock;
7997 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7998 if (cfg->verbose_level > 3)
7999 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8000 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8003 g_slist_free (class_inits);
8008 if (skip_dead_blocks) {
8009 int ip_offset = ip - header->code;
8011 if (ip_offset == bb->end)
8015 int op_size = mono_opcode_size (ip, end);
8016 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8018 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8020 if (ip_offset + op_size == bb->end) {
8021 MONO_INST_NEW (cfg, ins, OP_NOP);
8022 MONO_ADD_INS (cfg->cbb, ins);
8023 start_new_bblock = 1;
8031 * Sequence points are points where the debugger can place a breakpoint.
8032 * Currently, we generate these automatically at points where the IL
8035 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8037 * Make methods interruptable at the beginning, and at the targets of
8038 * backward branches.
8039 * Also, do this at the start of every bblock in methods with clauses too,
8040 * to be able to handle instructions with imprecise control flow like
8042 * Backward branches are handled at the end of method-to-ir ().
8044 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8045 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8047 /* Avoid sequence points on empty IL like .volatile */
8048 // FIXME: Enable this
8049 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8050 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8051 if ((sp != stack_start) && !sym_seq_point)
8052 ins->flags |= MONO_INST_NONEMPTY_STACK;
8053 MONO_ADD_INS (cfg->cbb, ins);
8056 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8059 cfg->cbb->real_offset = cfg->real_offset;
8061 if ((cfg->method == method) && cfg->coverage_info) {
8062 guint32 cil_offset = ip - header->code;
8063 cfg->coverage_info->data [cil_offset].cil_code = ip;
8065 /* TODO: Use an increment here */
8066 #if defined(TARGET_X86)
8067 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8068 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8070 MONO_ADD_INS (cfg->cbb, ins);
8072 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8073 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8077 if (cfg->verbose_level > 3)
8078 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8082 if (seq_points && !sym_seq_points && sp != stack_start) {
8084 * The C# compiler uses these nops to notify the JIT that it should
8085 * insert seq points.
8087 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8088 MONO_ADD_INS (cfg->cbb, ins);
8090 if (cfg->keep_cil_nops)
8091 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8093 MONO_INST_NEW (cfg, ins, OP_NOP);
8095 MONO_ADD_INS (cfg->cbb, ins);
8098 if (should_insert_brekpoint (cfg->method)) {
8099 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8101 MONO_INST_NEW (cfg, ins, OP_NOP);
8104 MONO_ADD_INS (cfg->cbb, ins);
8110 CHECK_STACK_OVF (1);
8111 n = (*ip)-CEE_LDARG_0;
8113 EMIT_NEW_ARGLOAD (cfg, ins, n);
8121 CHECK_STACK_OVF (1);
8122 n = (*ip)-CEE_LDLOC_0;
8124 EMIT_NEW_LOCLOAD (cfg, ins, n);
8133 n = (*ip)-CEE_STLOC_0;
8136 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8138 emit_stloc_ir (cfg, sp, header, n);
8145 CHECK_STACK_OVF (1);
8148 EMIT_NEW_ARGLOAD (cfg, ins, n);
8154 CHECK_STACK_OVF (1);
8157 NEW_ARGLOADA (cfg, ins, n);
8158 MONO_ADD_INS (cfg->cbb, ins);
8168 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8170 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8175 CHECK_STACK_OVF (1);
8178 EMIT_NEW_LOCLOAD (cfg, ins, n);
8182 case CEE_LDLOCA_S: {
8183 unsigned char *tmp_ip;
8185 CHECK_STACK_OVF (1);
8186 CHECK_LOCAL (ip [1]);
8188 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8194 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8203 CHECK_LOCAL (ip [1]);
8204 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8206 emit_stloc_ir (cfg, sp, header, ip [1]);
8211 CHECK_STACK_OVF (1);
8212 EMIT_NEW_PCONST (cfg, ins, NULL);
8213 ins->type = STACK_OBJ;
8218 CHECK_STACK_OVF (1);
8219 EMIT_NEW_ICONST (cfg, ins, -1);
8232 CHECK_STACK_OVF (1);
8233 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8239 CHECK_STACK_OVF (1);
8241 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8247 CHECK_STACK_OVF (1);
8248 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8254 CHECK_STACK_OVF (1);
8255 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8256 ins->type = STACK_I8;
8257 ins->dreg = alloc_dreg (cfg, STACK_I8);
8259 ins->inst_l = (gint64)read64 (ip);
8260 MONO_ADD_INS (cfg->cbb, ins);
8266 gboolean use_aotconst = FALSE;
8268 #ifdef TARGET_POWERPC
8269 /* FIXME: Clean this up */
8270 if (cfg->compile_aot)
8271 use_aotconst = TRUE;
8274 /* FIXME: we should really allocate this only late in the compilation process */
8275 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8277 CHECK_STACK_OVF (1);
8283 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8285 dreg = alloc_freg (cfg);
8286 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8287 ins->type = cfg->r4_stack_type;
8289 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8290 ins->type = cfg->r4_stack_type;
8291 ins->dreg = alloc_dreg (cfg, STACK_R8);
8293 MONO_ADD_INS (cfg->cbb, ins);
8303 gboolean use_aotconst = FALSE;
8305 #ifdef TARGET_POWERPC
8306 /* FIXME: Clean this up */
8307 if (cfg->compile_aot)
8308 use_aotconst = TRUE;
8311 /* FIXME: we should really allocate this only late in the compilation process */
8312 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8314 CHECK_STACK_OVF (1);
8320 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8322 dreg = alloc_freg (cfg);
8323 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8324 ins->type = STACK_R8;
8326 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8327 ins->type = STACK_R8;
8328 ins->dreg = alloc_dreg (cfg, STACK_R8);
8330 MONO_ADD_INS (cfg->cbb, ins);
8339 MonoInst *temp, *store;
8341 CHECK_STACK_OVF (1);
8345 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8346 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8348 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8351 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8364 if (sp [0]->type == STACK_R8)
8365 /* we need to pop the value from the x86 FP stack */
8366 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8371 MonoMethodSignature *fsig;
8374 INLINE_FAILURE ("jmp");
8375 GSHAREDVT_FAILURE (*ip);
8378 if (stack_start != sp)
8380 token = read32 (ip + 1);
8381 /* FIXME: check the signature matches */
8382 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8385 if (cfg->gshared && mono_method_check_context_used (cmethod))
8386 GENERIC_SHARING_FAILURE (CEE_JMP);
8388 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8390 fsig = mono_method_signature (cmethod);
8391 n = fsig->param_count + fsig->hasthis;
8392 if (cfg->llvm_only) {
8395 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8396 for (i = 0; i < n; ++i)
8397 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8398 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8400 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8401 * have to emit a normal return since llvm expects it.
8404 emit_setret (cfg, ins);
8405 MONO_INST_NEW (cfg, ins, OP_BR);
8406 ins->inst_target_bb = end_bblock;
8407 MONO_ADD_INS (cfg->cbb, ins);
8408 link_bblock (cfg, cfg->cbb, end_bblock);
8411 } else if (cfg->backend->have_op_tail_call) {
8412 /* Handle tail calls similarly to calls */
8415 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8416 call->method = cmethod;
8417 call->tail_call = TRUE;
8418 call->signature = mono_method_signature (cmethod);
8419 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8420 call->inst.inst_p0 = cmethod;
8421 for (i = 0; i < n; ++i)
8422 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8424 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8425 call->vret_var = cfg->vret_addr;
8427 mono_arch_emit_call (cfg, call);
8428 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8429 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8431 for (i = 0; i < num_args; ++i)
8432 /* Prevent arguments from being optimized away */
8433 arg_array [i]->flags |= MONO_INST_VOLATILE;
8435 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8436 ins = (MonoInst*)call;
8437 ins->inst_p0 = cmethod;
8438 MONO_ADD_INS (cfg->cbb, ins);
8442 start_new_bblock = 1;
8447 MonoMethodSignature *fsig;
8450 token = read32 (ip + 1);
8454 //GSHAREDVT_FAILURE (*ip);
8459 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8462 if (method->dynamic && fsig->pinvoke) {
8466 * This is a call through a function pointer using a pinvoke
8467 * signature. Have to create a wrapper and call that instead.
8468 * FIXME: This is very slow, need to create a wrapper at JIT time
8469 * instead based on the signature.
8471 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8472 EMIT_NEW_PCONST (cfg, args [1], fsig);
8474 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8477 n = fsig->param_count + fsig->hasthis;
8481 //g_assert (!virtual_ || fsig->hasthis);
8485 inline_costs += 10 * num_calls++;
8488 * Making generic calls out of gsharedvt methods.
8489 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8490 * patching gshared method addresses into a gsharedvt method.
8492 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8494 * We pass the address to the gsharedvt trampoline in the rgctx reg
8496 MonoInst *callee = addr;
8498 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8500 GSHAREDVT_FAILURE (*ip);
8504 GSHAREDVT_FAILURE (*ip);
8506 addr = emit_get_rgctx_sig (cfg, context_used,
8507 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8508 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8512 /* Prevent inlining of methods with indirect calls */
8513 INLINE_FAILURE ("indirect call");
8515 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8516 MonoJumpInfoType info_type;
8520 * Instead of emitting an indirect call, emit a direct call
8521 * with the contents of the aotconst as the patch info.
8523 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8524 info_type = (MonoJumpInfoType)addr->inst_c1;
8525 info_data = addr->inst_p0;
8527 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8528 info_data = addr->inst_right->inst_left;
8531 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8532 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8535 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8536 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8541 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8545 /* End of call, INS should contain the result of the call, if any */
8547 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8549 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8552 CHECK_CFG_EXCEPTION;
8556 constrained_class = NULL;
8560 case CEE_CALLVIRT: {
8561 MonoInst *addr = NULL;
8562 MonoMethodSignature *fsig = NULL;
8564 int virtual_ = *ip == CEE_CALLVIRT;
8565 gboolean pass_imt_from_rgctx = FALSE;
8566 MonoInst *imt_arg = NULL;
8567 MonoInst *keep_this_alive = NULL;
8568 gboolean pass_vtable = FALSE;
8569 gboolean pass_mrgctx = FALSE;
8570 MonoInst *vtable_arg = NULL;
8571 gboolean check_this = FALSE;
8572 gboolean supported_tail_call = FALSE;
8573 gboolean tail_call = FALSE;
8574 gboolean need_seq_point = FALSE;
8575 guint32 call_opcode = *ip;
8576 gboolean emit_widen = TRUE;
8577 gboolean push_res = TRUE;
8578 gboolean skip_ret = FALSE;
8579 gboolean delegate_invoke = FALSE;
8580 gboolean direct_icall = FALSE;
8581 gboolean constrained_partial_call = FALSE;
8582 MonoMethod *cil_method;
8585 token = read32 (ip + 1);
8589 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8592 cil_method = cmethod;
8594 if (constrained_class) {
8595 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8596 if (!mini_is_gsharedvt_klass (constrained_class)) {
8597 g_assert (!cmethod->klass->valuetype);
8598 if (!mini_type_is_reference (&constrained_class->byval_arg))
8599 constrained_partial_call = TRUE;
8603 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8604 if (cfg->verbose_level > 2)
8605 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8606 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8607 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8609 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8613 if (cfg->verbose_level > 2)
8614 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8616 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8618 * This is needed since get_method_constrained can't find
8619 * the method in klass representing a type var.
8620 * The type var is guaranteed to be a reference type in this
8623 if (!mini_is_gsharedvt_klass (constrained_class))
8624 g_assert (!cmethod->klass->valuetype);
8626 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8631 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8632 /* Use the corresponding method from the base type to avoid boxing */
8633 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8634 g_assert (base_type);
8635 constrained_class = mono_class_from_mono_type (base_type);
8636 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8641 if (!dont_verify && !cfg->skip_visibility) {
8642 MonoMethod *target_method = cil_method;
8643 if (method->is_inflated) {
8644 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8647 if (!mono_method_can_access_method (method_definition, target_method) &&
8648 !mono_method_can_access_method (method, cil_method))
8649 emit_method_access_failure (cfg, method, cil_method);
8652 if (mono_security_core_clr_enabled ())
8653 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8655 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8656 /* MS.NET seems to silently convert this to a callvirt */
8661 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8662 * converts to a callvirt.
8664 * tests/bug-515884.il is an example of this behavior
8666 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8667 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8668 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8672 if (!cmethod->klass->inited)
8673 if (!mono_class_init (cmethod->klass))
8674 TYPE_LOAD_ERROR (cmethod->klass);
8676 fsig = mono_method_signature (cmethod);
8679 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8680 mini_class_is_system_array (cmethod->klass)) {
8681 array_rank = cmethod->klass->rank;
8682 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8683 direct_icall = TRUE;
8684 } else if (fsig->pinvoke) {
8685 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8686 fsig = mono_method_signature (wrapper);
8687 } else if (constrained_class) {
8689 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8693 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8694 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8696 /* See code below */
8697 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8698 MonoBasicBlock *tbb;
8700 GET_BBLOCK (cfg, tbb, ip + 5);
8701 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8703 * We want to extend the try block to cover the call, but we can't do it if the
8704 * call is made directly since its followed by an exception check.
8706 direct_icall = FALSE;
8710 mono_save_token_info (cfg, image, token, cil_method);
8712 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8713 need_seq_point = TRUE;
8715 /* Don't support calls made using type arguments for now */
8717 if (cfg->gsharedvt) {
8718 if (mini_is_gsharedvt_signature (fsig))
8719 GSHAREDVT_FAILURE (*ip);
8723 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8724 g_assert_not_reached ();
8726 n = fsig->param_count + fsig->hasthis;
8728 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8732 g_assert (!mono_method_check_context_used (cmethod));
8736 //g_assert (!virtual_ || fsig->hasthis);
8741 * We have the `constrained.' prefix opcode.
8743 if (constrained_class) {
8744 if (mini_is_gsharedvt_klass (constrained_class)) {
8745 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8746 /* The 'Own method' case below */
8747 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8748 /* 'The type parameter is instantiated as a reference type' case below. */
8750 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8751 CHECK_CFG_EXCEPTION;
8757 if (constrained_partial_call) {
8758 gboolean need_box = TRUE;
8761 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8762 * called method is not known at compile time either. The called method could end up being
8763 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8764 * to box the receiver.
8765 * A simple solution would be to box always and make a normal virtual call, but that would
8766 * be bad performance wise.
8768 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8770 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8775 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8776 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8777 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8778 ins->klass = constrained_class;
8779 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8780 CHECK_CFG_EXCEPTION;
8781 } else if (need_box) {
8783 MonoBasicBlock *is_ref_bb, *end_bb;
8784 MonoInst *nonbox_call;
8787 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8789 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8790 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8792 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8794 NEW_BBLOCK (cfg, is_ref_bb);
8795 NEW_BBLOCK (cfg, end_bb);
8797 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8802 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8804 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8807 MONO_START_BB (cfg, is_ref_bb);
8808 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8809 ins->klass = constrained_class;
8810 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8811 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8813 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8815 MONO_START_BB (cfg, end_bb);
8818 nonbox_call->dreg = ins->dreg;
8821 g_assert (mono_class_is_interface (cmethod->klass));
8822 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8823 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8826 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8828 * The type parameter is instantiated as a valuetype,
8829 * but that type doesn't override the method we're
8830 * calling, so we need to box `this'.
8832 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8833 ins->klass = constrained_class;
8834 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8835 CHECK_CFG_EXCEPTION;
8836 } else if (!constrained_class->valuetype) {
8837 int dreg = alloc_ireg_ref (cfg);
8840 * The type parameter is instantiated as a reference
8841 * type. We have a managed pointer on the stack, so
8842 * we need to dereference it here.
8844 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8845 ins->type = STACK_OBJ;
8848 if (cmethod->klass->valuetype) {
8851 /* Interface method */
8854 mono_class_setup_vtable (constrained_class);
8855 CHECK_TYPELOAD (constrained_class);
8856 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8858 TYPE_LOAD_ERROR (constrained_class);
8859 slot = mono_method_get_vtable_slot (cmethod);
8861 TYPE_LOAD_ERROR (cmethod->klass);
8862 cmethod = constrained_class->vtable [ioffset + slot];
8864 if (cmethod->klass == mono_defaults.enum_class) {
8865 /* Enum implements some interfaces, so treat this as the first case */
8866 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8867 ins->klass = constrained_class;
8868 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8869 CHECK_CFG_EXCEPTION;
8874 constrained_class = NULL;
8877 if (check_call_signature (cfg, fsig, sp))
8880 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8881 delegate_invoke = TRUE;
8883 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8884 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8885 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8893 * If the callee is a shared method, then its static cctor
8894 * might not get called after the call was patched.
8896 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8897 emit_class_init (cfg, cmethod->klass);
8898 CHECK_TYPELOAD (cmethod->klass);
8901 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8904 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8906 context_used = mini_method_check_context_used (cfg, cmethod);
8908 if (context_used && mono_class_is_interface (cmethod->klass)) {
8909 /* Generic method interface
8910 calls are resolved via a
8911 helper function and don't
8913 if (!cmethod_context || !cmethod_context->method_inst)
8914 pass_imt_from_rgctx = TRUE;
8918 * If a shared method calls another
8919 * shared method then the caller must
8920 * have a generic sharing context
8921 * because the magic trampoline
8922 * requires it. FIXME: We shouldn't
8923 * have to force the vtable/mrgctx
8924 * variable here. Instead there
8925 * should be a flag in the cfg to
8926 * request a generic sharing context.
8929 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8930 mono_get_vtable_var (cfg);
8935 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8937 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8939 CHECK_TYPELOAD (cmethod->klass);
8940 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8945 g_assert (!vtable_arg);
8947 if (!cfg->compile_aot) {
8949 * emit_get_rgctx_method () calls mono_class_vtable () so check
8950 * for type load errors before.
8952 mono_class_setup_vtable (cmethod->klass);
8953 CHECK_TYPELOAD (cmethod->klass);
8956 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8958 /* !marshalbyref is needed to properly handle generic methods + remoting */
8959 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8960 MONO_METHOD_IS_FINAL (cmethod)) &&
8961 !mono_class_is_marshalbyref (cmethod->klass)) {
8968 if (pass_imt_from_rgctx) {
8969 g_assert (!pass_vtable);
8971 imt_arg = emit_get_rgctx_method (cfg, context_used,
8972 cmethod, MONO_RGCTX_INFO_METHOD);
8976 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8978 /* Calling virtual generic methods */
8979 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8980 !(MONO_METHOD_IS_FINAL (cmethod) &&
8981 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8982 fsig->generic_param_count &&
8983 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8985 MonoInst *this_temp, *this_arg_temp, *store;
8986 MonoInst *iargs [4];
8988 g_assert (fsig->is_inflated);
8990 /* Prevent inlining of methods that contain indirect calls */
8991 INLINE_FAILURE ("virtual generic call");
8993 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8994 GSHAREDVT_FAILURE (*ip);
8996 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8997 g_assert (!imt_arg);
8999 g_assert (cmethod->is_inflated);
9000 imt_arg = emit_get_rgctx_method (cfg, context_used,
9001 cmethod, MONO_RGCTX_INFO_METHOD);
9002 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9004 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9005 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9006 MONO_ADD_INS (cfg->cbb, store);
9008 /* FIXME: This should be a managed pointer */
9009 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9011 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9012 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9013 cmethod, MONO_RGCTX_INFO_METHOD);
9014 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9015 addr = mono_emit_jit_icall (cfg,
9016 mono_helper_compile_generic_method, iargs);
9018 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9020 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9027 * Implement a workaround for the inherent races involved in locking:
9033 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9034 * try block, the Exit () won't be executed, see:
9035 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9036 * To work around this, we extend such try blocks to include the last x bytes
9037 * of the Monitor.Enter () call.
9039 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9040 MonoBasicBlock *tbb;
9042 GET_BBLOCK (cfg, tbb, ip + 5);
9044 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9045 * from Monitor.Enter like ArgumentNullException.
9047 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9048 /* Mark this bblock as needing to be extended */
9049 tbb->extend_try_block = TRUE;
9053 /* Conversion to a JIT intrinsic */
9054 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9055 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9056 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9064 if ((cfg->opt & MONO_OPT_INLINE) &&
9065 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9066 mono_method_check_inlining (cfg, cmethod)) {
9068 gboolean always = FALSE;
9070 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9071 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9072 /* Prevent inlining of methods that call wrappers */
9073 INLINE_FAILURE ("wrapper call");
9074 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9078 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9080 cfg->real_offset += 5;
9082 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9083 /* *sp is already set by inline_method */
9088 inline_costs += costs;
9094 /* Tail recursion elimination */
9095 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9096 gboolean has_vtargs = FALSE;
9099 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9100 INLINE_FAILURE ("tail call");
9102 /* keep it simple */
9103 for (i = fsig->param_count - 1; i >= 0; i--) {
9104 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9109 if (need_seq_point) {
9110 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9111 need_seq_point = FALSE;
9113 for (i = 0; i < n; ++i)
9114 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9115 MONO_INST_NEW (cfg, ins, OP_BR);
9116 MONO_ADD_INS (cfg->cbb, ins);
9117 tblock = start_bblock->out_bb [0];
9118 link_bblock (cfg, cfg->cbb, tblock);
9119 ins->inst_target_bb = tblock;
9120 start_new_bblock = 1;
9122 /* skip the CEE_RET, too */
9123 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9130 inline_costs += 10 * num_calls++;
9133 * Synchronized wrappers.
9134 * It's hard to determine where to replace a method with its synchronized
9135 * wrapper without causing an infinite recursion. The current solution is
9136 * to add the synchronized wrapper in the trampolines, and to
9137 * change the called method to a dummy wrapper, and resolve that wrapper
9138 * to the real method in mono_jit_compile_method ().
9140 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9141 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9142 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9143 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9147 * Making generic calls out of gsharedvt methods.
9148 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9149 * patching gshared method addresses into a gsharedvt method.
9151 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9152 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9153 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9154 MonoRgctxInfoType info_type;
9157 //if (mono_class_is_interface (cmethod->klass))
9158 //GSHAREDVT_FAILURE (*ip);
9159 // disable for possible remoting calls
9160 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9161 GSHAREDVT_FAILURE (*ip);
9162 if (fsig->generic_param_count) {
9163 /* virtual generic call */
9164 g_assert (!imt_arg);
9165 /* Same as the virtual generic case above */
9166 imt_arg = emit_get_rgctx_method (cfg, context_used,
9167 cmethod, MONO_RGCTX_INFO_METHOD);
9168 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9170 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9171 /* This can happen when we call a fully instantiated iface method */
9172 imt_arg = emit_get_rgctx_method (cfg, context_used,
9173 cmethod, MONO_RGCTX_INFO_METHOD);
9178 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9179 keep_this_alive = sp [0];
9181 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9182 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9184 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9185 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9187 if (cfg->llvm_only) {
9188 // FIXME: Avoid initializing vtable_arg
9189 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9191 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9196 /* Generic sharing */
9199 * Use this if the callee is gsharedvt sharable too, since
9200 * at runtime we might find an instantiation so the call cannot
9201 * be patched (the 'no_patch' code path in mini-trampolines.c).
9203 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9204 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9205 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9206 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9207 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9208 INLINE_FAILURE ("gshared");
9210 g_assert (cfg->gshared && cmethod);
9214 * We are compiling a call to a
9215 * generic method from shared code,
9216 * which means that we have to look up
9217 * the method in the rgctx and do an
9221 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9223 if (cfg->llvm_only) {
9224 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9225 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9227 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9228 // FIXME: Avoid initializing imt_arg/vtable_arg
9229 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9231 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9232 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9237 /* Direct calls to icalls */
9239 MonoMethod *wrapper;
9242 /* Inline the wrapper */
9243 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9245 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9246 g_assert (costs > 0);
9247 cfg->real_offset += 5;
9249 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9250 /* *sp is already set by inline_method */
9255 inline_costs += costs;
9264 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9265 MonoInst *val = sp [fsig->param_count];
9267 if (val->type == STACK_OBJ) {
9268 MonoInst *iargs [2];
9273 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9276 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9277 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9278 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9279 emit_write_barrier (cfg, addr, val);
9280 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9281 GSHAREDVT_FAILURE (*ip);
9282 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9283 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9285 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9286 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9287 if (!cmethod->klass->element_class->valuetype && !readonly)
9288 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9289 CHECK_TYPELOAD (cmethod->klass);
9292 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9295 g_assert_not_reached ();
9302 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9306 /* Tail prefix / tail call optimization */
9308 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9309 /* FIXME: runtime generic context pointer for jumps? */
9310 /* FIXME: handle this for generic sharing eventually */
9311 if ((ins_flag & MONO_INST_TAILCALL) &&
9312 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9313 supported_tail_call = TRUE;
9315 if (supported_tail_call) {
9318 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9319 INLINE_FAILURE ("tail call");
9321 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9323 if (cfg->backend->have_op_tail_call) {
9324 /* Handle tail calls similarly to normal calls */
9327 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9329 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9330 call->tail_call = TRUE;
9331 call->method = cmethod;
9332 call->signature = mono_method_signature (cmethod);
9335 * We implement tail calls by storing the actual arguments into the
9336 * argument variables, then emitting a CEE_JMP.
9338 for (i = 0; i < n; ++i) {
9339 /* Prevent argument from being register allocated */
9340 arg_array [i]->flags |= MONO_INST_VOLATILE;
9341 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9343 ins = (MonoInst*)call;
9344 ins->inst_p0 = cmethod;
9345 ins->inst_p1 = arg_array [0];
9346 MONO_ADD_INS (cfg->cbb, ins);
9347 link_bblock (cfg, cfg->cbb, end_bblock);
9348 start_new_bblock = 1;
9350 // FIXME: Eliminate unreachable epilogs
9353 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9354 * only reachable from this call.
9356 GET_BBLOCK (cfg, tblock, ip + 5);
9357 if (tblock == cfg->cbb || tblock->in_count == 0)
9366 * Virtual calls in llvm-only mode.
9368 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9369 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9374 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9375 INLINE_FAILURE ("call");
9376 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9377 imt_arg, vtable_arg);
9379 if (tail_call && !cfg->llvm_only) {
9380 link_bblock (cfg, cfg->cbb, end_bblock);
9381 start_new_bblock = 1;
9383 // FIXME: Eliminate unreachable epilogs
9386 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9387 * only reachable from this call.
9389 GET_BBLOCK (cfg, tblock, ip + 5);
9390 if (tblock == cfg->cbb || tblock->in_count == 0)
9397 /* End of call, INS should contain the result of the call, if any */
9399 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9402 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9407 if (keep_this_alive) {
9408 MonoInst *dummy_use;
9410 /* See mono_emit_method_call_full () */
9411 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9414 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9416 * Clang can convert these calls to tail calls which screw up the stack
9417 * walk. This happens even when the -fno-optimize-sibling-calls
9418 * option is passed to clang.
9419 * Work around this by emitting a dummy call.
9421 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9424 CHECK_CFG_EXCEPTION;
9428 g_assert (*ip == CEE_RET);
9432 constrained_class = NULL;
9434 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9438 if (cfg->method != method) {
9439 /* return from inlined method */
9441 * If in_count == 0, that means the ret is unreachable due to
9442 * being preceded by a throw. In that case, inline_method () will
9443 * handle setting the return value
9444 * (test case: test_0_inline_throw ()).
9446 if (return_var && cfg->cbb->in_count) {
9447 MonoType *ret_type = mono_method_signature (method)->ret;
9453 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9456 //g_assert (returnvar != -1);
9457 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9458 cfg->ret_var_set = TRUE;
9461 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9463 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9467 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9469 if (seq_points && !sym_seq_points) {
9471 * Place a seq point here too even though the IL stack is not
9472 * empty, so a step over on
9475 * will work correctly.
9477 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9478 MONO_ADD_INS (cfg->cbb, ins);
9481 g_assert (!return_var);
9485 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9488 emit_setret (cfg, *sp);
9491 if (sp != stack_start)
9493 MONO_INST_NEW (cfg, ins, OP_BR);
9495 ins->inst_target_bb = end_bblock;
9496 MONO_ADD_INS (cfg->cbb, ins);
9497 link_bblock (cfg, cfg->cbb, end_bblock);
9498 start_new_bblock = 1;
9502 MONO_INST_NEW (cfg, ins, OP_BR);
9504 target = ip + 1 + (signed char)(*ip);
9506 GET_BBLOCK (cfg, tblock, target);
9507 link_bblock (cfg, cfg->cbb, tblock);
9508 ins->inst_target_bb = tblock;
9509 if (sp != stack_start) {
9510 handle_stack_args (cfg, stack_start, sp - stack_start);
9512 CHECK_UNVERIFIABLE (cfg);
9514 MONO_ADD_INS (cfg->cbb, ins);
9515 start_new_bblock = 1;
9516 inline_costs += BRANCH_COST;
9530 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9532 target = ip + 1 + *(signed char*)ip;
9538 inline_costs += BRANCH_COST;
9542 MONO_INST_NEW (cfg, ins, OP_BR);
9545 target = ip + 4 + (gint32)read32(ip);
9547 GET_BBLOCK (cfg, tblock, target);
9548 link_bblock (cfg, cfg->cbb, tblock);
9549 ins->inst_target_bb = tblock;
9550 if (sp != stack_start) {
9551 handle_stack_args (cfg, stack_start, sp - stack_start);
9553 CHECK_UNVERIFIABLE (cfg);
9556 MONO_ADD_INS (cfg->cbb, ins);
9558 start_new_bblock = 1;
9559 inline_costs += BRANCH_COST;
9566 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9567 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9568 guint32 opsize = is_short ? 1 : 4;
9570 CHECK_OPSIZE (opsize);
9572 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9575 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9580 GET_BBLOCK (cfg, tblock, target);
9581 link_bblock (cfg, cfg->cbb, tblock);
9582 GET_BBLOCK (cfg, tblock, ip);
9583 link_bblock (cfg, cfg->cbb, tblock);
9585 if (sp != stack_start) {
9586 handle_stack_args (cfg, stack_start, sp - stack_start);
9587 CHECK_UNVERIFIABLE (cfg);
9590 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9591 cmp->sreg1 = sp [0]->dreg;
9592 type_from_op (cfg, cmp, sp [0], NULL);
9595 #if SIZEOF_REGISTER == 4
9596 if (cmp->opcode == OP_LCOMPARE_IMM) {
9597 /* Convert it to OP_LCOMPARE */
9598 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9599 ins->type = STACK_I8;
9600 ins->dreg = alloc_dreg (cfg, STACK_I8);
9602 MONO_ADD_INS (cfg->cbb, ins);
9603 cmp->opcode = OP_LCOMPARE;
9604 cmp->sreg2 = ins->dreg;
9607 MONO_ADD_INS (cfg->cbb, cmp);
9609 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9610 type_from_op (cfg, ins, sp [0], NULL);
9611 MONO_ADD_INS (cfg->cbb, ins);
9612 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9613 GET_BBLOCK (cfg, tblock, target);
9614 ins->inst_true_bb = tblock;
9615 GET_BBLOCK (cfg, tblock, ip);
9616 ins->inst_false_bb = tblock;
9617 start_new_bblock = 2;
9620 inline_costs += BRANCH_COST;
9635 MONO_INST_NEW (cfg, ins, *ip);
9637 target = ip + 4 + (gint32)read32(ip);
9643 inline_costs += BRANCH_COST;
9647 MonoBasicBlock **targets;
9648 MonoBasicBlock *default_bblock;
9649 MonoJumpInfoBBTable *table;
9650 int offset_reg = alloc_preg (cfg);
9651 int target_reg = alloc_preg (cfg);
9652 int table_reg = alloc_preg (cfg);
9653 int sum_reg = alloc_preg (cfg);
9654 gboolean use_op_switch;
9658 n = read32 (ip + 1);
9661 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9665 CHECK_OPSIZE (n * sizeof (guint32));
9666 target = ip + n * sizeof (guint32);
9668 GET_BBLOCK (cfg, default_bblock, target);
9669 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9671 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9672 for (i = 0; i < n; ++i) {
9673 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9674 targets [i] = tblock;
9675 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9679 if (sp != stack_start) {
9681 * Link the current bb with the targets as well, so handle_stack_args
9682 * will set their in_stack correctly.
9684 link_bblock (cfg, cfg->cbb, default_bblock);
9685 for (i = 0; i < n; ++i)
9686 link_bblock (cfg, cfg->cbb, targets [i]);
9688 handle_stack_args (cfg, stack_start, sp - stack_start);
9690 CHECK_UNVERIFIABLE (cfg);
9692 /* Undo the links */
9693 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9694 for (i = 0; i < n; ++i)
9695 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9701 for (i = 0; i < n; ++i)
9702 link_bblock (cfg, cfg->cbb, targets [i]);
9704 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9705 table->table = targets;
9706 table->table_size = n;
9708 use_op_switch = FALSE;
9710 /* ARM implements SWITCH statements differently */
9711 /* FIXME: Make it use the generic implementation */
9712 if (!cfg->compile_aot)
9713 use_op_switch = TRUE;
9716 if (COMPILE_LLVM (cfg))
9717 use_op_switch = TRUE;
9719 cfg->cbb->has_jump_table = 1;
9721 if (use_op_switch) {
9722 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9723 ins->sreg1 = src1->dreg;
9724 ins->inst_p0 = table;
9725 ins->inst_many_bb = targets;
9726 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9727 MONO_ADD_INS (cfg->cbb, ins);
9729 if (sizeof (gpointer) == 8)
9730 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9734 #if SIZEOF_REGISTER == 8
9735 /* The upper word might not be zero, and we add it to a 64 bit address later */
9736 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9739 if (cfg->compile_aot) {
9740 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9742 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9743 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9744 ins->inst_p0 = table;
9745 ins->dreg = table_reg;
9746 MONO_ADD_INS (cfg->cbb, ins);
9749 /* FIXME: Use load_memindex */
9750 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9752 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9754 start_new_bblock = 1;
9755 inline_costs += (BRANCH_COST * 2);
9775 dreg = alloc_freg (cfg);
9778 dreg = alloc_lreg (cfg);
9781 dreg = alloc_ireg_ref (cfg);
9784 dreg = alloc_preg (cfg);
9787 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9788 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9789 if (*ip == CEE_LDIND_R4)
9790 ins->type = cfg->r4_stack_type;
9791 ins->flags |= ins_flag;
9792 MONO_ADD_INS (cfg->cbb, ins);
9794 if (ins_flag & MONO_INST_VOLATILE) {
9795 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9796 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9812 if (ins_flag & MONO_INST_VOLATILE) {
9813 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9814 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9817 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9818 ins->flags |= ins_flag;
9821 MONO_ADD_INS (cfg->cbb, ins);
9823 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9824 emit_write_barrier (cfg, sp [0], sp [1]);
9833 MONO_INST_NEW (cfg, ins, (*ip));
9835 ins->sreg1 = sp [0]->dreg;
9836 ins->sreg2 = sp [1]->dreg;
9837 type_from_op (cfg, ins, sp [0], sp [1]);
9839 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9841 /* Use the immediate opcodes if possible */
9842 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9843 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9844 if (imm_opcode != -1) {
9845 ins->opcode = imm_opcode;
9846 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9849 NULLIFY_INS (sp [1]);
9853 MONO_ADD_INS ((cfg)->cbb, (ins));
9855 *sp++ = mono_decompose_opcode (cfg, ins);
9872 MONO_INST_NEW (cfg, ins, (*ip));
9874 ins->sreg1 = sp [0]->dreg;
9875 ins->sreg2 = sp [1]->dreg;
9876 type_from_op (cfg, ins, sp [0], sp [1]);
9878 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9879 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9881 /* FIXME: Pass opcode to is_inst_imm */
9883 /* Use the immediate opcodes if possible */
9884 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9885 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9886 if (imm_opcode != -1) {
9887 ins->opcode = imm_opcode;
9888 if (sp [1]->opcode == OP_I8CONST) {
9889 #if SIZEOF_REGISTER == 8
9890 ins->inst_imm = sp [1]->inst_l;
9892 ins->inst_ls_word = sp [1]->inst_ls_word;
9893 ins->inst_ms_word = sp [1]->inst_ms_word;
9897 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9900 /* Might be followed by an instruction added by add_widen_op */
9901 if (sp [1]->next == NULL)
9902 NULLIFY_INS (sp [1]);
9905 MONO_ADD_INS ((cfg)->cbb, (ins));
9907 *sp++ = mono_decompose_opcode (cfg, ins);
9920 case CEE_CONV_OVF_I8:
9921 case CEE_CONV_OVF_U8:
9925 /* Special case this earlier so we have long constants in the IR */
9926 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9927 int data = sp [-1]->inst_c0;
9928 sp [-1]->opcode = OP_I8CONST;
9929 sp [-1]->type = STACK_I8;
9930 #if SIZEOF_REGISTER == 8
9931 if ((*ip) == CEE_CONV_U8)
9932 sp [-1]->inst_c0 = (guint32)data;
9934 sp [-1]->inst_c0 = data;
9936 sp [-1]->inst_ls_word = data;
9937 if ((*ip) == CEE_CONV_U8)
9938 sp [-1]->inst_ms_word = 0;
9940 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9942 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9949 case CEE_CONV_OVF_I4:
9950 case CEE_CONV_OVF_I1:
9951 case CEE_CONV_OVF_I2:
9952 case CEE_CONV_OVF_I:
9953 case CEE_CONV_OVF_U:
9956 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9957 ADD_UNOP (CEE_CONV_OVF_I8);
9964 case CEE_CONV_OVF_U1:
9965 case CEE_CONV_OVF_U2:
9966 case CEE_CONV_OVF_U4:
9969 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9970 ADD_UNOP (CEE_CONV_OVF_U8);
9977 case CEE_CONV_OVF_I1_UN:
9978 case CEE_CONV_OVF_I2_UN:
9979 case CEE_CONV_OVF_I4_UN:
9980 case CEE_CONV_OVF_I8_UN:
9981 case CEE_CONV_OVF_U1_UN:
9982 case CEE_CONV_OVF_U2_UN:
9983 case CEE_CONV_OVF_U4_UN:
9984 case CEE_CONV_OVF_U8_UN:
9985 case CEE_CONV_OVF_I_UN:
9986 case CEE_CONV_OVF_U_UN:
9993 CHECK_CFG_EXCEPTION;
9997 case CEE_ADD_OVF_UN:
9999 case CEE_MUL_OVF_UN:
10001 case CEE_SUB_OVF_UN:
10007 GSHAREDVT_FAILURE (*ip);
10010 token = read32 (ip + 1);
10011 klass = mini_get_class (method, token, generic_context);
10012 CHECK_TYPELOAD (klass);
10014 if (generic_class_is_reference_type (cfg, klass)) {
10015 MonoInst *store, *load;
10016 int dreg = alloc_ireg_ref (cfg);
10018 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10019 load->flags |= ins_flag;
10020 MONO_ADD_INS (cfg->cbb, load);
10022 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10023 store->flags |= ins_flag;
10024 MONO_ADD_INS (cfg->cbb, store);
10026 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10027 emit_write_barrier (cfg, sp [0], sp [1]);
10029 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10035 int loc_index = -1;
10041 token = read32 (ip + 1);
10042 klass = mini_get_class (method, token, generic_context);
10043 CHECK_TYPELOAD (klass);
10045 /* Optimize the common ldobj+stloc combination */
10048 loc_index = ip [6];
10055 loc_index = ip [5] - CEE_STLOC_0;
10062 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10063 CHECK_LOCAL (loc_index);
10065 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10066 ins->dreg = cfg->locals [loc_index]->dreg;
10067 ins->flags |= ins_flag;
10070 if (ins_flag & MONO_INST_VOLATILE) {
10071 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10072 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10078 /* Optimize the ldobj+stobj combination */
10079 /* The reference case ends up being a load+store anyway */
10080 /* Skip this if the operation is volatile. */
10081 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10086 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10093 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10094 ins->flags |= ins_flag;
10097 if (ins_flag & MONO_INST_VOLATILE) {
10098 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10099 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10108 CHECK_STACK_OVF (1);
10110 n = read32 (ip + 1);
10112 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10113 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10114 ins->type = STACK_OBJ;
10117 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10118 MonoInst *iargs [1];
10119 char *str = (char *)mono_method_get_wrapper_data (method, n);
10121 if (cfg->compile_aot)
10122 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10124 EMIT_NEW_PCONST (cfg, iargs [0], str);
10125 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10127 if (cfg->opt & MONO_OPT_SHARED) {
10128 MonoInst *iargs [3];
10130 if (cfg->compile_aot) {
10131 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10133 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10134 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10135 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10136 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10137 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10140 if (cfg->cbb->out_of_line) {
10141 MonoInst *iargs [2];
10143 if (image == mono_defaults.corlib) {
10145 * Avoid relocations in AOT and save some space by using a
10146 * version of helper_ldstr specialized to mscorlib.
10148 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10149 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10151 /* Avoid creating the string object */
10152 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10153 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10154 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10158 if (cfg->compile_aot) {
10159 NEW_LDSTRCONST (cfg, ins, image, n);
10161 MONO_ADD_INS (cfg->cbb, ins);
10164 NEW_PCONST (cfg, ins, NULL);
10165 ins->type = STACK_OBJ;
10166 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10170 OUT_OF_MEMORY_FAILURE;
10173 MONO_ADD_INS (cfg->cbb, ins);
10182 MonoInst *iargs [2];
10183 MonoMethodSignature *fsig;
10186 MonoInst *vtable_arg = NULL;
10189 token = read32 (ip + 1);
10190 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10193 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10196 mono_save_token_info (cfg, image, token, cmethod);
10198 if (!mono_class_init (cmethod->klass))
10199 TYPE_LOAD_ERROR (cmethod->klass);
10201 context_used = mini_method_check_context_used (cfg, cmethod);
10203 if (mono_security_core_clr_enabled ())
10204 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10206 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10207 emit_class_init (cfg, cmethod->klass);
10208 CHECK_TYPELOAD (cmethod->klass);
10212 if (cfg->gsharedvt) {
10213 if (mini_is_gsharedvt_variable_signature (sig))
10214 GSHAREDVT_FAILURE (*ip);
10218 n = fsig->param_count;
10222 * Generate smaller code for the common newobj <exception> instruction in
10223 * argument checking code.
10225 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10226 is_exception_class (cmethod->klass) && n <= 2 &&
10227 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10228 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10229 MonoInst *iargs [3];
10233 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10236 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10239 iargs [1] = sp [0];
10240 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10243 iargs [1] = sp [0];
10244 iargs [2] = sp [1];
10245 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10248 g_assert_not_reached ();
10256 /* move the args to allow room for 'this' in the first position */
10262 /* check_call_signature () requires sp[0] to be set */
10263 this_ins.type = STACK_OBJ;
10264 sp [0] = &this_ins;
10265 if (check_call_signature (cfg, fsig, sp))
10270 if (mini_class_is_system_array (cmethod->klass)) {
10271 *sp = emit_get_rgctx_method (cfg, context_used,
10272 cmethod, MONO_RGCTX_INFO_METHOD);
10274 /* Avoid varargs in the common case */
10275 if (fsig->param_count == 1)
10276 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10277 else if (fsig->param_count == 2)
10278 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10279 else if (fsig->param_count == 3)
10280 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10281 else if (fsig->param_count == 4)
10282 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10284 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10285 } else if (cmethod->string_ctor) {
10286 g_assert (!context_used);
10287 g_assert (!vtable_arg);
10288 /* we simply pass a null pointer */
10289 EMIT_NEW_PCONST (cfg, *sp, NULL);
10290 /* now call the string ctor */
10291 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10293 if (cmethod->klass->valuetype) {
10294 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10295 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10296 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10301 * The code generated by mini_emit_virtual_call () expects
10302 * iargs [0] to be a boxed instance, but luckily the vcall
10303 * will be transformed into a normal call there.
10305 } else if (context_used) {
10306 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10309 MonoVTable *vtable = NULL;
10311 if (!cfg->compile_aot)
10312 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10313 CHECK_TYPELOAD (cmethod->klass);
10316 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10317 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10318 * As a workaround, we call class cctors before allocating objects.
10320 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10321 emit_class_init (cfg, cmethod->klass);
10322 if (cfg->verbose_level > 2)
10323 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10324 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10327 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10330 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10333 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10335 /* Now call the actual ctor */
10336 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10337 CHECK_CFG_EXCEPTION;
10340 if (alloc == NULL) {
10342 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10343 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10351 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10352 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10355 case CEE_CASTCLASS:
10360 token = read32 (ip + 1);
10361 klass = mini_get_class (method, token, generic_context);
10362 CHECK_TYPELOAD (klass);
10363 if (sp [0]->type != STACK_OBJ)
10366 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10367 ins->dreg = alloc_preg (cfg);
10368 ins->sreg1 = (*sp)->dreg;
10369 ins->klass = klass;
10370 ins->type = STACK_OBJ;
10371 MONO_ADD_INS (cfg->cbb, ins);
10373 CHECK_CFG_EXCEPTION;
10377 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10380 case CEE_UNBOX_ANY: {
10381 MonoInst *res, *addr;
10386 token = read32 (ip + 1);
10387 klass = mini_get_class (method, token, generic_context);
10388 CHECK_TYPELOAD (klass);
10390 mono_save_token_info (cfg, image, token, klass);
10392 context_used = mini_class_check_context_used (cfg, klass);
10394 if (mini_is_gsharedvt_klass (klass)) {
10395 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10397 } else if (generic_class_is_reference_type (cfg, klass)) {
10398 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10399 EMIT_NEW_PCONST (cfg, res, NULL);
10400 res->type = STACK_OBJ;
10402 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10403 res->dreg = alloc_preg (cfg);
10404 res->sreg1 = (*sp)->dreg;
10405 res->klass = klass;
10406 res->type = STACK_OBJ;
10407 MONO_ADD_INS (cfg->cbb, res);
10408 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10410 } else if (mono_class_is_nullable (klass)) {
10411 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10413 addr = handle_unbox (cfg, klass, sp, context_used);
10415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10426 MonoClass *enum_class;
10427 MonoMethod *has_flag;
10433 token = read32 (ip + 1);
10434 klass = mini_get_class (method, token, generic_context);
10435 CHECK_TYPELOAD (klass);
10437 mono_save_token_info (cfg, image, token, klass);
10439 context_used = mini_class_check_context_used (cfg, klass);
10441 if (generic_class_is_reference_type (cfg, klass)) {
10447 if (klass == mono_defaults.void_class)
10449 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10451 /* frequent check in generic code: box (struct), brtrue */
10456 * <push int/long ptr>
10459 * constrained. MyFlags
10460 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10462 * If we find this sequence and the operand types on box and constrained
10463 * are equal, we can emit a specialized instruction sequence instead of
10464 * the very slow HasFlag () call.
10466 if ((cfg->opt & MONO_OPT_INTRINS) &&
10467 /* Cheap checks first. */
10468 ip + 5 + 6 + 5 < end &&
10469 ip [5] == CEE_PREFIX1 &&
10470 ip [6] == CEE_CONSTRAINED_ &&
10471 ip [11] == CEE_CALLVIRT &&
10472 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10473 mono_class_is_enum (klass) &&
10474 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10475 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10476 has_flag->klass == mono_defaults.enum_class &&
10477 !strcmp (has_flag->name, "HasFlag") &&
10478 has_flag->signature->hasthis &&
10479 has_flag->signature->param_count == 1) {
10480 CHECK_TYPELOAD (enum_class);
10482 if (enum_class == klass) {
10483 MonoInst *enum_this, *enum_flag;
10488 enum_this = sp [0];
10489 enum_flag = sp [1];
10491 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10496 // FIXME: LLVM can't handle the inconsistent bb linking
10497 if (!mono_class_is_nullable (klass) &&
10498 !mini_is_gsharedvt_klass (klass) &&
10499 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10500 (ip [5] == CEE_BRTRUE ||
10501 ip [5] == CEE_BRTRUE_S ||
10502 ip [5] == CEE_BRFALSE ||
10503 ip [5] == CEE_BRFALSE_S)) {
10504 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10506 MonoBasicBlock *true_bb, *false_bb;
10510 if (cfg->verbose_level > 3) {
10511 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10512 printf ("<box+brtrue opt>\n");
10517 case CEE_BRFALSE_S:
10520 target = ip + 1 + (signed char)(*ip);
10527 target = ip + 4 + (gint)(read32 (ip));
10531 g_assert_not_reached ();
10535 * We need to link both bblocks, since it is needed for handling stack
10536 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10537 * Branching to only one of them would lead to inconsistencies, so
10538 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10540 GET_BBLOCK (cfg, true_bb, target);
10541 GET_BBLOCK (cfg, false_bb, ip);
10543 mono_link_bblock (cfg, cfg->cbb, true_bb);
10544 mono_link_bblock (cfg, cfg->cbb, false_bb);
10546 if (sp != stack_start) {
10547 handle_stack_args (cfg, stack_start, sp - stack_start);
10549 CHECK_UNVERIFIABLE (cfg);
10552 if (COMPILE_LLVM (cfg)) {
10553 dreg = alloc_ireg (cfg);
10554 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10557 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10559 /* The JIT can't eliminate the iconst+compare */
10560 MONO_INST_NEW (cfg, ins, OP_BR);
10561 ins->inst_target_bb = is_true ? true_bb : false_bb;
10562 MONO_ADD_INS (cfg->cbb, ins);
10565 start_new_bblock = 1;
10569 *sp++ = handle_box (cfg, val, klass, context_used);
10571 CHECK_CFG_EXCEPTION;
10580 token = read32 (ip + 1);
10581 klass = mini_get_class (method, token, generic_context);
10582 CHECK_TYPELOAD (klass);
10584 mono_save_token_info (cfg, image, token, klass);
10586 context_used = mini_class_check_context_used (cfg, klass);
10588 if (mono_class_is_nullable (klass)) {
10591 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10592 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10596 ins = handle_unbox (cfg, klass, sp, context_used);
10609 MonoClassField *field;
10610 #ifndef DISABLE_REMOTING
10614 gboolean is_instance;
10616 gpointer addr = NULL;
10617 gboolean is_special_static;
10619 MonoInst *store_val = NULL;
10620 MonoInst *thread_ins;
10623 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10625 if (op == CEE_STFLD) {
10628 store_val = sp [1];
10633 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10635 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10638 if (op == CEE_STSFLD) {
10641 store_val = sp [0];
10646 token = read32 (ip + 1);
10647 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10648 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10649 klass = field->parent;
10652 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10655 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10656 FIELD_ACCESS_FAILURE (method, field);
10657 mono_class_init (klass);
10659 /* if the class is Critical then transparent code cannot access it's fields */
10660 if (!is_instance && mono_security_core_clr_enabled ())
10661 ensure_method_is_allowed_to_access_field (cfg, method, field);
10663 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10664 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10665 if (mono_security_core_clr_enabled ())
10666 ensure_method_is_allowed_to_access_field (cfg, method, field);
10669 ftype = mono_field_get_type (field);
10672 * LDFLD etc. is usable on static fields as well, so convert those cases to
10675 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10687 g_assert_not_reached ();
10689 is_instance = FALSE;
10692 context_used = mini_class_check_context_used (cfg, klass);
10694 /* INSTANCE CASE */
10696 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10697 if (op == CEE_STFLD) {
10698 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10700 #ifndef DISABLE_REMOTING
10701 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10702 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10703 MonoInst *iargs [5];
10705 GSHAREDVT_FAILURE (op);
10707 iargs [0] = sp [0];
10708 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10709 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10710 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10712 iargs [4] = sp [1];
10714 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10715 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10716 iargs, ip, cfg->real_offset, TRUE);
10717 CHECK_CFG_EXCEPTION;
10718 g_assert (costs > 0);
10720 cfg->real_offset += 5;
10722 inline_costs += costs;
10724 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10729 MonoInst *store, *wbarrier_ptr_ins = NULL;
10731 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10733 if (ins_flag & MONO_INST_VOLATILE) {
10734 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10735 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10738 if (mini_is_gsharedvt_klass (klass)) {
10739 MonoInst *offset_ins;
10741 context_used = mini_class_check_context_used (cfg, klass);
10743 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10744 /* The value is offset by 1 */
10745 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10746 dreg = alloc_ireg_mp (cfg);
10747 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10748 wbarrier_ptr_ins = ins;
10749 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10750 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10752 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10754 if (sp [0]->opcode != OP_LDADDR)
10755 store->flags |= MONO_INST_FAULT;
10757 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10758 if (mini_is_gsharedvt_klass (klass)) {
10759 g_assert (wbarrier_ptr_ins);
10760 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10762 /* insert call to write barrier */
10766 dreg = alloc_ireg_mp (cfg);
10767 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10768 emit_write_barrier (cfg, ptr, sp [1]);
10772 store->flags |= ins_flag;
10779 #ifndef DISABLE_REMOTING
10780 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10781 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10782 MonoInst *iargs [4];
10784 GSHAREDVT_FAILURE (op);
10786 iargs [0] = sp [0];
10787 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10788 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10789 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10790 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10791 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10792 iargs, ip, cfg->real_offset, TRUE);
10793 CHECK_CFG_EXCEPTION;
10794 g_assert (costs > 0);
10796 cfg->real_offset += 5;
10800 inline_costs += costs;
10802 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10808 if (sp [0]->type == STACK_VTYPE) {
10811 /* Have to compute the address of the variable */
10813 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10815 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10817 g_assert (var->klass == klass);
10819 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10823 if (op == CEE_LDFLDA) {
10824 if (sp [0]->type == STACK_OBJ) {
10825 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10826 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10829 dreg = alloc_ireg_mp (cfg);
10831 if (mini_is_gsharedvt_klass (klass)) {
10832 MonoInst *offset_ins;
10834 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10835 /* The value is offset by 1 */
10836 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10837 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10839 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10841 ins->klass = mono_class_from_mono_type (field->type);
10842 ins->type = STACK_MP;
10847 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10849 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10850 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10859 if (mini_is_gsharedvt_klass (klass)) {
10860 MonoInst *offset_ins;
10862 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10863 /* The value is offset by 1 */
10864 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10865 dreg = alloc_ireg_mp (cfg);
10866 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10867 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10869 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10871 load->flags |= ins_flag;
10872 if (sp [0]->opcode != OP_LDADDR)
10873 load->flags |= MONO_INST_FAULT;
10885 context_used = mini_class_check_context_used (cfg, klass);
10887 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10888 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10892 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10893 * to be called here.
10895 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10896 mono_class_vtable (cfg->domain, klass);
10897 CHECK_TYPELOAD (klass);
10899 mono_domain_lock (cfg->domain);
10900 if (cfg->domain->special_static_fields)
10901 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10902 mono_domain_unlock (cfg->domain);
10904 is_special_static = mono_class_field_is_special_static (field);
10906 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10907 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10911 /* Generate IR to compute the field address */
10912 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10914 * Fast access to TLS data
10915 * Inline version of get_thread_static_data () in
10919 int idx, static_data_reg, array_reg, dreg;
10921 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10922 GSHAREDVT_FAILURE (op);
10924 static_data_reg = alloc_ireg (cfg);
10925 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10927 if (cfg->compile_aot) {
10928 int offset_reg, offset2_reg, idx_reg;
10930 /* For TLS variables, this will return the TLS offset */
10931 EMIT_NEW_SFLDACONST (cfg, ins, field);
10932 offset_reg = ins->dreg;
10933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10934 idx_reg = alloc_ireg (cfg);
10935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10937 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10938 array_reg = alloc_ireg (cfg);
10939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10940 offset2_reg = alloc_ireg (cfg);
10941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10943 dreg = alloc_ireg (cfg);
10944 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10946 offset = (gsize)addr & 0x7fffffff;
10947 idx = offset & 0x3f;
10949 array_reg = alloc_ireg (cfg);
10950 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10951 dreg = alloc_ireg (cfg);
10952 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10954 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10955 (cfg->compile_aot && is_special_static) ||
10956 (context_used && is_special_static)) {
10957 MonoInst *iargs [2];
10959 g_assert (field->parent);
10960 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10961 if (context_used) {
10962 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10963 field, MONO_RGCTX_INFO_CLASS_FIELD);
10965 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10967 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10968 } else if (context_used) {
10969 MonoInst *static_data;
10972 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10973 method->klass->name_space, method->klass->name, method->name,
10974 depth, field->offset);
10977 if (mono_class_needs_cctor_run (klass, method))
10978 emit_class_init (cfg, klass);
10981 * The pointer we're computing here is
10983 * super_info.static_data + field->offset
10985 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10986 klass, MONO_RGCTX_INFO_STATIC_DATA);
10988 if (mini_is_gsharedvt_klass (klass)) {
10989 MonoInst *offset_ins;
10991 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10992 /* The value is offset by 1 */
10993 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10994 dreg = alloc_ireg_mp (cfg);
10995 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10996 } else if (field->offset == 0) {
10999 int addr_reg = mono_alloc_preg (cfg);
11000 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11002 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11003 MonoInst *iargs [2];
11005 g_assert (field->parent);
11006 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11007 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11008 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11010 MonoVTable *vtable = NULL;
11012 if (!cfg->compile_aot)
11013 vtable = mono_class_vtable (cfg->domain, klass);
11014 CHECK_TYPELOAD (klass);
11017 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11018 if (!(g_slist_find (class_inits, klass))) {
11019 emit_class_init (cfg, klass);
11020 if (cfg->verbose_level > 2)
11021 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11022 class_inits = g_slist_prepend (class_inits, klass);
11025 if (cfg->run_cctors) {
11026 /* This makes so that inline cannot trigger */
11027 /* .cctors: too many apps depend on them */
11028 /* running with a specific order... */
11030 if (! vtable->initialized)
11031 INLINE_FAILURE ("class init");
11032 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11033 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11034 goto exception_exit;
11038 if (cfg->compile_aot)
11039 EMIT_NEW_SFLDACONST (cfg, ins, field);
11042 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11044 EMIT_NEW_PCONST (cfg, ins, addr);
11047 MonoInst *iargs [1];
11048 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11049 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11053 /* Generate IR to do the actual load/store operation */
11055 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11056 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11057 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11060 if (op == CEE_LDSFLDA) {
11061 ins->klass = mono_class_from_mono_type (ftype);
11062 ins->type = STACK_PTR;
11064 } else if (op == CEE_STSFLD) {
11067 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11068 store->flags |= ins_flag;
11070 gboolean is_const = FALSE;
11071 MonoVTable *vtable = NULL;
11072 gpointer addr = NULL;
11074 if (!context_used) {
11075 vtable = mono_class_vtable (cfg->domain, klass);
11076 CHECK_TYPELOAD (klass);
11078 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11079 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11080 int ro_type = ftype->type;
11082 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11083 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11084 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11087 GSHAREDVT_FAILURE (op);
11089 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11092 case MONO_TYPE_BOOLEAN:
11094 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11098 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11101 case MONO_TYPE_CHAR:
11103 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11107 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11112 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11116 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11121 case MONO_TYPE_PTR:
11122 case MONO_TYPE_FNPTR:
11123 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11124 type_to_eval_stack_type ((cfg), field->type, *sp);
11127 case MONO_TYPE_STRING:
11128 case MONO_TYPE_OBJECT:
11129 case MONO_TYPE_CLASS:
11130 case MONO_TYPE_SZARRAY:
11131 case MONO_TYPE_ARRAY:
11132 if (!mono_gc_is_moving ()) {
11133 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11134 type_to_eval_stack_type ((cfg), field->type, *sp);
11142 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11147 case MONO_TYPE_VALUETYPE:
11157 CHECK_STACK_OVF (1);
11159 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11160 load->flags |= ins_flag;
11166 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11167 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11168 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11179 token = read32 (ip + 1);
11180 klass = mini_get_class (method, token, generic_context);
11181 CHECK_TYPELOAD (klass);
11182 if (ins_flag & MONO_INST_VOLATILE) {
11183 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11184 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11186 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11187 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11188 ins->flags |= ins_flag;
11189 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11190 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11191 /* insert call to write barrier */
11192 emit_write_barrier (cfg, sp [0], sp [1]);
11204 const char *data_ptr;
11206 guint32 field_token;
11212 token = read32 (ip + 1);
11214 klass = mini_get_class (method, token, generic_context);
11215 CHECK_TYPELOAD (klass);
11217 context_used = mini_class_check_context_used (cfg, klass);
11219 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11220 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11221 ins->sreg1 = sp [0]->dreg;
11222 ins->type = STACK_I4;
11223 ins->dreg = alloc_ireg (cfg);
11224 MONO_ADD_INS (cfg->cbb, ins);
11225 *sp = mono_decompose_opcode (cfg, ins);
11228 if (context_used) {
11229 MonoInst *args [3];
11230 MonoClass *array_class = mono_array_class_get (klass, 1);
11231 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11233 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11236 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11237 array_class, MONO_RGCTX_INFO_VTABLE);
11242 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11244 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11246 if (cfg->opt & MONO_OPT_SHARED) {
11247 /* Decompose now to avoid problems with references to the domainvar */
11248 MonoInst *iargs [3];
11250 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11251 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11252 iargs [2] = sp [0];
11254 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11256 /* Decompose later since it is needed by abcrem */
11257 MonoClass *array_type = mono_array_class_get (klass, 1);
11258 mono_class_vtable (cfg->domain, array_type);
11259 CHECK_TYPELOAD (array_type);
11261 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11262 ins->dreg = alloc_ireg_ref (cfg);
11263 ins->sreg1 = sp [0]->dreg;
11264 ins->inst_newa_class = klass;
11265 ins->type = STACK_OBJ;
11266 ins->klass = array_type;
11267 MONO_ADD_INS (cfg->cbb, ins);
11268 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11269 cfg->cbb->has_array_access = TRUE;
11271 /* Needed so mono_emit_load_get_addr () gets called */
11272 mono_get_got_var (cfg);
11282 * we inline/optimize the initialization sequence if possible.
11283 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11284 * for small sizes open code the memcpy
11285 * ensure the rva field is big enough
11287 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11288 MonoMethod *memcpy_method = get_memcpy_method ();
11289 MonoInst *iargs [3];
11290 int add_reg = alloc_ireg_mp (cfg);
11292 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11293 if (cfg->compile_aot) {
11294 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11296 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11298 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11299 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11308 if (sp [0]->type != STACK_OBJ)
11311 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11312 ins->dreg = alloc_preg (cfg);
11313 ins->sreg1 = sp [0]->dreg;
11314 ins->type = STACK_I4;
11315 /* This flag will be inherited by the decomposition */
11316 ins->flags |= MONO_INST_FAULT;
11317 MONO_ADD_INS (cfg->cbb, ins);
11318 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11319 cfg->cbb->has_array_access = TRUE;
11327 if (sp [0]->type != STACK_OBJ)
11330 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11332 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11333 CHECK_TYPELOAD (klass);
11334 /* we need to make sure that this array is exactly the type it needs
11335 * to be for correctness. the wrappers are lax with their usage
11336 * so we need to ignore them here
11338 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11339 MonoClass *array_class = mono_array_class_get (klass, 1);
11340 mini_emit_check_array_type (cfg, sp [0], array_class);
11341 CHECK_TYPELOAD (array_class);
11345 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11350 case CEE_LDELEM_I1:
11351 case CEE_LDELEM_U1:
11352 case CEE_LDELEM_I2:
11353 case CEE_LDELEM_U2:
11354 case CEE_LDELEM_I4:
11355 case CEE_LDELEM_U4:
11356 case CEE_LDELEM_I8:
11358 case CEE_LDELEM_R4:
11359 case CEE_LDELEM_R8:
11360 case CEE_LDELEM_REF: {
11366 if (*ip == CEE_LDELEM) {
11368 token = read32 (ip + 1);
11369 klass = mini_get_class (method, token, generic_context);
11370 CHECK_TYPELOAD (klass);
11371 mono_class_init (klass);
11374 klass = array_access_to_klass (*ip);
11376 if (sp [0]->type != STACK_OBJ)
11379 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11381 if (mini_is_gsharedvt_variable_klass (klass)) {
11382 // FIXME-VT: OP_ICONST optimization
11383 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11384 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11385 ins->opcode = OP_LOADV_MEMBASE;
11386 } else if (sp [1]->opcode == OP_ICONST) {
11387 int array_reg = sp [0]->dreg;
11388 int index_reg = sp [1]->dreg;
11389 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11391 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11392 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11394 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11395 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11397 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11398 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11401 if (*ip == CEE_LDELEM)
11408 case CEE_STELEM_I1:
11409 case CEE_STELEM_I2:
11410 case CEE_STELEM_I4:
11411 case CEE_STELEM_I8:
11412 case CEE_STELEM_R4:
11413 case CEE_STELEM_R8:
11414 case CEE_STELEM_REF:
11419 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11421 if (*ip == CEE_STELEM) {
11423 token = read32 (ip + 1);
11424 klass = mini_get_class (method, token, generic_context);
11425 CHECK_TYPELOAD (klass);
11426 mono_class_init (klass);
11429 klass = array_access_to_klass (*ip);
11431 if (sp [0]->type != STACK_OBJ)
11434 emit_array_store (cfg, klass, sp, TRUE);
11436 if (*ip == CEE_STELEM)
11443 case CEE_CKFINITE: {
11447 if (cfg->llvm_only) {
11448 MonoInst *iargs [1];
11450 iargs [0] = sp [0];
11451 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11453 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11454 ins->sreg1 = sp [0]->dreg;
11455 ins->dreg = alloc_freg (cfg);
11456 ins->type = STACK_R8;
11457 MONO_ADD_INS (cfg->cbb, ins);
11459 *sp++ = mono_decompose_opcode (cfg, ins);
11465 case CEE_REFANYVAL: {
11466 MonoInst *src_var, *src;
11468 int klass_reg = alloc_preg (cfg);
11469 int dreg = alloc_preg (cfg);
11471 GSHAREDVT_FAILURE (*ip);
11474 MONO_INST_NEW (cfg, ins, *ip);
11477 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11478 CHECK_TYPELOAD (klass);
11480 context_used = mini_class_check_context_used (cfg, klass);
11483 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11485 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11486 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11489 if (context_used) {
11490 MonoInst *klass_ins;
11492 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11493 klass, MONO_RGCTX_INFO_KLASS);
11496 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11497 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11499 mini_emit_class_check (cfg, klass_reg, klass);
11501 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11502 ins->type = STACK_MP;
11503 ins->klass = klass;
11508 case CEE_MKREFANY: {
11509 MonoInst *loc, *addr;
11511 GSHAREDVT_FAILURE (*ip);
11514 MONO_INST_NEW (cfg, ins, *ip);
11517 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11518 CHECK_TYPELOAD (klass);
11520 context_used = mini_class_check_context_used (cfg, klass);
11522 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11523 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11525 if (context_used) {
11526 MonoInst *const_ins;
11527 int type_reg = alloc_preg (cfg);
11529 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11530 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11532 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11534 int const_reg = alloc_preg (cfg);
11535 int type_reg = alloc_preg (cfg);
11537 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11538 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11540 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11542 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11544 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11545 ins->type = STACK_VTYPE;
11546 ins->klass = mono_defaults.typed_reference_class;
11551 case CEE_LDTOKEN: {
11553 MonoClass *handle_class;
11555 CHECK_STACK_OVF (1);
11558 n = read32 (ip + 1);
11560 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11561 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11562 handle = mono_method_get_wrapper_data (method, n);
11563 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11564 if (handle_class == mono_defaults.typehandle_class)
11565 handle = &((MonoClass*)handle)->byval_arg;
11568 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11573 mono_class_init (handle_class);
11574 if (cfg->gshared) {
11575 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11576 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11577 /* This case handles ldtoken
11578 of an open type, like for
11581 } else if (handle_class == mono_defaults.typehandle_class) {
11582 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11583 } else if (handle_class == mono_defaults.fieldhandle_class)
11584 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11585 else if (handle_class == mono_defaults.methodhandle_class)
11586 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11588 g_assert_not_reached ();
11591 if ((cfg->opt & MONO_OPT_SHARED) &&
11592 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11593 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11594 MonoInst *addr, *vtvar, *iargs [3];
11595 int method_context_used;
11597 method_context_used = mini_method_check_context_used (cfg, method);
11599 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11601 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11602 EMIT_NEW_ICONST (cfg, iargs [1], n);
11603 if (method_context_used) {
11604 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11605 method, MONO_RGCTX_INFO_METHOD);
11606 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11608 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11609 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11611 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11615 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11617 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11618 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11619 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11620 (cmethod->klass == mono_defaults.systemtype_class) &&
11621 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11622 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11624 mono_class_init (tclass);
11625 if (context_used) {
11626 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11627 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11628 } else if (cfg->compile_aot) {
11629 if (method->wrapper_type) {
11630 error_init (&error); //got to do it since there are multiple conditionals below
11631 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11632 /* Special case for static synchronized wrappers */
11633 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11635 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11636 /* FIXME: n is not a normal token */
11638 EMIT_NEW_PCONST (cfg, ins, NULL);
11641 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11644 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11646 EMIT_NEW_PCONST (cfg, ins, rt);
11648 ins->type = STACK_OBJ;
11649 ins->klass = cmethod->klass;
11652 MonoInst *addr, *vtvar;
11654 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11656 if (context_used) {
11657 if (handle_class == mono_defaults.typehandle_class) {
11658 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11659 mono_class_from_mono_type ((MonoType *)handle),
11660 MONO_RGCTX_INFO_TYPE);
11661 } else if (handle_class == mono_defaults.methodhandle_class) {
11662 ins = emit_get_rgctx_method (cfg, context_used,
11663 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11664 } else if (handle_class == mono_defaults.fieldhandle_class) {
11665 ins = emit_get_rgctx_field (cfg, context_used,
11666 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11668 g_assert_not_reached ();
11670 } else if (cfg->compile_aot) {
11671 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11673 EMIT_NEW_PCONST (cfg, ins, handle);
11675 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11677 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11687 if (sp [-1]->type != STACK_OBJ)
11690 MONO_INST_NEW (cfg, ins, OP_THROW);
11692 ins->sreg1 = sp [0]->dreg;
11694 cfg->cbb->out_of_line = TRUE;
11695 MONO_ADD_INS (cfg->cbb, ins);
11696 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11697 MONO_ADD_INS (cfg->cbb, ins);
11700 link_bblock (cfg, cfg->cbb, end_bblock);
11701 start_new_bblock = 1;
11702 /* This can complicate code generation for llvm since the return value might not be defined */
11703 if (COMPILE_LLVM (cfg))
11704 INLINE_FAILURE ("throw");
11706 case CEE_ENDFINALLY:
11707 if (!ip_in_finally_clause (cfg, ip - header->code))
11709 /* mono_save_seq_point_info () depends on this */
11710 if (sp != stack_start)
11711 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11712 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11713 MONO_ADD_INS (cfg->cbb, ins);
11715 start_new_bblock = 1;
11718 * Control will leave the method so empty the stack, otherwise
11719 * the next basic block will start with a nonempty stack.
11721 while (sp != stack_start) {
11726 case CEE_LEAVE_S: {
11729 if (*ip == CEE_LEAVE) {
11731 target = ip + 5 + (gint32)read32(ip + 1);
11734 target = ip + 2 + (signed char)(ip [1]);
11737 /* empty the stack */
11738 while (sp != stack_start) {
11743 * If this leave statement is in a catch block, check for a
11744 * pending exception, and rethrow it if necessary.
11745 * We avoid doing this in runtime invoke wrappers, since those are called
11746 * by native code which expects the wrapper to catch all exceptions.
11748 for (i = 0; i < header->num_clauses; ++i) {
11749 MonoExceptionClause *clause = &header->clauses [i];
11752 * Use <= in the final comparison to handle clauses with multiple
11753 * leave statements, like in bug #78024.
11754 * The ordering of the exception clauses guarantees that we find the
11755 * innermost clause.
11757 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11759 MonoBasicBlock *dont_throw;
11764 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11767 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11769 NEW_BBLOCK (cfg, dont_throw);
11772 * Currently, we always rethrow the abort exception, despite the
11773 * fact that this is not correct. See thread6.cs for an example.
11774 * But propagating the abort exception is more important than
11775 * getting the semantics right.
11777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11779 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11781 MONO_START_BB (cfg, dont_throw);
11786 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11789 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11791 MonoExceptionClause *clause;
11793 for (tmp = handlers; tmp; tmp = tmp->next) {
11794 clause = (MonoExceptionClause *)tmp->data;
11795 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11797 link_bblock (cfg, cfg->cbb, tblock);
11798 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11799 ins->inst_target_bb = tblock;
11800 ins->inst_eh_block = clause;
11801 MONO_ADD_INS (cfg->cbb, ins);
11802 cfg->cbb->has_call_handler = 1;
11803 if (COMPILE_LLVM (cfg)) {
11804 MonoBasicBlock *target_bb;
11807 * Link the finally bblock with the target, since it will
11808 * conceptually branch there.
11810 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11811 GET_BBLOCK (cfg, target_bb, target);
11812 link_bblock (cfg, tblock, target_bb);
11815 g_list_free (handlers);
11818 MONO_INST_NEW (cfg, ins, OP_BR);
11819 MONO_ADD_INS (cfg->cbb, ins);
11820 GET_BBLOCK (cfg, tblock, target);
11821 link_bblock (cfg, cfg->cbb, tblock);
11822 ins->inst_target_bb = tblock;
11824 start_new_bblock = 1;
11826 if (*ip == CEE_LEAVE)
11835 * Mono specific opcodes
11837 case MONO_CUSTOM_PREFIX: {
11839 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11843 case CEE_MONO_ICALL: {
11845 MonoJitICallInfo *info;
11847 token = read32 (ip + 2);
11848 func = mono_method_get_wrapper_data (method, token);
11849 info = mono_find_jit_icall_by_addr (func);
11851 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11854 CHECK_STACK (info->sig->param_count);
11855 sp -= info->sig->param_count;
11857 ins = mono_emit_jit_icall (cfg, info->func, sp);
11858 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11862 inline_costs += 10 * num_calls++;
11866 case CEE_MONO_LDPTR_CARD_TABLE:
11867 case CEE_MONO_LDPTR_NURSERY_START:
11868 case CEE_MONO_LDPTR_NURSERY_BITS:
11869 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11870 CHECK_STACK_OVF (1);
11873 case CEE_MONO_LDPTR_CARD_TABLE:
11874 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11876 case CEE_MONO_LDPTR_NURSERY_START:
11877 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11879 case CEE_MONO_LDPTR_NURSERY_BITS:
11880 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11882 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11883 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11889 inline_costs += 10 * num_calls++;
11892 case CEE_MONO_LDPTR: {
11895 CHECK_STACK_OVF (1);
11897 token = read32 (ip + 2);
11899 ptr = mono_method_get_wrapper_data (method, token);
11900 EMIT_NEW_PCONST (cfg, ins, ptr);
11903 inline_costs += 10 * num_calls++;
11904 /* Can't embed random pointers into AOT code */
11908 case CEE_MONO_JIT_ICALL_ADDR: {
11909 MonoJitICallInfo *callinfo;
11912 CHECK_STACK_OVF (1);
11914 token = read32 (ip + 2);
11916 ptr = mono_method_get_wrapper_data (method, token);
11917 callinfo = mono_find_jit_icall_by_addr (ptr);
11918 g_assert (callinfo);
11919 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11922 inline_costs += 10 * num_calls++;
11925 case CEE_MONO_ICALL_ADDR: {
11926 MonoMethod *cmethod;
11929 CHECK_STACK_OVF (1);
11931 token = read32 (ip + 2);
11933 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11935 if (cfg->compile_aot) {
11936 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11938 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11939 * before the call, it's not needed when using direct pinvoke.
11940 * This is not an optimization, but its used to avoid looking up pinvokes
11941 * on platforms which don't support dlopen ().
11943 EMIT_NEW_PCONST (cfg, ins, NULL);
11945 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11948 ptr = mono_lookup_internal_call (cmethod);
11950 EMIT_NEW_PCONST (cfg, ins, ptr);
11956 case CEE_MONO_VTADDR: {
11957 MonoInst *src_var, *src;
11963 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11964 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11969 case CEE_MONO_NEWOBJ: {
11970 MonoInst *iargs [2];
11972 CHECK_STACK_OVF (1);
11974 token = read32 (ip + 2);
11975 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11976 mono_class_init (klass);
11977 NEW_DOMAINCONST (cfg, iargs [0]);
11978 MONO_ADD_INS (cfg->cbb, iargs [0]);
11979 NEW_CLASSCONST (cfg, iargs [1], klass);
11980 MONO_ADD_INS (cfg->cbb, iargs [1]);
11981 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11983 inline_costs += 10 * num_calls++;
11986 case CEE_MONO_OBJADDR:
11989 MONO_INST_NEW (cfg, ins, OP_MOVE);
11990 ins->dreg = alloc_ireg_mp (cfg);
11991 ins->sreg1 = sp [0]->dreg;
11992 ins->type = STACK_MP;
11993 MONO_ADD_INS (cfg->cbb, ins);
11997 case CEE_MONO_LDNATIVEOBJ:
11999 * Similar to LDOBJ, but instead load the unmanaged
12000 * representation of the vtype to the stack.
12005 token = read32 (ip + 2);
12006 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12007 g_assert (klass->valuetype);
12008 mono_class_init (klass);
12011 MonoInst *src, *dest, *temp;
12014 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12015 temp->backend.is_pinvoke = 1;
12016 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12017 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12019 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12020 dest->type = STACK_VTYPE;
12021 dest->klass = klass;
12027 case CEE_MONO_RETOBJ: {
12029 * Same as RET, but return the native representation of a vtype
12032 g_assert (cfg->ret);
12033 g_assert (mono_method_signature (method)->pinvoke);
12038 token = read32 (ip + 2);
12039 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12041 if (!cfg->vret_addr) {
12042 g_assert (cfg->ret_var_is_local);
12044 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12046 EMIT_NEW_RETLOADA (cfg, ins);
12048 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12050 if (sp != stack_start)
12053 MONO_INST_NEW (cfg, ins, OP_BR);
12054 ins->inst_target_bb = end_bblock;
12055 MONO_ADD_INS (cfg->cbb, ins);
12056 link_bblock (cfg, cfg->cbb, end_bblock);
12057 start_new_bblock = 1;
12061 case CEE_MONO_SAVE_LMF:
12062 case CEE_MONO_RESTORE_LMF:
12065 case CEE_MONO_CLASSCONST:
12066 CHECK_STACK_OVF (1);
12068 token = read32 (ip + 2);
12069 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12072 inline_costs += 10 * num_calls++;
12074 case CEE_MONO_NOT_TAKEN:
12075 cfg->cbb->out_of_line = TRUE;
12078 case CEE_MONO_TLS: {
12081 CHECK_STACK_OVF (1);
12083 key = (MonoTlsKey)read32 (ip + 2);
12084 g_assert (key < TLS_KEY_NUM);
12086 ins = mono_create_tls_get (cfg, key);
12088 ins->type = STACK_PTR;
12093 case CEE_MONO_DYN_CALL: {
12094 MonoCallInst *call;
12096 /* It would be easier to call a trampoline, but that would put an
12097 * extra frame on the stack, confusing exception handling. So
12098 * implement it inline using an opcode for now.
12101 if (!cfg->dyn_call_var) {
12102 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12103 /* prevent it from being register allocated */
12104 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12107 /* Has to use a call inst since the local regalloc expects it */
12108 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12109 ins = (MonoInst*)call;
12111 ins->sreg1 = sp [0]->dreg;
12112 ins->sreg2 = sp [1]->dreg;
12113 MONO_ADD_INS (cfg->cbb, ins);
12115 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12118 inline_costs += 10 * num_calls++;
12122 case CEE_MONO_MEMORY_BARRIER: {
12124 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12128 case CEE_MONO_ATOMIC_STORE_I4: {
12129 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12135 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12136 ins->dreg = sp [0]->dreg;
12137 ins->sreg1 = sp [1]->dreg;
12138 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12139 MONO_ADD_INS (cfg->cbb, ins);
12144 case CEE_MONO_JIT_ATTACH: {
12145 MonoInst *args [16], *domain_ins;
12146 MonoInst *ad_ins, *jit_tls_ins;
12147 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12149 g_assert (!mono_threads_is_coop_enabled ());
12151 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12153 EMIT_NEW_PCONST (cfg, ins, NULL);
12154 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12156 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12157 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12159 if (ad_ins && jit_tls_ins) {
12160 NEW_BBLOCK (cfg, next_bb);
12161 NEW_BBLOCK (cfg, call_bb);
12163 if (cfg->compile_aot) {
12164 /* AOT code is only used in the root domain */
12165 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12167 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12169 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12170 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12172 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12176 MONO_START_BB (cfg, call_bb);
12179 /* AOT code is only used in the root domain */
12180 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12181 if (cfg->compile_aot) {
12185 * This is called on unattached threads, so it cannot go through the trampoline
12186 * infrastructure. Use an indirect call through a got slot initialized at load time
12189 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12190 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12192 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12194 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12197 MONO_START_BB (cfg, next_bb);
12202 case CEE_MONO_JIT_DETACH: {
12203 MonoInst *args [16];
12205 /* Restore the original domain */
12206 dreg = alloc_ireg (cfg);
12207 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12208 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12212 case CEE_MONO_CALLI_EXTRA_ARG: {
12214 MonoMethodSignature *fsig;
12218 * This is the same as CEE_CALLI, but passes an additional argument
12219 * to the called method in llvmonly mode.
12220 * This is only used by delegate invoke wrappers to call the
12221 * actual delegate method.
12223 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12226 token = read32 (ip + 2);
12234 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12237 if (cfg->llvm_only)
12238 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12240 n = fsig->param_count + fsig->hasthis + 1;
12247 if (cfg->llvm_only) {
12249 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12250 * cconv. This is set by mono_init_delegate ().
12252 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12253 MonoInst *callee = addr;
12254 MonoInst *call, *localloc_ins;
12255 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12256 int low_bit_reg = alloc_preg (cfg);
12258 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12259 NEW_BBLOCK (cfg, end_bb);
12261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12262 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12263 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12265 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12266 addr = emit_get_rgctx_sig (cfg, context_used,
12267 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12269 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12271 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12272 ins->dreg = alloc_preg (cfg);
12273 ins->inst_imm = 2 * SIZEOF_VOID_P;
12274 MONO_ADD_INS (cfg->cbb, ins);
12275 localloc_ins = ins;
12276 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12278 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12280 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12283 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12284 MONO_START_BB (cfg, is_gsharedvt_bb);
12285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12286 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12287 ins->dreg = call->dreg;
12289 MONO_START_BB (cfg, end_bb);
12291 /* Caller uses a normal calling conv */
12293 MonoInst *callee = addr;
12294 MonoInst *call, *localloc_ins;
12295 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12296 int low_bit_reg = alloc_preg (cfg);
12298 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12299 NEW_BBLOCK (cfg, end_bb);
12301 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12302 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12303 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12305 /* Normal case: callee uses a normal cconv, no conversion is needed */
12306 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12307 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12308 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12309 MONO_START_BB (cfg, is_gsharedvt_bb);
12310 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12311 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12312 MONO_ADD_INS (cfg->cbb, addr);
12314 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12316 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12317 ins->dreg = alloc_preg (cfg);
12318 ins->inst_imm = 2 * SIZEOF_VOID_P;
12319 MONO_ADD_INS (cfg->cbb, ins);
12320 localloc_ins = ins;
12321 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12323 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12325 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12326 ins->dreg = call->dreg;
12327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12329 MONO_START_BB (cfg, end_bb);
12332 /* Same as CEE_CALLI */
12333 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12335 * We pass the address to the gsharedvt trampoline in the rgctx reg
12337 MonoInst *callee = addr;
12339 addr = emit_get_rgctx_sig (cfg, context_used,
12340 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12341 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12343 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12347 if (!MONO_TYPE_IS_VOID (fsig->ret))
12348 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12350 CHECK_CFG_EXCEPTION;
12354 constrained_class = NULL;
12357 case CEE_MONO_LDDOMAIN:
12358 CHECK_STACK_OVF (1);
12359 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12363 case CEE_MONO_GET_LAST_ERROR:
12365 CHECK_STACK_OVF (1);
12367 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12368 ins->dreg = alloc_dreg (cfg, STACK_I4);
12369 ins->type = STACK_I4;
12370 MONO_ADD_INS (cfg->cbb, ins);
12375 case CEE_MONO_GET_RGCTX_ARG:
12377 CHECK_STACK_OVF (1);
12379 mono_create_rgctx_var (cfg);
12381 MONO_INST_NEW (cfg, ins, OP_MOVE);
12382 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12383 ins->sreg1 = cfg->rgctx_var->dreg;
12384 ins->type = STACK_PTR;
12385 MONO_ADD_INS (cfg->cbb, ins);
12391 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12397 case CEE_PREFIX1: {
12400 case CEE_ARGLIST: {
12401 /* somewhat similar to LDTOKEN */
12402 MonoInst *addr, *vtvar;
12403 CHECK_STACK_OVF (1);
12404 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12406 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12407 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12409 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12410 ins->type = STACK_VTYPE;
12411 ins->klass = mono_defaults.argumenthandle_class;
12421 MonoInst *cmp, *arg1, *arg2;
12429 * The following transforms:
12430 * CEE_CEQ into OP_CEQ
12431 * CEE_CGT into OP_CGT
12432 * CEE_CGT_UN into OP_CGT_UN
12433 * CEE_CLT into OP_CLT
12434 * CEE_CLT_UN into OP_CLT_UN
12436 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12438 MONO_INST_NEW (cfg, ins, cmp->opcode);
12439 cmp->sreg1 = arg1->dreg;
12440 cmp->sreg2 = arg2->dreg;
12441 type_from_op (cfg, cmp, arg1, arg2);
12443 add_widen_op (cfg, cmp, &arg1, &arg2);
12444 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12445 cmp->opcode = OP_LCOMPARE;
12446 else if (arg1->type == STACK_R4)
12447 cmp->opcode = OP_RCOMPARE;
12448 else if (arg1->type == STACK_R8)
12449 cmp->opcode = OP_FCOMPARE;
12451 cmp->opcode = OP_ICOMPARE;
12452 MONO_ADD_INS (cfg->cbb, cmp);
12453 ins->type = STACK_I4;
12454 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12455 type_from_op (cfg, ins, arg1, arg2);
12457 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12459 * The backends expect the fceq opcodes to do the
12462 ins->sreg1 = cmp->sreg1;
12463 ins->sreg2 = cmp->sreg2;
12466 MONO_ADD_INS (cfg->cbb, ins);
12472 MonoInst *argconst;
12473 MonoMethod *cil_method;
12475 CHECK_STACK_OVF (1);
12477 n = read32 (ip + 2);
12478 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12481 mono_class_init (cmethod->klass);
12483 mono_save_token_info (cfg, image, n, cmethod);
12485 context_used = mini_method_check_context_used (cfg, cmethod);
12487 cil_method = cmethod;
12488 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12489 emit_method_access_failure (cfg, method, cil_method);
12491 if (mono_security_core_clr_enabled ())
12492 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12495 * Optimize the common case of ldftn+delegate creation
12497 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12498 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12499 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12500 MonoInst *target_ins, *handle_ins;
12501 MonoMethod *invoke;
12502 int invoke_context_used;
12504 invoke = mono_get_delegate_invoke (ctor_method->klass);
12505 if (!invoke || !mono_method_signature (invoke))
12508 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12510 target_ins = sp [-1];
12512 if (mono_security_core_clr_enabled ())
12513 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12515 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12516 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12517 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12519 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12523 /* FIXME: SGEN support */
12524 if (invoke_context_used == 0 || cfg->llvm_only) {
12526 if (cfg->verbose_level > 3)
12527 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12528 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12531 CHECK_CFG_EXCEPTION;
12541 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12542 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12546 inline_costs += 10 * num_calls++;
12549 case CEE_LDVIRTFTN: {
12550 MonoInst *args [2];
12554 n = read32 (ip + 2);
12555 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12558 mono_class_init (cmethod->klass);
12560 context_used = mini_method_check_context_used (cfg, cmethod);
12562 if (mono_security_core_clr_enabled ())
12563 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12566 * Optimize the common case of ldvirtftn+delegate creation
12568 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12569 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12570 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12571 MonoInst *target_ins, *handle_ins;
12572 MonoMethod *invoke;
12573 int invoke_context_used;
12574 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12576 invoke = mono_get_delegate_invoke (ctor_method->klass);
12577 if (!invoke || !mono_method_signature (invoke))
12580 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12582 target_ins = sp [-1];
12584 if (mono_security_core_clr_enabled ())
12585 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12587 /* FIXME: SGEN support */
12588 if (invoke_context_used == 0 || cfg->llvm_only) {
12590 if (cfg->verbose_level > 3)
12591 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12592 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12595 CHECK_CFG_EXCEPTION;
12608 args [1] = emit_get_rgctx_method (cfg, context_used,
12609 cmethod, MONO_RGCTX_INFO_METHOD);
12612 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12614 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12617 inline_costs += 10 * num_calls++;
12621 CHECK_STACK_OVF (1);
12623 n = read16 (ip + 2);
12625 EMIT_NEW_ARGLOAD (cfg, ins, n);
12630 CHECK_STACK_OVF (1);
12632 n = read16 (ip + 2);
12634 NEW_ARGLOADA (cfg, ins, n);
12635 MONO_ADD_INS (cfg->cbb, ins);
12643 n = read16 (ip + 2);
12645 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12647 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12651 CHECK_STACK_OVF (1);
12653 n = read16 (ip + 2);
12655 EMIT_NEW_LOCLOAD (cfg, ins, n);
12660 unsigned char *tmp_ip;
12661 CHECK_STACK_OVF (1);
12663 n = read16 (ip + 2);
12666 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12672 EMIT_NEW_LOCLOADA (cfg, ins, n);
12681 n = read16 (ip + 2);
12683 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12685 emit_stloc_ir (cfg, sp, header, n);
12689 case CEE_LOCALLOC: {
12691 MonoBasicBlock *non_zero_bb, *end_bb;
12692 int alloc_ptr = alloc_preg (cfg);
12694 if (sp != stack_start)
12696 if (cfg->method != method)
12698 * Inlining this into a loop in a parent could lead to
12699 * stack overflows which is different behavior than the
12700 * non-inlined case, thus disable inlining in this case.
12702 INLINE_FAILURE("localloc");
12704 NEW_BBLOCK (cfg, non_zero_bb);
12705 NEW_BBLOCK (cfg, end_bb);
12707 /* if size != zero */
12708 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12711 //size is zero, so result is NULL
12712 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12715 MONO_START_BB (cfg, non_zero_bb);
12716 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12717 ins->dreg = alloc_ptr;
12718 ins->sreg1 = sp [0]->dreg;
12719 ins->type = STACK_PTR;
12720 MONO_ADD_INS (cfg->cbb, ins);
12722 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12724 ins->flags |= MONO_INST_INIT;
12726 MONO_START_BB (cfg, end_bb);
12727 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12728 ins->type = STACK_PTR;
12734 case CEE_ENDFILTER: {
12735 MonoExceptionClause *clause, *nearest;
12740 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12742 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12743 ins->sreg1 = (*sp)->dreg;
12744 MONO_ADD_INS (cfg->cbb, ins);
12745 start_new_bblock = 1;
12749 for (cc = 0; cc < header->num_clauses; ++cc) {
12750 clause = &header->clauses [cc];
12751 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12752 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12753 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12756 g_assert (nearest);
12757 if ((ip - header->code) != nearest->handler_offset)
12762 case CEE_UNALIGNED_:
12763 ins_flag |= MONO_INST_UNALIGNED;
12764 /* FIXME: record alignment? we can assume 1 for now */
12768 case CEE_VOLATILE_:
12769 ins_flag |= MONO_INST_VOLATILE;
12773 ins_flag |= MONO_INST_TAILCALL;
12774 cfg->flags |= MONO_CFG_HAS_TAIL;
12775 /* Can't inline tail calls at this time */
12776 inline_costs += 100000;
12783 token = read32 (ip + 2);
12784 klass = mini_get_class (method, token, generic_context);
12785 CHECK_TYPELOAD (klass);
12786 if (generic_class_is_reference_type (cfg, klass))
12787 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12789 mini_emit_initobj (cfg, *sp, NULL, klass);
12793 case CEE_CONSTRAINED_:
12795 token = read32 (ip + 2);
12796 constrained_class = mini_get_class (method, token, generic_context);
12797 CHECK_TYPELOAD (constrained_class);
12801 case CEE_INITBLK: {
12802 MonoInst *iargs [3];
12806 /* Skip optimized paths for volatile operations. */
12807 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12808 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12809 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12810 /* emit_memset only works when val == 0 */
12811 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12814 iargs [0] = sp [0];
12815 iargs [1] = sp [1];
12816 iargs [2] = sp [2];
12817 if (ip [1] == CEE_CPBLK) {
12819 * FIXME: It's unclear whether we should be emitting both the acquire
12820 * and release barriers for cpblk. It is technically both a load and
12821 * store operation, so it seems like that's the sensible thing to do.
12823 * FIXME: We emit full barriers on both sides of the operation for
12824 * simplicity. We should have a separate atomic memcpy method instead.
12826 MonoMethod *memcpy_method = get_memcpy_method ();
12828 if (ins_flag & MONO_INST_VOLATILE)
12829 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12831 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12832 call->flags |= ins_flag;
12834 if (ins_flag & MONO_INST_VOLATILE)
12835 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12837 MonoMethod *memset_method = get_memset_method ();
12838 if (ins_flag & MONO_INST_VOLATILE) {
12839 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12840 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12842 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12843 call->flags |= ins_flag;
12854 ins_flag |= MONO_INST_NOTYPECHECK;
12856 ins_flag |= MONO_INST_NORANGECHECK;
12857 /* we ignore the no-nullcheck for now since we
12858 * really do it explicitly only when doing callvirt->call
12862 case CEE_RETHROW: {
12864 int handler_offset = -1;
12866 for (i = 0; i < header->num_clauses; ++i) {
12867 MonoExceptionClause *clause = &header->clauses [i];
12868 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12869 handler_offset = clause->handler_offset;
12874 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12876 if (handler_offset == -1)
12879 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12880 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12881 ins->sreg1 = load->dreg;
12882 MONO_ADD_INS (cfg->cbb, ins);
12884 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12885 MONO_ADD_INS (cfg->cbb, ins);
12888 link_bblock (cfg, cfg->cbb, end_bblock);
12889 start_new_bblock = 1;
12897 CHECK_STACK_OVF (1);
12899 token = read32 (ip + 2);
12900 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12901 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12904 val = mono_type_size (type, &ialign);
12906 MonoClass *klass = mini_get_class (method, token, generic_context);
12907 CHECK_TYPELOAD (klass);
12909 val = mono_type_size (&klass->byval_arg, &ialign);
12911 if (mini_is_gsharedvt_klass (klass))
12912 GSHAREDVT_FAILURE (*ip);
12914 EMIT_NEW_ICONST (cfg, ins, val);
12919 case CEE_REFANYTYPE: {
12920 MonoInst *src_var, *src;
12922 GSHAREDVT_FAILURE (*ip);
12928 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12930 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12931 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12932 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12937 case CEE_READONLY_:
12950 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12960 g_warning ("opcode 0x%02x not handled", *ip);
12964 if (start_new_bblock != 1)
12967 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12968 if (cfg->cbb->next_bb) {
12969 /* This could already be set because of inlining, #693905 */
12970 MonoBasicBlock *bb = cfg->cbb;
12972 while (bb->next_bb)
12974 bb->next_bb = end_bblock;
12976 cfg->cbb->next_bb = end_bblock;
12979 if (cfg->method == method && cfg->domainvar) {
12981 MonoInst *get_domain;
12983 cfg->cbb = init_localsbb;
12985 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12986 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12987 MONO_ADD_INS (cfg->cbb, store);
12990 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12991 if (cfg->compile_aot)
12992 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12993 mono_get_got_var (cfg);
12996 if (cfg->method == method && cfg->got_var)
12997 mono_emit_load_got_addr (cfg);
12999 if (init_localsbb) {
13000 cfg->cbb = init_localsbb;
13002 for (i = 0; i < header->num_locals; ++i) {
13003 emit_init_local (cfg, i, header->locals [i], init_locals);
13007 if (cfg->init_ref_vars && cfg->method == method) {
13008 /* Emit initialization for ref vars */
13009 // FIXME: Avoid duplication initialization for IL locals.
13010 for (i = 0; i < cfg->num_varinfo; ++i) {
13011 MonoInst *ins = cfg->varinfo [i];
13013 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13014 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13018 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13019 cfg->cbb = init_localsbb;
13020 emit_push_lmf (cfg);
13023 cfg->cbb = init_localsbb;
13024 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13027 MonoBasicBlock *bb;
13030 * Make seq points at backward branch targets interruptable.
13032 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13033 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13034 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13037 /* Add a sequence point for method entry/exit events */
13038 if (seq_points && cfg->gen_sdb_seq_points) {
13039 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13040 MONO_ADD_INS (init_localsbb, ins);
13041 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13042 MONO_ADD_INS (cfg->bb_exit, ins);
13046 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13047 * the code they refer to was dead (#11880).
13049 if (sym_seq_points) {
13050 for (i = 0; i < header->code_size; ++i) {
13051 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13054 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13055 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13062 if (cfg->method == method) {
13063 MonoBasicBlock *bb;
13064 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13065 if (bb == cfg->bb_init)
13068 bb->region = mono_find_block_region (cfg, bb->real_offset);
13070 mono_create_spvar_for_region (cfg, bb->region);
13071 if (cfg->verbose_level > 2)
13072 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13075 MonoBasicBlock *bb;
13076 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13077 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13078 bb->real_offset = inline_offset;
13082 if (inline_costs < 0) {
13085 /* Method is too large */
13086 mname = mono_method_full_name (method, TRUE);
13087 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13091 if ((cfg->verbose_level > 2) && (cfg->method == method))
13092 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13097 g_assert (!mono_error_ok (&cfg->error));
13101 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13105 set_exception_type_from_invalid_il (cfg, method, ip);
13109 g_slist_free (class_inits);
13110 mono_basic_block_free (original_bb);
13111 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13112 if (cfg->exception_type)
13115 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-to-membase opcode whose value operand is a register to the
 * variant whose value operand is an immediate (OP_STORE*_MEMBASE_REG ->
 * OP_STORE*_MEMBASE_IMM).  Any other opcode is a caller bug and asserts.
 */
13119 store_membase_reg_to_store_membase_imm (int opcode)
13122 case OP_STORE_MEMBASE_REG:
13123 return OP_STORE_MEMBASE_IMM;
13124 case OP_STOREI1_MEMBASE_REG:
13125 return OP_STOREI1_MEMBASE_IMM;
13126 case OP_STOREI2_MEMBASE_REG:
13127 return OP_STOREI2_MEMBASE_IMM;
13128 case OP_STOREI4_MEMBASE_REG:
13129 return OP_STOREI4_MEMBASE_IMM;
13130 case OP_STOREI8_MEMBASE_REG:
13131 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes */
13133 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate as its second
 * operand (e.g. OP_IADD -> OP_IADD_IMM).  Covers 32 bit and long ALU ops,
 * compares, reg-source stores, and a few arch-specific (x86/amd64) opcodes.
 */
13140 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
13144 return OP_IADD_IMM;
13146 return OP_ISUB_IMM;
13148 return OP_IDIV_IMM;
13150 return OP_IDIV_UN_IMM;
13152 return OP_IREM_IMM;
13154 return OP_IREM_UN_IMM;
13156 return OP_IMUL_IMM;
13158 return OP_IAND_IMM;
13162 return OP_IXOR_IMM;
13164 return OP_ISHL_IMM;
13166 return OP_ISHR_IMM;
13168 return OP_ISHR_UN_IMM;
/* long (64 bit) ALU ops */
13171 return OP_LADD_IMM;
13173 return OP_LSUB_IMM;
13175 return OP_LAND_IMM;
13179 return OP_LXOR_IMM;
13181 return OP_LSHL_IMM;
13183 return OP_LSHR_IMM;
13185 return OP_LSHR_UN_IMM;
/* Long rem is only done inline on 64 bit hosts; 32 bit hosts emulate it */
13186 #if SIZEOF_REGISTER == 8
13188 return OP_LREM_IMM;
/* compares */
13192 return OP_COMPARE_IMM;
13194 return OP_ICOMPARE_IMM;
13196 return OP_LCOMPARE_IMM;
/* reg-source stores */
13198 case OP_STORE_MEMBASE_REG:
13199 return OP_STORE_MEMBASE_IMM;
13200 case OP_STOREI1_MEMBASE_REG:
13201 return OP_STOREI1_MEMBASE_IMM;
13202 case OP_STOREI2_MEMBASE_REG:
13203 return OP_STOREI2_MEMBASE_IMM;
13204 case OP_STOREI4_MEMBASE_REG:
13205 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific opcodes which also have immediate forms */
13207 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13209 return OP_X86_PUSH_IMM;
13210 case OP_X86_COMPARE_MEMBASE_REG:
13211 return OP_X86_COMPARE_MEMBASE_IMM;
13213 #if defined(TARGET_AMD64)
13214 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13215 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* calls through a register can become direct calls when the target is known */
13217 case OP_VOIDCALL_REG:
13218 return OP_VOIDCALL;
13226 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the corresponding OP_LOAD*_MEMBASE IR
 * load opcode.  Asserts on anything which is not an ldind opcode.
 */
13233 ldind_to_load_membase (int opcode)
13237 return OP_LOADI1_MEMBASE;
13239 return OP_LOADU1_MEMBASE;
13241 return OP_LOADI2_MEMBASE;
13243 return OP_LOADU2_MEMBASE;
13245 return OP_LOADI4_MEMBASE;
13247 return OP_LOADU4_MEMBASE;
/* native-int and object-ref loads are both pointer-sized plain loads */
13249 return OP_LOAD_MEMBASE;
13250 case CEE_LDIND_REF:
13251 return OP_LOAD_MEMBASE;
13253 return OP_LOADI8_MEMBASE;
13255 return OP_LOADR4_MEMBASE;
13257 return OP_LOADR8_MEMBASE;
13259 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the corresponding OP_STORE*_MEMBASE_REG
 * IR store opcode.  Asserts on anything which is not a stind opcode.
 */
13266 stind_to_store_membase (int opcode)
13270 return OP_STOREI1_MEMBASE_REG;
13272 return OP_STOREI2_MEMBASE_REG;
13274 return OP_STOREI4_MEMBASE_REG;
/* object-ref stores are pointer-sized plain stores at this level */
13276 case CEE_STIND_REF:
13277 return OP_STORE_MEMBASE_REG;
13279 return OP_STOREI8_MEMBASE_REG;
13281 return OP_STORER4_MEMBASE_REG;
13283 return OP_STORER8_MEMBASE_REG;
13285 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base reg + offset) to the
 * corresponding OP_LOAD*_MEM opcode which loads from an absolute address.
 * Only enabled on x86/amd64, which have such addressing modes.
 */
13292 mono_load_membase_to_load_mem (int opcode)
13294 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13295 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13297 case OP_LOAD_MEMBASE:
13298 return OP_LOAD_MEM;
13299 case OP_LOADU1_MEMBASE:
13300 return OP_LOADU1_MEM;
13301 case OP_LOADU2_MEMBASE:
13302 return OP_LOADU2_MEM;
13303 case OP_LOADI4_MEMBASE:
13304 return OP_LOADI4_MEM;
13305 case OP_LOADU4_MEMBASE:
13306 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit hosts */
13307 #if SIZEOF_REGISTER == 8
13308 case OP_LOADI8_MEMBASE:
13309 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return the arch-specific opcode which performs the ALU operation OPCODE
 * (reg or imm form) directly on a memory destination, given that its result
 * would be stored back with STORE_OPCODE.  Used by local peephole passes to
 * fold load/op/store sequences on x86/amd64.
 */
13318 op_to_op_dest_membase (int store_opcode, int opcode)
13320 #if defined(TARGET_X86)
/* on x86 only pointer-sized / 32 bit stores can be folded */
13321 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13326 return OP_X86_ADD_MEMBASE_REG;
13328 return OP_X86_SUB_MEMBASE_REG;
13330 return OP_X86_AND_MEMBASE_REG;
13332 return OP_X86_OR_MEMBASE_REG;
13334 return OP_X86_XOR_MEMBASE_REG;
13337 return OP_X86_ADD_MEMBASE_IMM;
13340 return OP_X86_SUB_MEMBASE_IMM;
13343 return OP_X86_AND_MEMBASE_IMM;
13346 return OP_X86_OR_MEMBASE_IMM;
13349 return OP_X86_XOR_MEMBASE_IMM;
13355 #if defined(TARGET_AMD64)
/* amd64 additionally folds 8 byte stores */
13356 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* memory-destination opcodes */
13361 return OP_X86_ADD_MEMBASE_REG;
13363 return OP_X86_SUB_MEMBASE_REG;
13365 return OP_X86_AND_MEMBASE_REG;
13367 return OP_X86_OR_MEMBASE_REG;
13369 return OP_X86_XOR_MEMBASE_REG;
13371 return OP_X86_ADD_MEMBASE_IMM;
13373 return OP_X86_SUB_MEMBASE_IMM;
13375 return OP_X86_AND_MEMBASE_IMM;
13377 return OP_X86_OR_MEMBASE_IMM;
13379 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops get the AMD64_* memory-destination opcodes */
13381 return OP_AMD64_ADD_MEMBASE_REG;
13383 return OP_AMD64_SUB_MEMBASE_REG;
13385 return OP_AMD64_AND_MEMBASE_REG;
13387 return OP_AMD64_OR_MEMBASE_REG;
13389 return OP_AMD64_XOR_MEMBASE_REG;
13392 return OP_AMD64_ADD_MEMBASE_IMM;
13395 return OP_AMD64_SUB_MEMBASE_IMM;
13398 return OP_AMD64_AND_MEMBASE_IMM;
13401 return OP_AMD64_OR_MEMBASE_IMM;
13404 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold OPCODE into a version which stores its result straight to memory,
 * given the store STORE_OPCODE which would have stored it.  Currently only
 * x86/amd64 setcc-to-membase (1 byte stores of a condition result).
 */
13414 op_to_op_store_membase (int store_opcode, int opcode)
13416 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13419 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13420 return OP_X86_SETEQ_MEMBASE;
13422 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13423 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) into the first source operand of
 * OPCODE, returning the arch-specific opcode which reads that operand from
 * memory directly (x86/amd64 push/compare-with-memory forms).
 */
13431 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13434 /* FIXME: This has sign extension issues */
13436 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13437 return OP_X86_COMPARE_MEMBASE8_IMM;
/* on x86, only pointer-sized / 32 bit loads can be folded */
13440 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13445 return OP_X86_PUSH_MEMBASE;
13446 case OP_COMPARE_IMM:
13447 case OP_ICOMPARE_IMM:
13448 return OP_X86_COMPARE_MEMBASE_IMM;
13451 return OP_X86_COMPARE_MEMBASE_REG;
13455 #ifdef TARGET_AMD64
13456 /* FIXME: This has sign extension issues */
13458 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13459 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is a 4 byte load, not 8 */
13464 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13465 return OP_X86_PUSH_MEMBASE;
13467 /* FIXME: This only works for 32 bit immediates
13468 case OP_COMPARE_IMM:
13469 case OP_LCOMPARE_IMM:
13470 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13471 return OP_AMD64_COMPARE_MEMBASE_IMM;
13473 case OP_ICOMPARE_IMM:
13474 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13475 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13479 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13480 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13481 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13482 return OP_AMD64_COMPARE_MEMBASE_REG;
13485 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13486 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) into the second source operand of
 * OPCODE, returning the arch-specific reg-op-memory opcode (x86/amd64
 * compare/add/sub/and/or/xor with a memory second operand).
 */
13495 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* on x86, only pointer-sized / 32 bit loads can be folded */
13498 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13504 return OP_X86_COMPARE_REG_MEMBASE;
13506 return OP_X86_ADD_REG_MEMBASE;
13508 return OP_X86_SUB_REG_MEMBASE;
13510 return OP_X86_AND_REG_MEMBASE;
13512 return OP_X86_OR_REG_MEMBASE;
13514 return OP_X86_XOR_REG_MEMBASE;
13518 #ifdef TARGET_AMD64
/* 32 bit loads (incl. pointer loads under ILP32/x32) use the X86_* forms */
13519 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13522 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13524 return OP_X86_ADD_REG_MEMBASE;
13526 return OP_X86_SUB_REG_MEMBASE;
13528 return OP_X86_AND_REG_MEMBASE;
13530 return OP_X86_OR_REG_MEMBASE;
13532 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit loads use the AMD64_* forms */
13534 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13538 return OP_AMD64_COMPARE_REG_MEMBASE;
13540 return OP_AMD64_ADD_REG_MEMBASE;
13542 return OP_AMD64_SUB_REG_MEMBASE;
13544 return OP_AMD64_AND_REG_MEMBASE;
13546 return OP_AMD64_OR_REG_MEMBASE;
13548 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but filter out opcodes which are emulated in
 * software on the current arch (long shifts on 32 bit hosts, mul/div/rem
 * when the arch emulates them), since the emulation helpers cannot take
 * immediate operands.
 */
13557 mono_op_to_op_imm_noemul (int opcode)
13560 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13566 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13573 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else can be converted normally */
13578 return mono_op_to_op_imm (opcode);
13583 * mono_handle_global_vregs:
13585 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13589 mono_handle_global_vregs (MonoCompile *cfg)
13591 gint32 *vreg_to_bb;
13592 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] tracks which single bblock (block_num + 1) a vreg has
 * been seen in, or -1 once it is seen in more than one.
 * NOTE(review): this allocates sizeof (gint32*) per element for a gint32
 * array (harmless over-allocation on 64 bit), and "* cfg->next_vreg + 1"
 * adds a single byte, not one extra element, due to precedence — confirm
 * intent.
 */
13595 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13597 #ifdef MONO_ARCH_SIMD_INTRINSICS
13598 if (cfg->uses_simd_intrinsics)
13599 mono_simd_simplify_indirection (cfg);
13602 /* Find local vregs used in more than one bb */
13603 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13604 MonoInst *ins = bb->code;
13605 int block_num = bb->block_num;
13607 if (cfg->verbose_level > 2)
13608 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13611 for (; ins; ins = ins->next) {
13612 const char *spec = INS_INFO (ins->opcode);
13613 int regtype = 0, regindex;
13616 if (G_UNLIKELY (cfg->verbose_level > 2))
13617 mono_print_ins (ins);
/* IL-level opcodes must all have been lowered by this point */
13619 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit each register operand of the ins: dest, src1, src2, src3 */
13621 for (regindex = 0; regindex < 4; regindex ++) {
13624 if (regindex == 0) {
13625 regtype = spec [MONO_INST_DEST];
13626 if (regtype == ' ')
13629 } else if (regindex == 1) {
13630 regtype = spec [MONO_INST_SRC1];
13631 if (regtype == ' ')
13634 } else if (regindex == 2) {
13635 regtype = spec [MONO_INST_SRC2];
13636 if (regtype == ' ')
13639 } else if (regindex == 3) {
13640 regtype = spec [MONO_INST_SRC3];
13641 if (regtype == ' ')
13646 #if SIZEOF_REGISTER == 4
13647 /* In the LLVM case, the long opcodes are not decomposed */
13648 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13650 * Since some instructions reference the original long vreg,
13651 * and some reference the two component vregs, it is quite hard
13652 * to determine when it needs to be global. So be conservative.
13654 if (!get_vreg_to_inst (cfg, vreg)) {
13655 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13657 if (cfg->verbose_level > 2)
13658 printf ("LONG VREG R%d made global.\n", vreg);
13662 * Make the component vregs volatile since the optimizations can
13663 * get confused otherwise.
13665 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13666 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13670 g_assert (vreg != -1);
13672 prev_bb = vreg_to_bb [vreg];
13673 if (prev_bb == 0) {
/* First sighting; store block_num + 1 since */
13674 /* 0 is a valid block num */
13675 vreg_to_bb [vreg] = block_num + 1;
13676 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard regs don't need a variable */
13677 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Seen in a second bblock: promote to a global variable */
13680 if (!get_vreg_to_inst (cfg, vreg)) {
13681 if (G_UNLIKELY (cfg->verbose_level > 2))
13682 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable's managed type from the operand's regtype */
13686 if (vreg_is_ref (cfg, vreg))
13687 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13689 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13692 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13695 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13699 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13702 g_assert_not_reached ();
13706 /* Flag as having been used in more than one bb */
13707 vreg_to_bb [vreg] = -1;
13713 /* If a variable is used in only one bblock, convert it into a local vreg */
13714 for (i = 0; i < cfg->num_varinfo; i++) {
13715 MonoInst *var = cfg->varinfo [i];
13716 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13718 switch (var->type) {
13724 #if SIZEOF_REGISTER == 8
13727 #if !defined(TARGET_X86)
13728 /* Enabling this screws up the fp stack on x86 */
13731 if (mono_arch_is_soft_float ())
/* variable-sized gsharedvt vtypes cannot become lvregs */
13735 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13739 /* Arguments are implicitly global */
13740 /* Putting R4 vars into registers doesn't work currently */
13741 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13742 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13744 * Make that the variable's liveness interval doesn't contain a call, since
13745 * that would cause the lvreg to be spilled, making the whole optimization
13748 /* This is too slow for JIT compilation */
13750 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13752 int def_index, call_index, ins_index;
13753 gboolean spilled = FALSE;
13758 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13759 const char *spec = INS_INFO (ins->opcode);
13761 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13762 def_index = ins_index;
/*
 * NOTE(review): both sides of this || test SRC1/sreg1 — the second
 * side was presumably meant to test SRC2/sreg2, so uses of the var
 * as a second source operand are not detected here; confirm.
 */
13764 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13765 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
/* a call between the def and this use would spill the lvreg */
13766 if (call_index > def_index) {
13772 if (MONO_IS_CALL (ins))
13773 call_index = ins_index;
/* Demote: mark the variable dead and drop the vreg mapping */
13783 if (G_UNLIKELY (cfg->verbose_level > 2))
13784 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13785 var->flags |= MONO_INST_IS_DEAD;
13786 cfg->vreg_to_inst [var->dreg] = NULL;
13793 * Compress the varinfo and vars tables so the liveness computation is faster and
13794 * takes up less space.
13797 for (i = 0; i < cfg->num_varinfo; ++i) {
13798 MonoInst *var = cfg->varinfo [i];
13799 if (pos < i && cfg->locals_start == i)
13800 cfg->locals_start = pos;
13801 if (!(var->flags & MONO_INST_IS_DEAD)) {
13803 cfg->varinfo [pos] = cfg->varinfo [i];
13804 cfg->varinfo [pos]->inst_c0 = pos;
13805 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13806 cfg->vars [pos].idx = pos;
13807 #if SIZEOF_REGISTER == 4
13808 if (cfg->varinfo [pos]->type == STACK_I8) {
13809 /* Modify the two component vars too */
13812 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13813 var1->inst_c0 = pos;
13814 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13815 var1->inst_c0 = pos;
13822 cfg->num_varinfo = pos;
13823 if (cfg->locals_start > cfg->num_varinfo)
13824 cfg->locals_start = cfg->num_varinfo;
13828 * mono_allocate_gsharedvt_vars:
13830 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13831 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13834 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* zero-initialized, so a 0 entry means 'no mapping' for that vreg */
13838 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13840 for (i = 0; i < cfg->num_varinfo; ++i) {
13841 MonoInst *ins = cfg->varinfo [i];
13844 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* locals get a runtime-info slot; stored as idx + 1 to keep 0 = unmapped */
13845 if (i >= cfg->locals_start) {
13847 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13848 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13849 ins->opcode = OP_GSHAREDVT_LOCAL;
13850 ins->inst_imm = idx;
/* arguments are marked -1 and addressed via their reg offset instead */
13853 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13854 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13861 * mono_spill_global_vars:
13863 * Generate spill code for variables which are not allocated to registers,
13864 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13865 * code is generated which could be optimized by the local optimization passes.
13868 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13870 MonoBasicBlock *bb;
13872 int orig_next_vreg;
13873 guint32 *vreg_to_lvreg;
13875 guint32 i, lvregs_len, lvregs_size;
13876 gboolean dest_has_lvreg = FALSE;
13877 MonoStackType stacktypes [128];
13878 MonoInst **live_range_start, **live_range_end;
13879 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13881 *need_local_opts = FALSE;
/* spec2 is a scratch ins-spec, rebuilt below whenever a store's dreg/sreg2 are swapped. */
13883 memset (spec2, 0, sizeof (spec2));
13885 /* FIXME: Move this function to mini.c */
/* Map ins-spec regtype characters to the stack type used when allocating replacement dregs. */
13886 stacktypes ['i'] = STACK_PTR;
13887 stacktypes ['l'] = STACK_I8;
13888 stacktypes ['f'] = STACK_R8;
13889 #ifdef MONO_ARCH_SIMD_INTRINSICS
13890 stacktypes ['x'] = STACK_VTYPE;
/* On 32 bit targets, give each long variable's LS/MS component vars their own stack slots. */
13893 #if SIZEOF_REGISTER == 4
13894 /* Create MonoInsts for longs */
13895 for (i = 0; i < cfg->num_varinfo; i++) {
13896 MonoInst *ins = cfg->varinfo [i];
13898 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13899 switch (ins->type) {
13904 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13907 g_assert (ins->opcode == OP_REGOFFSET);
13909 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13911 tree->opcode = OP_REGOFFSET;
13912 tree->inst_basereg = ins->inst_basereg;
13913 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13915 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13917 tree->opcode = OP_REGOFFSET;
13918 tree->inst_basereg = ins->inst_basereg;
13919 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13929 if (cfg->compute_gc_maps) {
/* registers need liveness info even for non-ref variables */
13931 for (i = 0; i < cfg->num_varinfo; i++) {
13932 MonoInst *ins = cfg->varinfo [i];
13934 if (ins->opcode == OP_REGVAR)
13935 ins->flags |= MONO_INST_GC_TRACK;
13939 /* FIXME: widening and truncation */
13942 * As an optimization, when a variable allocated to the stack is first loaded into
13943 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13944 * the variable again.
13946 orig_next_vreg = cfg->next_vreg;
/* vreg -> cached lvreg map (0 = none); lvregs lists the entries currently set, for fast clearing. */
13947 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13948 lvregs_size = 1024;
13949 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13953 * These arrays contain the first and last instructions accessing a given
13955 * Since we emit bblocks in the same order we process them here, and we
13956 * don't split live ranges, these will precisely describe the live range of
13957 * the variable, i.e. the instruction range where a valid value can be found
13958 * in the variables location.
13959 * The live range is computed using the liveness info computed by the liveness pass.
13960 * We can't use vmv->range, since that is an abstract live range, and we need
13961 * one which is instruction precise.
13962 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13964 /* FIXME: Only do this if debugging info is requested */
13965 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13966 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13967 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13968 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13970 /* Add spill loads/stores */
13971 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13974 if (cfg->verbose_level > 2)
13975 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: reset the entries recorded in lvregs. */
13977 /* Clear vreg_to_lvreg array */
13978 for (i = 0; i < lvregs_len; i++)
13979 vreg_to_lvreg [lvregs [i]] = 0;
13983 MONO_BB_FOR_EACH_INS (bb, ins) {
13984 const char *spec = INS_INFO (ins->opcode);
13985 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13986 gboolean store, no_lvreg;
13987 int sregs [MONO_MAX_SRC_REGS];
13989 if (G_UNLIKELY (cfg->verbose_level > 2))
13990 mono_print_ins (ins);
13992 if (ins->opcode == OP_NOP)
13996 * We handle LDADDR here as well, since it can only be decomposed
13997 * when variable addresses are known.
13999 if (ins->opcode == OP_LDADDR) {
14000 MonoInst *var = (MonoInst *)ins->inst_p0;
14002 if (var->opcode == OP_VTARG_ADDR) {
14003 /* Happens on SPARC/S390 where vtypes are passed by reference */
14004 MonoInst *vtaddr = var->inst_left;
14005 if (vtaddr->opcode == OP_REGVAR) {
14006 ins->opcode = OP_MOVE;
14007 ins->sreg1 = vtaddr->dreg;
14009 else if (var->inst_left->opcode == OP_REGOFFSET) {
14010 ins->opcode = OP_LOAD_MEMBASE;
14011 ins->inst_basereg = vtaddr->inst_basereg;
14012 ins->inst_offset = vtaddr->inst_offset;
/* A -1 mapping marks a gsharedvt argument (see mono_allocate_gsharedvt_vars). */
14015 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14016 /* gsharedvt arg passed by ref */
14017 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14019 ins->opcode = OP_LOAD_MEMBASE;
14020 ins->inst_basereg = var->inst_basereg;
14021 ins->inst_offset = var->inst_offset;
/* A positive mapping is a gsharedvt local: its table index is the mapping minus 1. */
14022 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14023 MonoInst *load, *load2, *load3;
14024 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14025 int reg1, reg2, reg3;
14026 MonoInst *info_var = cfg->gsharedvt_info_var;
14027 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14031 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14034 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14036 g_assert (info_var);
14037 g_assert (locals_var);
14039 /* Mark the instruction used to compute the locals var as used */
14040 cfg->gsharedvt_locals_var_ins = NULL;
14042 /* Load the offset */
14043 if (info_var->opcode == OP_REGOFFSET) {
14044 reg1 = alloc_ireg (cfg);
14045 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14046 } else if (info_var->opcode == OP_REGVAR) {
14048 reg1 = info_var->dreg;
14050 g_assert_not_reached ();
14052 reg2 = alloc_ireg (cfg);
14053 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14054 /* Load the locals area address */
14055 reg3 = alloc_ireg (cfg);
14056 if (locals_var->opcode == OP_REGOFFSET) {
14057 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14058 } else if (locals_var->opcode == OP_REGVAR) {
14059 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14061 g_assert_not_reached ();
14063 /* Compute the address */
14064 ins->opcode = OP_PADD;
/* Insert the three helper loads before ins, in reverse so they execute load, load2, load3. */
14068 mono_bblock_insert_before_ins (bb, ins, load3);
14069 mono_bblock_insert_before_ins (bb, load3, load2);
14071 mono_bblock_insert_before_ins (bb, load2, load);
/* Plain stack variable: LDADDR becomes basereg + offset. */
14073 g_assert (var->opcode == OP_REGOFFSET);
14075 ins->opcode = OP_ADD_IMM;
14076 ins->sreg1 = var->inst_basereg;
14077 ins->inst_imm = var->inst_offset;
/* The rewritten address computation gives the local optimizer something to clean up. */
14080 *need_local_opts = TRUE;
14081 spec = INS_INFO (ins->opcode);
/* By this point no CIL-level opcode should remain in the IR. */
14084 if (ins->opcode < MONO_CEE_LAST) {
14085 mono_print_ins (ins);
14086 g_assert_not_reached ();
14090 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 so the store's base reg is processed as a source below. */
14094 if (MONO_IS_STORE_MEMBASE (ins)) {
14095 tmp_reg = ins->dreg;
14096 ins->dreg = ins->sreg2;
14097 ins->sreg2 = tmp_reg;
/* Build a spec describing the swapped operands: no dest, old dest handled as src2. */
14100 spec2 [MONO_INST_DEST] = ' ';
14101 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14102 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14103 spec2 [MONO_INST_SRC3] = ' ';
14105 } else if (MONO_IS_STORE_MEMINDEX (ins))
14106 g_assert_not_reached ();
14111 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14112 printf ("\t %.3s %d", spec, ins->dreg);
14113 num_sregs = mono_inst_get_src_registers (ins, sregs);
14114 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14115 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
14122 regtype = spec [MONO_INST_DEST];
14123 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/* Destination is a global variable: either rename to its hreg or spill to its stack slot. */
14126 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14127 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14128 MonoInst *store_ins;
14130 MonoInst *def_ins = ins;
14131 int dreg = ins->dreg; /* The original vreg */
14133 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14135 if (var->opcode == OP_REGVAR) {
14136 ins->dreg = var->dreg;
14137 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14139 * Instead of emitting a load+store, use a _membase opcode.
14141 g_assert (var->opcode == OP_REGOFFSET);
14142 if (ins->opcode == OP_MOVE) {
14146 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14147 ins->inst_basereg = var->inst_basereg;
14148 ins->inst_offset = var->inst_offset;
14151 spec = INS_INFO (ins->opcode);
14155 g_assert (var->opcode == OP_REGOFFSET);
14157 prev_dreg = ins->dreg;
14159 /* Invalidate any previous lvreg for this vreg */
14160 vreg_to_lvreg [ins->dreg] = 0;
14164 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14166 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the instruction's result into a fresh lvreg; a store spills it below. */
14169 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14171 #if SIZEOF_REGISTER != 8
14172 if (regtype == 'l') {
14173 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14174 mono_bblock_insert_after_ins (bb, ins, store_ins);
14175 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14176 mono_bblock_insert_after_ins (bb, ins, store_ins);
14177 def_ins = store_ins;
14182 g_assert (store_opcode != OP_STOREV_MEMBASE);
14184 /* Try to fuse the store into the instruction itself */
14185 /* FIXME: Add more instructions */
14186 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14187 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14188 ins->inst_imm = ins->inst_c0;
14189 ins->inst_destbasereg = var->inst_basereg;
14190 ins->inst_offset = var->inst_offset;
14191 spec = INS_INFO (ins->opcode);
14192 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14193 ins->opcode = store_opcode;
14194 ins->inst_destbasereg = var->inst_basereg;
14195 ins->inst_offset = var->inst_offset;
/* The move became a store: swap dreg/sreg2 and rebuild spec2 like for other stores. */
14199 tmp_reg = ins->dreg;
14200 ins->dreg = ins->sreg2;
14201 ins->sreg2 = tmp_reg;
14204 spec2 [MONO_INST_DEST] = ' ';
14205 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14206 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14207 spec2 [MONO_INST_SRC3] = ' ';
14209 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14210 // FIXME: The backends expect the base reg to be in inst_basereg
14211 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14213 ins->inst_basereg = var->inst_basereg;
14214 ins->inst_offset = var->inst_offset;
14215 spec = INS_INFO (ins->opcode);
14217 /* printf ("INS: "); mono_print_ins (ins); */
14218 /* Create a store instruction */
14219 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14221 /* Insert it after the instruction */
14222 mono_bblock_insert_after_ins (bb, ins, store_ins);
14224 def_ins = store_ins;
14227 * We can't assign ins->dreg to var->dreg here, since the
14228 * sregs could use it. So set a flag, and do it after
/* fp-stack based and volatile/indirect variables must not be cached in an lvreg. */
14231 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14232 dest_has_lvreg = TRUE;
/* Record the defining instruction as the start of this vreg's precise live range. */
14237 if (def_ins && !live_range_start [dreg]) {
14238 live_range_start [dreg] = def_ins;
14239 live_range_start_bb [dreg] = bb;
14242 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14245 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14246 tmp->inst_c1 = dreg;
14247 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/*    SREGS    */
/***************/
14254 num_sregs = mono_inst_get_src_registers (ins, sregs);
14255 for (srcindex = 0; srcindex < 3; ++srcindex) {
14256 regtype = spec [MONO_INST_SRC1 + srcindex];
14257 sreg = sregs [srcindex];
14259 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14260 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14261 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14262 MonoInst *use_ins = ins;
14263 MonoInst *load_ins;
14264 guint32 load_opcode;
14266 if (var->opcode == OP_REGVAR) {
14267 sregs [srcindex] = var->dreg;
14268 //mono_inst_set_src_registers (ins, sregs);
14269 live_range_end [sreg] = use_ins;
14270 live_range_end_bb [sreg] = bb;
14272 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14275 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14276 /* var->dreg is a hreg */
14277 tmp->inst_c1 = sreg;
14278 mono_bblock_insert_after_ins (bb, ins, tmp);
14284 g_assert (var->opcode == OP_REGOFFSET);
14286 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14288 g_assert (load_opcode != OP_LOADV_MEMBASE);
14290 if (vreg_to_lvreg [sreg]) {
14291 g_assert (vreg_to_lvreg [sreg] != -1);
14293 /* The variable is already loaded to an lvreg */
14294 if (G_UNLIKELY (cfg->verbose_level > 2))
14295 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14296 sregs [srcindex] = vreg_to_lvreg [sreg];
14297 //mono_inst_set_src_registers (ins, sregs);
14301 /* Try to fuse the load into the instruction */
14302 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14303 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14304 sregs [0] = var->inst_basereg;
14305 //mono_inst_set_src_registers (ins, sregs);
14306 ins->inst_offset = var->inst_offset;
14307 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14308 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14309 sregs [1] = var->inst_basereg;
14310 //mono_inst_set_src_registers (ins, sregs);
14311 ins->inst_offset = var->inst_offset;
14313 if (MONO_IS_REAL_MOVE (ins)) {
14314 ins->opcode = OP_NOP;
14317 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and rewrite the source to use it. */
14319 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14321 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14322 if (var->dreg == prev_dreg) {
14324 * sreg refers to the value loaded by the load
14325 * emitted below, but we need to use ins->dreg
14326 * since it refers to the store emitted earlier.
14330 g_assert (sreg != -1);
/* Remember the lvreg so later uses in this bblock skip the reload. */
14331 vreg_to_lvreg [var->dreg] = sreg;
14332 if (lvregs_len >= lvregs_size) {
/* Grow the lvregs list by doubling; old buffer stays in the mempool. */
14333 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14334 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14335 lvregs = new_lvregs;
14338 lvregs [lvregs_len ++] = var->dreg;
14342 sregs [srcindex] = sreg;
14343 //mono_inst_set_src_registers (ins, sregs);
14345 #if SIZEOF_REGISTER != 8
14346 if (regtype == 'l') {
14347 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14348 mono_bblock_insert_before_ins (bb, ins, load_ins);
14349 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14350 mono_bblock_insert_before_ins (bb, ins, load_ins);
14351 use_ins = load_ins;
14356 #if SIZEOF_REGISTER == 4
14357 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14359 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14360 mono_bblock_insert_before_ins (bb, ins, load_ins);
14361 use_ins = load_ins;
/* Extend the live range to the last use (only original vregs have entries). */
14365 if (var->dreg < orig_next_vreg) {
14366 live_range_end [var->dreg] = use_ins;
14367 live_range_end_bb [var->dreg] = bb;
14370 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14373 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14374 tmp->inst_c1 = var->dreg;
14375 mono_bblock_insert_after_ins (bb, ins, tmp);
14379 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: safe to cache the dest lvreg now that sregs are done. */
14381 if (dest_has_lvreg) {
14382 g_assert (ins->dreg != -1);
14383 vreg_to_lvreg [prev_dreg] = ins->dreg;
14384 if (lvregs_len >= lvregs_size) {
14385 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14386 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14387 lvregs = new_lvregs;
14390 lvregs [lvregs_len ++] = prev_dreg;
14391 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
14395 tmp_reg = ins->dreg;
14396 ins->dreg = ins->sreg2;
14397 ins->sreg2 = tmp_reg;
/* Calls can clobber everything, so the lvreg cache must be flushed. */
14400 if (MONO_IS_CALL (ins)) {
14401 /* Clear vreg_to_lvreg array */
14402 for (i = 0; i < lvregs_len; i++)
14403 vreg_to_lvreg [lvregs [i]] = 0;
14405 } else if (ins->opcode == OP_NOP) {
14407 MONO_INST_NULLIFY_SREGS (ins);
14410 if (cfg->verbose_level > 2)
14411 mono_print_ins_index (1, ins);
14414 /* Extend the live range based on the liveness info */
14415 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14416 for (i = 0; i < cfg->num_varinfo; i ++) {
14417 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14419 if (vreg_is_volatile (cfg, vi->vreg))
14420 /* The liveness info is incomplete */
14423 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14424 /* Live from at least the first ins of this bb */
14425 live_range_start [vi->vreg] = bb->code;
14426 live_range_start_bb [vi->vreg] = bb;
14429 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14430 /* Live at least until the last ins of this bb */
14431 live_range_end [vi->vreg] = bb->last_ins;
14432 live_range_end_bb [vi->vreg] = bb;
14439 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14440 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14442 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14443 for (i = 0; i < cfg->num_varinfo; ++i) {
14444 int vreg = MONO_VARINFO (cfg, i)->vreg;
14447 if (live_range_start [vreg]) {
14448 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14450 ins->inst_c1 = vreg;
14451 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14453 if (live_range_end [vreg]) {
14454 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14456 ins->inst_c1 = vreg;
14457 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14458 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14460 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* If no LDADDR consumed it above, turn the locals-var computation into a harmless constant. */
14465 if (cfg->gsharedvt_locals_var_ins) {
14466 /* Nullify if unused */
14467 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14468 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14471 g_free (live_range_start);
14472 g_free (live_range_end);
14473 g_free (live_range_start_bb);
14474 g_free (live_range_end_bb);
14480 * - use 'iadd' instead of 'int_add'
14481 * - handling ovf opcodes: decompose in method_to_ir.
14482 * - unify iregs/fregs
14483 * -> partly done, the missing parts are:
14484 * - a more complete unification would involve unifying the hregs as well, so
14485 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14486 * would no longer map to the machine hregs, so the code generators would need to
14487 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14488 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14489 * fp/non-fp branches speeds it up by about 15%.
14490 * - use sext/zext opcodes instead of shifts
14492 * - get rid of TEMPLOADs if possible and use vregs instead
14493 * - clean up usage of OP_P/OP_ opcodes
14494 * - cleanup usage of DUMMY_USE
14495 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14497 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14498 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14499 * - make sure handle_stack_args () is called before the branch is emitted
14500 * - when the new IR is done, get rid of all unused stuff
14501 * - COMPARE/BEQ as separate instructions or unify them ?
14502 * - keeping them separate allows specialized compare instructions like
14503 * compare_imm, compare_membase
14504 * - most back ends unify fp compare+branch, fp compare+ceq
14505 * - integrate mono_save_args into inline_method
14506 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14507 * - handle long shift opts on 32 bit platforms somehow: they require
14508 * 3 sregs (2 for arg1 and 1 for arg2)
14509 * - make byref a 'normal' type.
14510 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14511 * variable if needed.
14512 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14513 * like inline_method.
14514 * - remove inlining restrictions
14515 * - fix LNEG and enable cfold of INEG
14516 * - generalize x86 optimizations like ldelema as a peephole optimization
14517 * - add store_mem_imm for amd64
14518 * - optimize the loading of the interruption flag in the managed->native wrappers
14519 * - avoid special handling of OP_NOP in passes
14520 * - move code inserting instructions into one function/macro.
14521 * - try a coalescing phase after liveness analysis
14522 * - add float -> vreg conversion + local optimizations on !x86
14523 * - figure out how to handle decomposed branches during optimizations, ie.
14524 * compare+branch, op_jump_table+op_br etc.
14525 * - promote RuntimeXHandles to vregs
14526 * - vtype cleanups:
14527 * - add a NEW_VARLOADA_VREG macro
14528 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14529 * accessing vtype fields.
14530 * - get rid of I8CONST on 64 bit platforms
14531 * - dealing with the increase in code size due to branches created during opcode
14533 * - use extended basic blocks
14534 * - all parts of the JIT
14535 * - handle_global_vregs () && local regalloc
14536 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14537 * - sources of increase in code size:
14540 * - isinst and castclass
14541 * - lvregs not allocated to global registers even if used multiple times
14542 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14544 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14545 * - add all micro optimizations from the old JIT
14546 * - put tree optimizations into the deadce pass
14547 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14548 * specific function.
14549 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14550 * fcompare + branchCC.
14551 * - create a helper function for allocating a stack slot, taking into account
14552 * MONO_CFG_HAS_SPILLUP.
14554 * - merge the ia64 switch changes.
14555 * - optimize mono_regstate2_alloc_int/float.
14556 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14557 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14558 * parts of the tree could be separated by other instructions, killing the tree
14559 * arguments, or stores killing loads etc. Also, should we fold loads into other
14560 * instructions if the result of the load is used multiple times ?
14561 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14562 * - LAST MERGE: 108395.
14563 * - when returning vtypes in registers, generate IR and append it to the end of the
14564 * last bb instead of doing it in the epilog.
14565 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14573 - When to decompose opcodes:
14574 - earlier: this makes some optimizations hard to implement, since the low level IR
14575 no longer contains the necessary information. But it is easier to do.
14576 - later: harder to implement, enables more optimizations.
14577 - Branches inside bblocks:
14578 - created when decomposing complex opcodes.
14579 - branches to another bblock: harmless, but not tracked by the branch
14580 optimizations, so need to branch to a label at the start of the bblock.
14581 - branches to inside the same bblock: very problematic, trips up the local
14582 reg allocator. Can be fixed by splitting the current bblock, but that is a
14583 complex operation, since some local vregs can become global vregs etc.
14584 - Local/global vregs:
14585 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14586 local register allocator.
14587 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14588 structure, created by mono_create_var (). Assigned to hregs or the stack by
14589 the global register allocator.
14590 - When to do optimizations like alu->alu_imm:
14591 - earlier -> saves work later on since the IR will be smaller/simpler
14592 - later -> can work on more instructions
14593 - Handling of valuetypes:
14594 - When a vtype is pushed on the stack, a new temporary is created, an
14595 instruction computing its address (LDADDR) is emitted and pushed on
14596 the stack. Need to optimize cases when the vtype is used immediately as in
14597 argument passing, stloc etc.
14598 - Instead of the to_end stuff in the old JIT, simply call the function handling
14599 the values on the stack before emitting the last instruction of the bb.
14602 #else /* !DISABLE_JIT */
14605 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14609 #endif /* !DISABLE_JIT */