3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 inline static MonoInst*
152 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
158 static MonoMethodSignature *helper_sig_jit_thread_attach;
159 static MonoMethodSignature *helper_sig_get_tls_tramp;
160 static MonoMethodSignature *helper_sig_set_tls_tramp;
162 /* type loading helpers */
163 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
164 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
167 * Instruction metadata
175 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
176 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
182 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
187 /* keep in sync with the enum in mini.h */
190 #include "mini-ops.h"
195 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
196 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
198 * This should contain the index of the last sreg + 1. This is not the same
199 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
201 const gint8 ins_sreg_counts[] = {
202 #include "mini-ops.h"
208 mono_alloc_ireg (MonoCompile *cfg)
210 return alloc_ireg (cfg);
214 mono_alloc_lreg (MonoCompile *cfg)
216 return alloc_lreg (cfg);
220 mono_alloc_freg (MonoCompile *cfg)
222 return alloc_freg (cfg);
226 mono_alloc_preg (MonoCompile *cfg)
228 return alloc_preg (cfg);
232 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
234 return alloc_dreg (cfg, stack_type);
238 * mono_alloc_ireg_ref:
240 * Allocate an IREG, and mark it as holding a GC ref.
243 mono_alloc_ireg_ref (MonoCompile *cfg)
245 return alloc_ireg_ref (cfg);
249 * mono_alloc_ireg_mp:
251 * Allocate an IREG, and mark it as holding a managed pointer.
254 mono_alloc_ireg_mp (MonoCompile *cfg)
256 return alloc_ireg_mp (cfg);
260 * mono_alloc_ireg_copy:
262 * Allocate an IREG with the same GC type as VREG.
265 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
267 if (vreg_is_ref (cfg, vreg))
268 return alloc_ireg_ref (cfg);
269 else if (vreg_is_mp (cfg, vreg))
270 return alloc_ireg_mp (cfg);
272 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the register-to-register move opcode used to copy a value of
 * TYPE (e.g. OP_RMOVE/OP_FMOVE for floats, as visible below); recurses for
 * shared type variables after resolving the underlying type.
 * NOTE(review): interior lines of this listing are elided — several case
 * labels and return statements are not visible here.
 */
276 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 type = mini_get_underlying_type (type);
283 switch (type->type) {
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4fp selects the dedicated single-precision move on targets that keep R4 unwidened. */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
/* Enums move like their underlying integral base type. */
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
332 g_assert (cfg->gshared);
333 if (mini_type_var_is_vt (type))
336 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
338 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the block number, the IN/OUT edge lists
 * (block number and dfn of each neighbour), then every instruction in the
 * block via mono_print_ins_index ().
 * NOTE(review): the local declarations (i, tree) are elided in this listing.
 */
344 mono_print_bb (MonoBasicBlock *bb, const char *msg)
348 GString *str = g_string_new ("");
350 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
353 g_string_append_printf (str, ", OUT: ");
354 for (i = 0; i < bb->out_count; ++i)
355 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
356 g_string_append_printf (str, " ]\n");
/* Flush the accumulated edge summary before dumping the instructions. */
358 g_print ("%s", str->str);
359 g_string_free (str, TRUE);
361 for (tree = bb->code; tree; tree = tree->next)
362 mono_print_ins_index (-1, tree);
366 mono_create_helper_signatures (void)
368 helper_sig_domain_get = mono_create_icall_signature ("ptr");
369 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
370 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
371 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
372 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
373 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/*
 * break_on_unverified:
 *
 *   Debugging hook: lets a debugger stop when IL fails to verify, gated on
 * the break_on_unverified debug option.
 * NOTE(review): the body of the if (presumably a breakpoint trap) is elided
 * in this listing — confirm against the full source.
 */
376 static MONO_NEVER_INLINE void
377 break_on_unverified (void)
379 if (mini_get_debug_options ()->break_on_unverified)
383 static MONO_NEVER_INLINE void
384 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
386 char *method_fname = mono_method_full_name (method, TRUE);
387 char *field_fname = mono_field_full_name (field);
388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
389 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
390 g_free (method_fname);
391 g_free (field_fname);
394 static MONO_NEVER_INLINE void
395 inline_failure (MonoCompile *cfg, const char *msg)
397 if (cfg->verbose_level >= 2)
398 printf ("inline failed: %s\n", msg);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
402 static MONO_NEVER_INLINE void
403 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 if (cfg->verbose_level > 2) \
406 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
407 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
410 static MONO_NEVER_INLINE void
411 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
413 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
414 if (cfg->verbose_level >= 2)
415 printf ("%s\n", cfg->exception_message);
416 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
420 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
421 * foo<T> (int i) { ldarg.0; box T; }
423 #define UNVERIFIED do { \
424 if (cfg->gsharedvt) { \
425 if (cfg->verbose_level > 2) \
426 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
427 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
428 goto exception_exit; \
430 break_on_unverified (); \
434 #define GET_BBLOCK(cfg,tblock,ip) do { \
435 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
437 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
438 NEW_BBLOCK (cfg, (tblock)); \
439 (tblock)->cil_code = (ip); \
440 ADD_BBLOCK (cfg, (tblock)); \
444 #if defined(TARGET_X86) || defined(TARGET_AMD64)
445 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
446 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
447 (dest)->dreg = alloc_ireg_mp ((cfg)); \
448 (dest)->sreg1 = (sr1); \
449 (dest)->sreg2 = (sr2); \
450 (dest)->inst_imm = (imm); \
451 (dest)->backend.shift_amount = (shift); \
452 MONO_ADD_INS ((cfg)->cbb, (dest)); \
456 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   If the two stack operands of INS have mismatched eval-stack types
 * (r4 vs r8; or, on 64-bit, native int vs i4), emit the widening
 * conversion and rewire the affected source register of INS.
 * NOTE(review): interior lines are elided in this listing (e.g. the conv
 * declarations and the stores back through arg1_ref/arg2_ref are not
 * visible).
 */
458 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
460 MonoInst *arg1 = *arg1_ref;
461 MonoInst *arg2 = *arg2_ref;
464 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
465 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
468 /* Mixing r4/r8 is allowed by the spec */
469 if (arg1->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
473 conv->type = STACK_R8;
477 if (arg2->type == STACK_R4) {
478 int dreg = alloc_freg (cfg);
480 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
481 conv->type = STACK_R8;
487 #if SIZEOF_REGISTER == 8
488 /* FIXME: Need to add many more cases */
/* Sign-extend the i4 operand so pointer arithmetic sees a full native int. */
489 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
492 int dr = alloc_preg (cfg);
493 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
494 (ins)->sreg2 = widen->dreg;
499 #define ADD_BINOP(op) do { \
500 MONO_INST_NEW (cfg, ins, (op)); \
502 ins->sreg1 = sp [0]->dreg; \
503 ins->sreg2 = sp [1]->dreg; \
504 type_from_op (cfg, ins, sp [0], sp [1]); \
506 /* Have to insert a widening op */ \
507 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
508 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
509 MONO_ADD_INS ((cfg)->cbb, (ins)); \
510 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
513 #define ADD_UNOP(op) do { \
514 MONO_INST_NEW (cfg, ins, (op)); \
516 ins->sreg1 = sp [0]->dreg; \
517 type_from_op (cfg, ins, sp [0], NULL); \
519 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
520 MONO_ADD_INS ((cfg)->cbb, (ins)); \
521 *sp++ = mono_decompose_opcode (cfg, ins); \
524 #define ADD_BINCOND(next_block) do { \
527 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
528 cmp->sreg1 = sp [0]->dreg; \
529 cmp->sreg2 = sp [1]->dreg; \
530 type_from_op (cfg, cmp, sp [0], sp [1]); \
532 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
533 type_from_op (cfg, ins, sp [0], sp [1]); \
534 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
535 GET_BBLOCK (cfg, tblock, target); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_true_bb = tblock; \
538 if ((next_block)) { \
539 link_bblock (cfg, cfg->cbb, (next_block)); \
540 ins->inst_false_bb = (next_block); \
541 start_new_bblock = 1; \
543 GET_BBLOCK (cfg, tblock, ip); \
544 link_bblock (cfg, cfg->cbb, tblock); \
545 ins->inst_false_bb = tblock; \
546 start_new_bblock = 2; \
548 if (sp != stack_start) { \
549 handle_stack_args (cfg, stack_start, sp - stack_start); \
550 CHECK_UNVERIFIABLE (cfg); \
552 MONO_ADD_INS (cfg->cbb, cmp); \
553 MONO_ADD_INS (cfg->cbb, ins); \
557 * link_bblock: Links two basic blocks
559 * links two basic blocks in the control flow graph, the 'from'
560 * argument is the starting block and the 'to' argument is the block
561 * the control flow ends to after 'from'.
/*
 * NOTE(review): interior lines are elided in this listing — the verbose
 * gating, the appends of the new edge and the count increments are not
 * visible.
 */
564 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
566 MonoBasicBlock **newa;
/* Debug tracing of the new edge; the exit/entry cases print placeholders. */
570 if (from->cil_code) {
572 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
574 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
577 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
579 printf ("edge from entry to exit\n");
/* Skip duplicate edges, then grow FROM's out_bb array by one. */
584 for (i = 0; i < from->out_count; ++i) {
585 if (to == from->out_bb [i]) {
591 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
592 for (i = 0; i < from->out_count; ++i) {
593 newa [i] = from->out_bb [i];
/* Mirror the edge on TO's in_bb list. */
601 for (i = 0; i < to->in_count; ++i) {
602 if (from == to->in_bb [i]) {
608 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
609 for (i = 0; i < to->in_count; ++i) {
610 newa [i] = to->in_bb [i];
619 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
621 link_bblock (cfg, from, to);
625 * mono_find_block_region:
627 * We mark each basic block with a region ID. We use that to avoid BB
628 * optimizations when blocks are in different regions.
631 * A region token that encodes where this region is, and information
632 * about the clause owner for this block.
634 * The region encodes the try/catch/filter clause that owns this block
635 * as well as the type. -1 is a special value that represents a block
636 * that is in none of try/catch/filter.
/*
 * NOTE(review): interior lines (return type, locals, closing braces and the
 * final fall-through return) are elided in this listing.
 */
639 mono_find_block_region (MonoCompile *cfg, int offset)
641 MonoMethodHeader *header = cfg->header;
642 MonoExceptionClause *clause;
/* First pass: filter and handler regions take precedence over try regions. */
645 for (i = 0; i < header->num_clauses; ++i) {
646 clause = &header->clauses [i];
647 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
648 (offset < (clause->handler_offset)))
649 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
651 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
652 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
653 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
654 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
655 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
657 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: plain try regions. */
660 for (i = 0; i < header->num_clauses; ++i) {
661 clause = &header->clauses [i];
663 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
664 return ((i + 1) << 8) | clause->flags;
/*
 * ip_in_finally_clause:
 *
 *   Check whether the IL OFFSET lies inside the handler of a finally or
 * fault clause of the current method.
 * NOTE(review): the return statements (and the continue for non-matching
 * clause kinds) are elided in this listing.
 */
671 ip_in_finally_clause (MonoCompile *cfg, int offset)
673 MonoMethodHeader *header = cfg->header;
674 MonoExceptionClause *clause;
677 for (i = 0; i < header->num_clauses; ++i) {
678 clause = &header->clauses [i];
679 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
682 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * mono_find_final_block:
 *
 *   Collect (into a GList) the clauses of kind TYPE whose protected region
 * contains IP but not TARGET — i.e. the clauses control would leave when
 * branching from IP to TARGET, whose handlers therefore need to run.
 * NOTE(review): the declaration/initialization of res and the return are
 * elided in this listing.
 */
689 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
691 MonoMethodHeader *header = cfg->header;
692 MonoExceptionClause *clause;
696 for (i = 0; i < header->num_clauses; ++i) {
697 clause = &header->clauses [i];
698 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
699 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
700 if (clause->flags == type)
701 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Lazily create — and cache in cfg->spvars keyed by REGION — the local
 * variable used to save the stack pointer for an EH region.
 * NOTE(review): the early return of a cached var and the final return are
 * elided in this listing.
 */
708 mono_create_spvar_for_region (MonoCompile *cfg, int region)
712 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
716 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
717 /* prevent it from being register allocated */
718 var->flags |= MONO_INST_VOLATILE;
720 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
724 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
726 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Lazily create — and cache in cfg->exvars keyed by OFFSET — the object
 * local used to hold the exception for a handler starting at OFFSET.
 * NOTE(review): the early return of a cached var and the final return are
 * elided in this listing.
 */
730 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
734 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
738 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
739 /* prevent it from being register allocated */
740 var->flags |= MONO_INST_VOLATILE;
742 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
748 * Returns the type used in the eval stack when @type is loaded.
749 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 *
 *   Set INST->type (and ->klass) to the eval-stack category (STACK_I4,
 * STACK_OBJ, STACK_VTYPE, ...) produced by loading TYPE.
 * NOTE(review): many case labels, break statements and the byref branch
 * are elided in this listing; comments cover only the visible lines.
 */
752 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
756 type = mini_get_underlying_type (type);
757 inst->klass = klass = mono_class_from_mono_type (type);
759 inst->type = STACK_MP;
764 switch (type->type) {
766 inst->type = STACK_INV;
774 inst->type = STACK_I4;
779 case MONO_TYPE_FNPTR:
780 inst->type = STACK_PTR;
782 case MONO_TYPE_CLASS:
783 case MONO_TYPE_STRING:
784 case MONO_TYPE_OBJECT:
785 case MONO_TYPE_SZARRAY:
786 case MONO_TYPE_ARRAY:
787 inst->type = STACK_OBJ;
791 inst->type = STACK_I8;
794 inst->type = cfg->r4_stack_type;
797 inst->type = STACK_R8;
799 case MONO_TYPE_VALUETYPE:
/* Enums take the stack type of their underlying integral base type. */
800 if (type->data.klass->enumtype) {
801 type = mono_class_enum_basetype (type->data.klass);
805 inst->type = STACK_VTYPE;
808 case MONO_TYPE_TYPEDBYREF:
809 inst->klass = mono_defaults.typed_reference_class;
810 inst->type = STACK_VTYPE;
812 case MONO_TYPE_GENERICINST:
813 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
817 g_assert (cfg->gshared);
818 if (mini_is_gsharedvt_type (type)) {
819 g_assert (cfg->gsharedvt);
820 inst->type = STACK_VTYPE;
822 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
826 g_error ("unknown type 0x%02x in eval stack type", type->type);
831 * The following tables are used to quickly validate the IL code in type_from_op ().
834 bin_num_table [STACK_MAX] [STACK_MAX] = {
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
840 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
848 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
851 /* reduce the size of this table */
853 bin_int_table [STACK_MAX] [STACK_MAX] = {
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
865 bin_comp_table [STACK_MAX] [STACK_MAX] = {
866 /* Inv i L p F & O vt r4 */
868 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
869 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
870 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
871 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
872 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
873 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
874 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
875 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
878 /* reduce the size of this table */
880 shift_table [STACK_MAX] [STACK_MAX] = {
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
892 * Tables to map from the non-specific opcode to the matching
893 * type-specific opcode.
895 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
897 binops_op_map [STACK_MAX] = {
898 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
901 /* handles from CEE_NEG to CEE_CONV_U8 */
903 unops_op_map [STACK_MAX] = {
904 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
907 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
909 ovfops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
913 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
915 ovf2ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
919 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
921 ovf3ops_op_map [STACK_MAX] = {
922 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
925 /* handles from CEE_BEQ to CEE_BLT_UN */
927 beqops_op_map [STACK_MAX] = {
928 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
931 /* handles from CEE_CEQ to CEE_CLT_UN */
933 ceqops_op_map [STACK_MAX] = {
934 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
938 * Sets ins->type (the type on the eval stack) according to the
939 * type of the opcode and the arguments to it.
940 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
942 * FIXME: this function sets ins->type unconditionally in some cases, but
943 * it should set it to invalid for some types (a conv.x on an object)
946 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
948 switch (ins->opcode) {
955 /* FIXME: check unverifiable args for STACK_MP */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
964 ins->type = bin_int_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
970 ins->type = shift_table [src1->type] [src2->type];
971 ins->opcode += binops_op_map [ins->type];
976 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
977 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
978 ins->opcode = OP_LCOMPARE;
979 else if (src1->type == STACK_R4)
980 ins->opcode = OP_RCOMPARE;
981 else if (src1->type == STACK_R8)
982 ins->opcode = OP_FCOMPARE;
984 ins->opcode = OP_ICOMPARE;
986 case OP_ICOMPARE_IMM:
987 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
988 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
989 ins->opcode = OP_LCOMPARE_IMM;
1001 ins->opcode += beqops_op_map [src1->type];
1004 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
1011 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1012 ins->opcode += ceqops_op_map [src1->type];
1016 ins->type = neg_table [src1->type];
1017 ins->opcode += unops_op_map [ins->type];
1020 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1021 ins->type = src1->type;
1023 ins->type = STACK_INV;
1024 ins->opcode += unops_op_map [ins->type];
1030 ins->type = STACK_I4;
1031 ins->opcode += unops_op_map [src1->type];
1034 ins->type = STACK_R8;
1035 switch (src1->type) {
1038 ins->opcode = OP_ICONV_TO_R_UN;
1041 ins->opcode = OP_LCONV_TO_R_UN;
1045 case CEE_CONV_OVF_I1:
1046 case CEE_CONV_OVF_U1:
1047 case CEE_CONV_OVF_I2:
1048 case CEE_CONV_OVF_U2:
1049 case CEE_CONV_OVF_I4:
1050 case CEE_CONV_OVF_U4:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf3ops_op_map [src1->type];
1054 case CEE_CONV_OVF_I_UN:
1055 case CEE_CONV_OVF_U_UN:
1056 ins->type = STACK_PTR;
1057 ins->opcode += ovf2ops_op_map [src1->type];
1059 case CEE_CONV_OVF_I1_UN:
1060 case CEE_CONV_OVF_I2_UN:
1061 case CEE_CONV_OVF_I4_UN:
1062 case CEE_CONV_OVF_U1_UN:
1063 case CEE_CONV_OVF_U2_UN:
1064 case CEE_CONV_OVF_U4_UN:
1065 ins->type = STACK_I4;
1066 ins->opcode += ovf2ops_op_map [src1->type];
1069 ins->type = STACK_PTR;
1070 switch (src1->type) {
1072 ins->opcode = OP_ICONV_TO_U;
1076 #if SIZEOF_VOID_P == 8
1077 ins->opcode = OP_LCONV_TO_U;
1079 ins->opcode = OP_MOVE;
1083 ins->opcode = OP_LCONV_TO_U;
1086 ins->opcode = OP_FCONV_TO_U;
1092 ins->type = STACK_I8;
1093 ins->opcode += unops_op_map [src1->type];
1095 case CEE_CONV_OVF_I8:
1096 case CEE_CONV_OVF_U8:
1097 ins->type = STACK_I8;
1098 ins->opcode += ovf3ops_op_map [src1->type];
1100 case CEE_CONV_OVF_U8_UN:
1101 case CEE_CONV_OVF_I8_UN:
1102 ins->type = STACK_I8;
1103 ins->opcode += ovf2ops_op_map [src1->type];
1106 ins->type = cfg->r4_stack_type;
1107 ins->opcode += unops_op_map [src1->type];
1110 ins->type = STACK_R8;
1111 ins->opcode += unops_op_map [src1->type];
1114 ins->type = STACK_R8;
1118 ins->type = STACK_I4;
1119 ins->opcode += ovfops_op_map [src1->type];
1122 case CEE_CONV_OVF_I:
1123 case CEE_CONV_OVF_U:
1124 ins->type = STACK_PTR;
1125 ins->opcode += ovfops_op_map [src1->type];
1128 case CEE_ADD_OVF_UN:
1130 case CEE_MUL_OVF_UN:
1132 case CEE_SUB_OVF_UN:
1133 ins->type = bin_num_table [src1->type] [src2->type];
1134 ins->opcode += ovfops_op_map [src1->type];
1135 if (ins->type == STACK_R8)
1136 ins->type = STACK_INV;
1138 case OP_LOAD_MEMBASE:
1139 ins->type = STACK_PTR;
1141 case OP_LOADI1_MEMBASE:
1142 case OP_LOADU1_MEMBASE:
1143 case OP_LOADI2_MEMBASE:
1144 case OP_LOADU2_MEMBASE:
1145 case OP_LOADI4_MEMBASE:
1146 case OP_LOADU4_MEMBASE:
1147 ins->type = STACK_PTR;
1149 case OP_LOADI8_MEMBASE:
1150 ins->type = STACK_I8;
1152 case OP_LOADR4_MEMBASE:
1153 ins->type = cfg->r4_stack_type;
1155 case OP_LOADR8_MEMBASE:
1156 ins->type = STACK_R8;
1159 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1163 if (ins->type == STACK_MP)
1164 ins->klass = mono_defaults.object_class;
1169 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1175 param_table [STACK_MAX] [STACK_MAX] = {
1180 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1185 switch (args->type) {
1195 for (i = 0; i < sig->param_count; ++i) {
1196 switch (args [i].type) {
1200 if (!sig->params [i]->byref)
1204 if (sig->params [i]->byref)
1206 switch (sig->params [i]->type) {
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1218 if (sig->params [i]->byref)
1220 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1229 /*if (!param_table [args [i].type] [sig->params [i]->type])
1237 * When we need a pointer to the current domain many times in a method, we
1238 * call mono_domain_get() once and we store the result in a local variable.
1239 * This function returns the variable that represents the MonoDomain*.
1241 inline static MonoInst *
1242 mono_get_domainvar (MonoCompile *cfg)
1244 if (!cfg->domainvar)
1245 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 return cfg->domainvar;
1250 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *
 *   Return (lazily creating) the variable holding the GOT address; only
 * relevant for AOT compilation on backends that need a got var.
 * NOTE(review): the early-return value for the non-AOT case and the braces
 * are elided in this listing.
 */
1254 mono_get_got_var (MonoCompile *cfg)
1256 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1258 if (!cfg->got_var) {
1259 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 return cfg->got_var;
1265 mono_create_rgctx_var (MonoCompile *cfg)
1267 if (!cfg->rgctx_var) {
1268 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1269 /* force the var to be stack allocated */
1270 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/* Return the variable holding the vtable/rgctx for generic sharing.
 * Only valid for gshared methods (asserted); ensures the rgctx var
 * exists before returning it. */
1275 mono_get_vtable_var (MonoCompile *cfg)
1277 g_assert (cfg->gshared);
1279 mono_create_rgctx_var (cfg);
1281 return cfg->rgctx_var;
/* Map an instruction's evaluation-stack type (STACK_*) back to a MonoType.
 * For STACK_VTYPE the instruction's klass supplies the concrete type;
 * unhandled stack types abort via g_error.
 * NOTE(review): the case label before line 1293 (the &ins->klass->this_arg
 * branch, presumably STACK_MP) is elided in this extract. */
1285 type_from_stack_type (MonoInst *ins) {
1286 switch (ins->type) {
1287 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1288 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1289 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1290 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1291 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1293 return &ins->klass->this_arg;
1294 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1295 case STACK_VTYPE: return &ins->klass->byval_arg;
1297 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Classify a MonoType as a STACK_* evaluation-stack type, after stripping
 * enum/typedef wrappers with mono_type_get_underlying_type ().
 * R4 maps to cfg->r4_stack_type since its stack representation is
 * configuration dependent. Unreachable for unknown types.
 * NOTE(review): many case labels and return statements are elided in this
 * extract. */
1302 static G_GNUC_UNUSED int
1303 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1305 t = mono_type_get_underlying_type (t);
1317 case MONO_TYPE_FNPTR:
1319 case MONO_TYPE_CLASS:
1320 case MONO_TYPE_STRING:
1321 case MONO_TYPE_OBJECT:
1322 case MONO_TYPE_SZARRAY:
1323 case MONO_TYPE_ARRAY:
1329 return cfg->r4_stack_type;
1332 case MONO_TYPE_VALUETYPE:
1333 case MONO_TYPE_TYPEDBYREF:
1335 case MONO_TYPE_GENERICINST:
1336 if (mono_type_generic_inst_is_valuetype (t))
1342 g_assert_not_reached ();
/* Return the element class accessed by an array-access CIL opcode
 * (ldelem.*/
/* stelem.* family). Aborts for opcodes outside that family.
 * NOTE(review): most case labels are elided in this extract; only the
 * LDELEM_REF/STELEM_REF labels are visible. */
1349 array_access_to_klass (int opcode)
1353 return mono_defaults.byte_class;
1355 return mono_defaults.uint16_class;
1358 return mono_defaults.int_class;
1361 return mono_defaults.sbyte_class;
1364 return mono_defaults.int16_class;
1367 return mono_defaults.int32_class;
1369 return mono_defaults.uint32_class;
1372 return mono_defaults.int64_class;
1375 return mono_defaults.single_class;
1378 return mono_defaults.double_class;
1379 case CEE_LDELEM_REF:
1380 case CEE_STELEM_REF:
1381 return mono_defaults.object_class;
1383 g_assert_not_reached ();
1389  * We try to share variables when possible
/* Return a local variable for evaluation-stack slot SLOT holding a value
 * of INS's stack type. Variables are cached per (slot, stack type) pair in
 * cfg->intvars (indexed by pos below) so join points reuse the same local;
 * slots beyond the method's declared max_stack (from inlining) always get
 * a fresh variable. */
1392 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1397 /* inlining can result in deeper stacks */
1398 if (slot >= cfg->header->max_stack)
1399 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1401 pos = ins->type - 1 + slot * STACK_MAX;
1403 switch (ins->type) {
1410 if ((vnum = cfg->intvars [pos]))
1411 return cfg->varinfo [vnum];
1412 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1413 cfg->intvars [pos] = res->inst_c0;
/* non-cacheable stack types: always create a new variable */
1416 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair under KEY in cfg->token_info_hash so the AOT
 * compiler can later map KEY back to a metadata token. Only done when
 * compiling AOT without a generic context (see comment below). */
1422 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1425  * Don't use this if a generic_context is set, since that means AOT can't
1426  * look up the method using just the image+token.
1427  * table == 0 means this is a reference made from a wrapper.
1429 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1430 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1431 jump_info_token->image = image;
1432 jump_info_token->token = token;
1433 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1438  * This function is called to handle items that are left on the evaluation stack
1439  * at basic block boundaries. What happens is that we save the values to local variables
1440  * and we reload them later when first entering the target basic block (with the
1441  * handle_loaded_temps () function).
1442  * A single joint point will use the same variables (stored in the array bb->out_stack or
1443  * bb->in_stack, if the basic block is before or after the joint point).
1445  * This function needs to be called _before_ emitting the last instruction of
1446  * the bb (i.e. before emitting a branch).
1447  * If the stack merge fails at a join point, cfg->unverifiable is set.
1450 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1453 MonoBasicBlock *bb = cfg->cbb;
1454 MonoBasicBlock *outb;
1455 MonoInst *inst, **locals;
1460 if (cfg->verbose_level > 3)
1461 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables hold the out-stack.
 * Prefer an out-bblock's existing in_stack so both sides agree. */
1462 if (!bb->out_scount) {
1463 bb->out_scount = count;
1464 //printf ("bblock %d has out:", bb->block_num);
1466 for (i = 0; i < bb->out_count; ++i) {
1467 outb = bb->out_bb [i];
1468 /* exception handlers are linked, but they should not be considered for stack args */
1469 if (outb->flags & BB_EXCEPTION_HANDLER)
1471 //printf (" %d", outb->block_num);
1472 if (outb->in_stack) {
1474 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh out-stack variables. */
1480 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1481 for (i = 0; i < count; ++i) {
1483  * try to reuse temps already allocated for this purpose, if they occupy the same
1484  * stack slot and if they are of the same type.
1485  * This won't cause conflicts since if 'local' is used to
1486  * store one of the values in the in_stack of a bblock, then
1487  * the same variable will be used for the same outgoing stack
1489  * This doesn't work when inlining methods, since the bblocks
1490  * in the inlined methods do not inherit their in_stack from
1491  * the bblock they are inlined to. See bug #58863 for an
1494 if (cfg->inlined_method)
1495 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1497 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bblock's out-stack to successors that don't have an
 * in_stack yet; mismatched counts make the method unverifiable. */
1502 for (i = 0; i < bb->out_count; ++i) {
1503 outb = bb->out_bb [i];
1504 /* exception handlers are linked, but they should not be considered for stack args */
1505 if (outb->flags & BB_EXCEPTION_HANDLER)
1507 if (outb->in_scount) {
1508 if (outb->in_scount != bb->out_scount) {
1509 cfg->unverifiable = TRUE;
1512 continue; /* check they are the same locals */
1514 outb->in_scount = count;
1515 outb->in_stack = bb->out_stack;
/* Spill the current stack values into the chosen locals and replace the
 * stack entries with the locals themselves. */
1518 locals = bb->out_stack;
1520 for (i = 0; i < count; ++i) {
1521 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1522 inst->cil_code = sp [i]->cil_code;
1523 sp [i] = locals [i];
1524 if (cfg->verbose_level > 3)
1525 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1529  * It is possible that the out bblocks already have in_stack assigned, and
1530  * the in_stacks differ. In this case, we will store to all the different
1537 /* Find a bblock which has a different in_stack */
1539 while (bindex < bb->out_count) {
1540 outb = bb->out_bb [bindex];
1541 /* exception handlers are linked, but they should not be considered for stack args */
1542 if (outb->flags & BB_EXCEPTION_HANDLER) {
1546 if (outb->in_stack != locals) {
1547 for (i = 0; i < count; ++i) {
1548 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1549 inst->cil_code = sp [i]->cil_code;
1550 sp [i] = locals [i];
1551 if (cfg->verbose_level > 3)
1552 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1554 locals = outb->in_stack;
/* Emit IR that loads a runtime constant described by (patch_type, data):
 * under AOT, an AOTCONST patched at load time; otherwise the value is
 * resolved right now via mono_resolve_patch_target () and emitted as a
 * plain pointer constant. */
1564 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1568 if (cfg->compile_aot) {
1569 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1575 ji.type = patch_type;
1576 ji.data.target = data;
1577 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1578 mono_error_assert_ok (&error);
1580 EMIT_NEW_PCONST (cfg, ins, target);
/* Public (mini_) wrapper around emit_runtime_constant (). */
1586 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1588 return emit_runtime_constant (cfg, patch_type, data);
/* Emit inline IR that sets SIZE bytes at destreg+offset to VAL.
 * Only VAL == 0 is supported (asserted). Small aligned sizes use
 * immediate stores; larger blocks store a zeroed register in
 * 8/4/2/1-byte chunks, avoiding wide stores on backends that cannot
 * do unaligned access. */
1592 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1596 g_assert (val == 0);
1601 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1604 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1607 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1610 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1612 #if SIZEOF_REGISTER == 8
1614 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: load VAL into a register and store it out in chunks */
1620 val_reg = alloc_preg (cfg);
1622 if (SIZEOF_REGISTER == 8)
1623 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1625 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1628 /* This could be optimized further if necessary */
1630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1637 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline IR copying SIZE bytes from srcreg+soffset to destreg+doffset
 * as load/store pairs in 8/4/2/1-byte chunks (8-byte chunks only on
 * 64-bit backends that allow unaligned access). SIZE is asserted to be
 * small to bound generated-code size. */
1668 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1675 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1676 g_assert (size < 10000);
1679 /* This could be optimized further if necessary */
/* unaligned prefix: byte-by-byte until aligned */
1681 cur_reg = alloc_preg (cfg);
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1690 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1692 cur_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1702 cur_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Build an OP_TLS_GET instruction for KEY when a fast TLS offset is
 * available on this platform. Not used under AOT (offsets are not
 * stable across processes). NOTE(review): the return values for the
 * early-out paths are elided here — presumably NULL; confirm. */
1728 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1730 int tls_offset = mono_tls_get_tls_offset (key);
1732 if (cfg->compile_aot)
1735 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1737 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1738 ins->dreg = mono_alloc_preg (cfg);
1739 ins->inst_offset = tls_offset;
/* Build an OP_TLS_SET instruction storing VALUE into the TLS slot KEY
 * when a fast TLS offset is available. Mirrors
 * mono_create_fast_tls_getter (); not used under AOT. */
1746 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1748 int tls_offset = mono_tls_get_tls_offset (key);
1750 if (cfg->compile_aot)
1753 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1755 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1756 ins->sreg1 = value->dreg;
1757 ins->inst_offset = tls_offset;
/* Emit IR reading the TLS slot KEY. Prefers the fast inline getter
 * (unless the debug option forces the fallback); otherwise calls a
 * helper: an indirect call through a GET_TLS_TRAMP constant under AOT,
 * or a JIT icall to the resolved getter otherwise. */
1765 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1767 MonoInst *fast_tls = NULL;
1769 if (!mini_get_debug_options ()->use_fallback_tls)
1770 fast_tls = mono_create_fast_tls_getter (cfg, key);
1773 MONO_ADD_INS (cfg->cbb, fast_tls);
1777 if (cfg->compile_aot) {
1780  * tls getters are critical pieces of code and we don't want to resolve them
1781  * through the standard plt/tramp mechanism since we might expose ourselves
1782  * to crashes and infinite recursions.
1784 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1785 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1787 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1788 return mono_emit_jit_icall (cfg, getter, NULL);
/* Emit IR writing VALUE into the TLS slot KEY. Same structure as
 * mono_create_tls_get (): fast inline setter when possible, otherwise
 * a SET_TLS_TRAMP indirect call under AOT or a JIT icall. */
1793 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1795 MonoInst *fast_tls = NULL;
1797 if (!mini_get_debug_options ()->use_fallback_tls)
1798 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1801 MONO_ADD_INS (cfg->cbb, fast_tls);
1805 if (cfg->compile_aot) {
1807 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1808 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1810 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1811 return mono_emit_jit_icall (cfg, setter, &value);
1818  * Emit IR to push the current LMF onto the LMF stack.
1821 emit_push_lmf (MonoCompile *cfg)
1824  * Emit IR to push the LMF:
1825  * lmf_addr = <lmf_addr from tls>
1826  * lmf->lmf_addr = lmf_addr
1827  * lmf->prev_lmf = *lmf_addr
1830 MonoInst *ins, *lmf_ins;
1835 int lmf_reg, prev_lmf_reg;
1837  * Store lmf_addr in a variable, so it can be allocated to a global register.
1839 if (!cfg->lmf_addr_var)
1840 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from jit_tls + offset, the other reads it
 * directly from TLS_KEY_LMF_ADDR; the selecting condition is elided in
 * this extract. */
1843 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1845 int jit_tls_dreg = ins->dreg;
1847 lmf_reg = alloc_preg (cfg);
1848 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1850 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1853 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1855 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1856 lmf_reg = ins->dreg;
1858 prev_lmf_reg = alloc_preg (cfg);
1859 /* Save previous_lmf */
1860 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1861 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this method's LMF the top of the LMF stack (*lmf_addr = lmf) */
1863 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1869  * Emit IR to pop the current LMF from the LMF stack.
1872 emit_pop_lmf (MonoCompile *cfg)
1874 int lmf_reg, lmf_addr_reg;
1880 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1881 lmf_reg = ins->dreg;
1885  * Emit IR to pop the LMF:
1886  * *(lmf->lmf_addr) = lmf->prev_lmf
1888 /* This could be called before emit_push_lmf () */
1889 if (!cfg->lmf_addr_var)
1890 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1891 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* restore the previous LMF as the top of the LMF stack */
1893 prev_lmf_reg = alloc_preg (cfg);
1894 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1895 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave icall to FUNC with the current method as
 * its single argument. Skipped for inlined methods and when the
 * enter/leave profiler option is off. */
1899 emit_instrumentation_call (MonoCompile *cfg, void *func)
1901 MonoInst *iargs [1];
1904  * Avoid instrumenting inlined methods since it can
1905  * distort profiling results.
1907 if (cfg->method != cfg->current_method)
1910 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1911 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1912 mono_emit_jit_icall (cfg, func, iargs);
/* Choose the call opcode matching the call's return type:
 * OP_*CALL for direct calls, OP_*CALL_REG when calli != 0,
 * OP_*CALL_MEMBASE when virt != 0. Enums and generic instances are
 * unwrapped and re-dispatched (the 'goto' targets are elided in this
 * extract). */
1917 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1920 type = mini_get_underlying_type (type);
1921 switch (type->type) {
1922 case MONO_TYPE_VOID:
1923 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1930 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1934 case MONO_TYPE_FNPTR:
1935 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1936 case MONO_TYPE_CLASS:
1937 case MONO_TYPE_STRING:
1938 case MONO_TYPE_OBJECT:
1939 case MONO_TYPE_SZARRAY:
1940 case MONO_TYPE_ARRAY:
1941 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1944 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1947 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1949 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1951 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1952 case MONO_TYPE_VALUETYPE:
1953 if (type->data.klass->enumtype) {
1954 type = mono_class_enum_basetype (type->data.klass);
1957 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1958 case MONO_TYPE_TYPEDBYREF:
1959 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1960 case MONO_TYPE_GENERICINST:
1961 type = &type->data.generic_class->container_class->byval_arg;
1964 case MONO_TYPE_MVAR:
1966 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1968 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1973 //XXX this ignores if t is byref
/* True for the primitive scalar MonoTypes: BOOLEAN..U8 plus the
 * native-int range I..U. Note the byref flag of T is NOT inspected. */
1974 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1977  * target_type_is_incompatible:
1978  * @cfg: MonoCompile context
1980  * Check that the item @arg on the evaluation stack can be stored
1981  * in the target type (can be a local, or field, etc).
1982  * The cfg arg can be used to check if we need verification or just
1985  * Returns: non-0 value if arg can't be stored on a target.
1988 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1990 MonoType *simple_type;
1993 if (target->byref) {
1994 /* FIXME: check that the pointed to types match */
1995 if (arg->type == STACK_MP) {
1996 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1997 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1998 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2000 /* if the target is native int& or same type */
2001 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2004 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2005 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2006 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2010 if (arg->type == STACK_PTR)
/* Non-byref target: compare the lowered target type against the
 * argument's stack type, case by case. */
2015 simple_type = mini_get_underlying_type (target);
2016 switch (simple_type->type) {
2017 case MONO_TYPE_VOID:
2025 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2029 /* STACK_MP is needed when setting pinned locals */
2030 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2035 case MONO_TYPE_FNPTR:
2037  * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2038  * in native int. (#688008).
2040 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2043 case MONO_TYPE_CLASS:
2044 case MONO_TYPE_STRING:
2045 case MONO_TYPE_OBJECT:
2046 case MONO_TYPE_SZARRAY:
2047 case MONO_TYPE_ARRAY:
2048 if (arg->type != STACK_OBJ)
2050 /* FIXME: check type compatibility */
2054 if (arg->type != STACK_I8)
2058 if (arg->type != cfg->r4_stack_type)
2062 if (arg->type != STACK_R8)
2065 case MONO_TYPE_VALUETYPE:
2066 if (arg->type != STACK_VTYPE)
2068 klass = mono_class_from_mono_type (simple_type);
2069 if (klass != arg->klass)
2072 case MONO_TYPE_TYPEDBYREF:
2073 if (arg->type != STACK_VTYPE)
2075 klass = mono_class_from_mono_type (simple_type);
2076 if (klass != arg->klass)
2079 case MONO_TYPE_GENERICINST:
2080 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2081 MonoClass *target_class;
2082 if (arg->type != STACK_VTYPE)
2084 klass = mono_class_from_mono_type (simple_type);
2085 target_class = mono_class_from_mono_type (target);
2086 /* The second cases is needed when doing partial sharing */
2087 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2091 if (arg->type != STACK_OBJ)
2093 /* FIXME: check type compatibility */
2097 case MONO_TYPE_MVAR:
2098 g_assert (cfg->gshared);
2099 if (mini_type_var_is_vt (simple_type)) {
2100 if (arg->type != STACK_VTYPE)
2103 if (arg->type != STACK_OBJ)
2108 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2114  * Prepare arguments for passing to a function call.
2115  * Return a non-zero value if the arguments can't be passed to the given
2117  * The type checks are not yet complete and some conversions may need
2118  * casts on 32 or 64 bit architectures.
2120  * FIXME: implement this using target_type_is_incompatible ()
2123 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2125 MonoType *simple_type;
/* 'this' (args[0]) must be an object, managed pointer or native pointer */
2129 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2133 for (i = 0; i < sig->param_count; ++i) {
2134 if (sig->params [i]->byref) {
2135 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2139 simple_type = mini_get_underlying_type (sig->params [i]);
2141 switch (simple_type->type) {
2142 case MONO_TYPE_VOID:
2151 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2157 case MONO_TYPE_FNPTR:
2158 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2161 case MONO_TYPE_CLASS:
2162 case MONO_TYPE_STRING:
2163 case MONO_TYPE_OBJECT:
2164 case MONO_TYPE_SZARRAY:
2165 case MONO_TYPE_ARRAY:
2166 if (args [i]->type != STACK_OBJ)
2171 if (args [i]->type != STACK_I8)
2175 if (args [i]->type != cfg->r4_stack_type)
2179 if (args [i]->type != STACK_R8)
2182 case MONO_TYPE_VALUETYPE:
2183 if (simple_type->data.klass->enumtype) {
2184 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2187 if (args [i]->type != STACK_VTYPE)
2190 case MONO_TYPE_TYPEDBYREF:
2191 if (args [i]->type != STACK_VTYPE)
2194 case MONO_TYPE_GENERICINST:
2195 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2198 case MONO_TYPE_MVAR:
/* gsharedvt type: the value is on the stack as a vtype */
2200 if (args [i]->type != STACK_VTYPE)
2204 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALL_MEMBASE (virtual) opcode to the corresponding direct
 * OP_*CALL opcode. NOTE(review): the return statements between case
 * labels are elided in this extract. */
2212 callvirt_to_call (int opcode)
2215 case OP_CALL_MEMBASE:
2217 case OP_VOIDCALL_MEMBASE:
2219 case OP_FCALL_MEMBASE:
2221 case OP_RCALL_MEMBASE:
2223 case OP_VCALL_MEMBASE:
2225 case OP_LCALL_MEMBASE:
2228 g_assert_not_reached ();
/* Map an OP_*CALL_MEMBASE (virtual) opcode to the corresponding
 * indirect OP_*CALL_REG opcode. */
2235 callvirt_to_call_reg (int opcode)
2238 case OP_CALL_MEMBASE:
2240 case OP_VOIDCALL_MEMBASE:
2241 return OP_VOIDCALL_REG;
2242 case OP_FCALL_MEMBASE:
2243 return OP_FCALL_REG;
2244 case OP_RCALL_MEMBASE:
2245 return OP_RCALL_REG;
2246 case OP_VCALL_MEMBASE:
2247 return OP_VCALL_REG;
2248 case OP_LCALL_MEMBASE:
2249 return OP_LCALL_REG;
2251 g_assert_not_reached ();
2257 /* Either METHOD or IMT_ARG needs to be set */
/* Materialize the IMT argument (either a copy of IMT_ARG's register or a
 * METHODCONST runtime constant for METHOD) and attach it to CALL in
 * MONO_ARCH_IMT_REG. LLVM compilation records the register in
 * call->imt_arg_reg as well. */
2259 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2263 if (COMPILE_LLVM (cfg)) {
2265 method_reg = alloc_preg (cfg);
2266 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2268 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2269 method_reg = ins->dreg;
2273 call->imt_arg_reg = method_reg;
2275 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path */
2280 method_reg = alloc_preg (cfg);
2281 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2283 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2284 method_reg = ins->dreg;
2287 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from mempool MP and fill in its type and
 * target. NOTE(review): the assignments of the ip/type fields are
 * elided in this extract. */
2290 static MonoJumpInfo *
2291 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2293 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2297 ji->data.target = target;
/* cfg-aware wrapper over mono_class_check_context_used (); the guard
 * condition before this call is elided in this extract. */
2303 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2306 return mono_class_check_context_used (klass);
/* cfg-aware wrapper over mono_method_check_context_used (); the guard
 * condition before this call is elided in this extract. */
2312 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2315 return mono_method_check_context_used (method);
2321  * check_method_sharing:
2323  * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* pass_vtable: static or valuetype methods of generic classes whose
 * instantiation is sharable (and which have no method inst).
 * pass_mrgctx: methods with a method_inst whose instantiation is
 * sharable, or gsharedvt signatures. The two are mutually exclusive
 * (asserted). Results are returned through the optional out params. */
2326 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2328 gboolean pass_vtable = FALSE;
2329 gboolean pass_mrgctx = FALSE;
2331 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2332 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2333 gboolean sharable = FALSE;
2335 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2339  * Pass vtable iff target method might
2340  * be shared, which means that sharing
2341  * is enabled for its class and its
2342  * context is sharable (and it's not a
2345 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2349 if (mini_method_get_context (cmethod) &&
2350 mini_method_get_context (cmethod)->method_inst) {
2351 g_assert (!pass_vtable);
2353 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2356 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2361 if (out_pass_vtable)
2362 *out_pass_vtable = pass_vtable;
2363 if (out_pass_mrgctx)
2364 *out_pass_mrgctx = pass_mrgctx;
/* Build a MonoCallInst for SIG: selects the call opcode from the return
 * type (or OP_TAILCALL), arranges the vtype-return temporary/vret_var,
 * converts R4 args on soft-float targets, and lets the backend (or LLVM)
 * emit the out-args. Does NOT add the call to the bblock — callers do
 * that. */
2367 inline static MonoCallInst *
2368 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2369 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2373 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls leave the method, so emit the 'leave' profiler event now */
2381 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2383 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2385 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2388 call->signature = sig;
2389 call->rgctx_reg = rgctx;
2390 sig_ret = mini_get_underlying_type (sig->ret);
2392 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2395 if (mini_type_is_vtype (sig_ret)) {
2396 call->vret_var = cfg->vret_addr;
2397 //g_assert_not_reached ();
2399 } else if (mini_type_is_vtype (sig_ret)) {
2400 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2403 temp->backend.is_pinvoke = sig->pinvoke;
2406  * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2407  * address of return value to increase optimization opportunities.
2408  * Before vtype decomposition, the dreg of the call ins itself represents the
2409  * fact the call modifies the return value. After decomposition, the call will
2410  * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2411  * will be transformed into an LDADDR.
2413 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2414 loada->dreg = alloc_preg (cfg);
2415 loada->inst_p0 = temp;
2416 /* We reference the call too since call->dreg could change during optimization */
2417 loada->inst_p1 = call;
2418 MONO_ADD_INS (cfg->cbb, loada);
2420 call->inst.dreg = temp->dreg;
2422 call->vret_var = loada;
2423 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2424 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2426 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2427 if (COMPILE_SOFT_FLOAT (cfg)) {
2429  * If the call has a float argument, we would need to do an r8->r4 conversion using
2430  * an icall, but that cannot be done during the call sequence since it would clobber
2431  * the call registers + the stack. So we do it before emitting the call.
2433 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2435 MonoInst *in = call->args [i];
2437 if (i >= sig->hasthis)
2438 t = sig->params [i - sig->hasthis];
2440 t = &mono_defaults.int_class->byval_arg;
2441 t = mono_type_get_underlying_type (t);
2443 if (!t->byref && t->type == MONO_TYPE_R4) {
2444 MonoInst *iargs [1];
2448 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2450 /* The result will be in an int vreg */
2451 call->args [i] = conv;
2457 call->need_unbox_trampoline = unbox_trampoline;
2460 if (COMPILE_LLVM (cfg))
2461 mono_llvm_emit_call (cfg, call);
2463 mono_arch_emit_call (cfg, call);
2465 mono_arch_emit_call (cfg, call);
/* track the max out-arg area and the fact this method makes calls */
2468 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2469 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach RGCTX_REG as the rgctx out-argument of CALL
 * (MONO_ARCH_RGCTX_REG) and mark the cfg/call as using it. */
2475 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2477 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2478 cfg->uses_rgctx_reg = TRUE;
2479 call->rgctx_reg = TRUE;
2481 call->rgctx_arg_reg = rgctx_reg;
/* Emit an indirect call through ADDR with signature SIG.
 * The rgctx argument (if any) is copied to a fresh register before the
 * out-args are emitted, and attached after. For pinvoke wrappers with
 * cfg->check_pinvoke_callconv, the stack pointer is saved before the
 * call and compared after it, throwing ExecutionEngineException on a
 * calling-convention mismatch. */
2485 inline static MonoInst*
2486 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2491 gboolean check_sp = FALSE;
2493 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2494 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2496 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2501 rgctx_reg = mono_alloc_preg (cfg);
2502 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* remember SP before the call so it can be validated afterwards */
2506 if (!cfg->stack_inbalance_var)
2507 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2509 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2510 ins->dreg = cfg->stack_inbalance_var->dreg;
2511 MONO_ADD_INS (cfg->cbb, ins);
2514 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2516 call->inst.sreg1 = addr->dreg;
2519 emit_imt_argument (cfg, call, NULL, imt_arg);
2521 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* post-call SP check */
2526 sp_reg = mono_alloc_preg (cfg);
2528 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2530 MONO_ADD_INS (cfg->cbb, ins);
2532 /* Restore the stack so we don't crash when throwing the exception */
2533 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2534 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2535 MONO_ADD_INS (cfg->cbb, ins);
2537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2538 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2542 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2544 return (MonoInst*)call;
2548 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2551 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2554 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2555 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2557 #ifndef DISABLE_REMOTING
2558 gboolean might_be_remote = FALSE;
2560 gboolean virtual_ = this_ins != NULL;
2561 gboolean enable_for_aot = TRUE;
2564 MonoInst *call_target = NULL;
2566 gboolean need_unbox_trampoline;
2569 sig = mono_method_signature (method);
2571 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2572 g_assert_not_reached ();
2575 rgctx_reg = mono_alloc_preg (cfg);
2576 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2579 if (method->string_ctor) {
2580 /* Create the real signature */
2581 /* FIXME: Cache these */
2582 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2583 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2588 context_used = mini_method_check_context_used (cfg, method);
2590 #ifndef DISABLE_REMOTING
2591 might_be_remote = this_ins && sig->hasthis &&
2592 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2593 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2595 if (might_be_remote && context_used) {
2598 g_assert (cfg->gshared);
2600 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2602 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2606 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2607 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2609 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2611 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2613 #ifndef DISABLE_REMOTING
2614 if (might_be_remote)
2615 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2618 call->method = method;
2619 call->inst.flags |= MONO_INST_HAS_METHOD;
2620 call->inst.inst_left = this_ins;
2621 call->tail_call = tail;
2624 int vtable_reg, slot_reg, this_reg;
2627 this_reg = this_ins->dreg;
2629 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2630 MonoInst *dummy_use;
2632 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2634 /* Make a call to delegate->invoke_impl */
2635 call->inst.inst_basereg = this_reg;
2636 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2637 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2639 /* We must emit a dummy use here because the delegate trampoline will
2640 replace the 'this' argument with the delegate target making this activation
2641 no longer a root for the delegate.
2642 This is an issue for delegates that target collectible code such as dynamic
2643 methods of GC'able assemblies.
2645 For a test case look into #667921.
2647 FIXME: a dummy use is not the best way to do it as the local register allocator
2648 will put it on a caller save register and spil it around the call.
2649 Ideally, we would either put it on a callee save register or only do the store part.
2651 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2653 return (MonoInst*)call;
2656 if ((!cfg->compile_aot || enable_for_aot) &&
2657 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2658 (MONO_METHOD_IS_FINAL (method) &&
2659 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2660 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2662 * the method is not virtual, we just need to ensure this is not null
2663 * and then we can call the method directly.
2665 #ifndef DISABLE_REMOTING
2666 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2668 * The check above ensures method is not gshared, this is needed since
2669 * gshared methods can't have wrappers.
2671 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2675 if (!method->string_ctor)
2676 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2678 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2679 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2681 * the method is virtual, but we can statically dispatch since either
2682 * it's class or the method itself are sealed.
2683 * But first we need to ensure it's not a null reference.
2685 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2687 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2688 } else if (call_target) {
2689 vtable_reg = alloc_preg (cfg);
2690 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2692 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2693 call->inst.sreg1 = call_target->dreg;
2694 call->inst.flags &= !MONO_INST_HAS_METHOD;
2696 vtable_reg = alloc_preg (cfg);
2697 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2698 if (mono_class_is_interface (method->klass)) {
2699 guint32 imt_slot = mono_method_get_imt_slot (method);
2700 emit_imt_argument (cfg, call, call->method, imt_arg);
2701 slot_reg = vtable_reg;
2702 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2704 slot_reg = vtable_reg;
2705 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2706 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2708 g_assert (mono_method_signature (method)->generic_param_count);
2709 emit_imt_argument (cfg, call, call->method, imt_arg);
2713 call->inst.sreg1 = slot_reg;
2714 call->inst.inst_offset = offset;
2715 call->is_virtual = TRUE;
2719 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
2728 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2730 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2734 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2741 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2744 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2746 return (MonoInst*)call;
2750 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2752 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2756 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2760 * mono_emit_abs_call:
2762 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2764 inline static MonoInst*
2765 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2766 MonoMethodSignature *sig, MonoInst **args)
2768 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2772 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2775 if (cfg->abs_patches == NULL)
2776 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2777 g_hash_table_insert (cfg->abs_patches, ji, ji);
2778 ins = mono_emit_native_call (cfg, ji, sig, args);
2779 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2783 static MonoMethodSignature*
2784 sig_to_rgctx_sig (MonoMethodSignature *sig)
2786 // FIXME: memory allocation
2787 MonoMethodSignature *res;
2790 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2791 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2792 res->param_count = sig->param_count + 1;
2793 for (i = 0; i < sig->param_count; ++i)
2794 res->params [i] = sig->params [i];
2795 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2799 /* Make an indirect call to FSIG passing an additional argument */
2801 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2803 MonoMethodSignature *csig;
2804 MonoInst *args_buf [16];
2806 int i, pindex, tmp_reg;
2808 /* Make a call with an rgctx/extra arg */
2809 if (fsig->param_count + 2 < 16)
2812 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2815 args [pindex ++] = orig_args [0];
2816 for (i = 0; i < fsig->param_count; ++i)
2817 args [pindex ++] = orig_args [fsig->hasthis + i];
2818 tmp_reg = alloc_preg (cfg);
2819 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2820 csig = sig_to_rgctx_sig (fsig);
2821 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2824 /* Emit an indirect call to the function descriptor ADDR */
2826 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2828 int addr_reg, arg_reg;
2829 MonoInst *call_target;
2831 g_assert (cfg->llvm_only);
2834 * addr points to a <addr, arg> pair, load both of them, and
2835 * make a call to addr, passing arg as an extra arg.
2837 addr_reg = alloc_preg (cfg);
2838 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2839 arg_reg = alloc_preg (cfg);
2840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2842 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
2846 direct_icalls_enabled (MonoCompile *cfg)
2850 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2852 if (cfg->compile_llvm && !cfg->llvm_only)
2855 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2861 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2864 * Call the jit icall without a wrapper if possible.
2865 * The wrapper is needed for the following reasons:
2866 * - to handle exceptions thrown using mono_raise_exceptions () from the
2867 * icall function. The EH code needs the lmf frame pushed by the
2868 * wrapper to be able to unwind back to managed code.
2869 * - to be able to do stack walks for asynchronously suspended
2870 * threads when debugging.
2872 if (info->no_raise && direct_icalls_enabled (cfg)) {
2876 if (!info->wrapper_method) {
2877 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2878 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2880 mono_memory_barrier ();
2884 * Inline the wrapper method, which is basically a call to the C icall, and
2885 * an exception check.
2887 costs = inline_method (cfg, info->wrapper_method, NULL,
2888 args, NULL, il_offset, TRUE);
2889 g_assert (costs > 0);
2890 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2894 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2899 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2901 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2902 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2906 * Native code might return non register sized integers
2907 * without initializing the upper bits.
2909 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2910 case OP_LOADI1_MEMBASE:
2911 widen_op = OP_ICONV_TO_I1;
2913 case OP_LOADU1_MEMBASE:
2914 widen_op = OP_ICONV_TO_U1;
2916 case OP_LOADI2_MEMBASE:
2917 widen_op = OP_ICONV_TO_I2;
2919 case OP_LOADU2_MEMBASE:
2920 widen_op = OP_ICONV_TO_U2;
2926 if (widen_op != -1) {
2927 int dreg = alloc_preg (cfg);
2930 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2931 widen->type = ins->type;
2942 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2944 MonoInst *args [16];
2946 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2947 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2949 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2953 get_memcpy_method (void)
2955 static MonoMethod *memcpy_method = NULL;
2956 if (!memcpy_method) {
2957 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2959 g_error ("Old corlib found. Install a new one");
2961 return memcpy_method;
2965 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2967 MonoClassField *field;
2968 gpointer iter = NULL;
2970 while ((field = mono_class_get_fields (klass, &iter))) {
2973 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2975 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2976 if (mini_type_is_reference (mono_field_get_type (field))) {
2977 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2978 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2980 MonoClass *field_class = mono_class_from_mono_type (field->type);
2981 if (field_class->has_references)
2982 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE through PTR. No-op when the
 * compile doesn't generate write barriers. Three strategies, in order of
 * preference: a dedicated backend opcode, inline card-table marking, or a call
 * to the GC-provided write barrier method.
 * NOTE(review): this extract is missing some structural lines (braces,
 * declarations, return); code left untouched.
 */
emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
	int card_table_shift_bits;
	gpointer card_table_mask;
	/* keeps VALUE alive across the barrier for the register allocator */
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;

	if (!cfg->gen_write_barriers)

	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

	/* Fastest path: a single backend opcode; JIT only (not AOT, not LLVM). */
	if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {

		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		wbarrier->sreg2 = value->dreg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table) {
		int offset_reg = alloc_preg (cfg);

		/*
		 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
		 * collector case, so, for the serial collector, it might slightly slow down nursery
		 * collections. We also expect that the host system and the target system have the same card
		 * table configuration, which is the case if they have the same pointer size.
		 */

		/* card index = (address >> shift) [& mask] */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
		card_reg = ins->dreg;

		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		/* mark the card as dirty */
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
		/* Fallback: call the GC's managed write barrier method. */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);

	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0]. Returns whether the copy was emitted inline
 * (larger copies are delegated to the mono_gc_wbarrier_value_copy_bitmap icall).
 * NOTE(review): this extract is missing some structural lines (braces,
 * returns); code left untouched.
 */
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	/* bit i set => pointer slot i of the value holds a reference */
	unsigned need_wb = 0;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < SIZEOF_VOID_P)

	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
	if (size > 32 * SIZEOF_VOID_P)

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	/* We don't unroll more than 5 stores to avoid code bloat. */
	if (size > 5 * SIZEOF_VOID_P) {
		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
		size += (SIZEOF_VOID_P - 1);
		size &= ~(SIZEOF_VOID_P - 1);

		EMIT_NEW_ICONST (cfg, iargs [2], size);
		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/* running destination pointer, advanced as pointer-sized words are copied */
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	while (size >= SIZEOF_VOID_P) {
		MonoInst *load_inst;
		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
		load_inst->dreg = tmp_reg;
		load_inst->inst_basereg = srcreg;
		load_inst->inst_offset = offset;
		MONO_ADD_INS (cfg->cbb, load_inst);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

			/* only slots flagged in the bitmap need a barrier */
			emit_write_barrier (cfg, iargs [0], load_inst);

		offset += SIZEOF_VOID_P;
		size -= SIZEOF_VOID_P;

		/*tmp += sizeof (void*)*/
		if (size >= SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);

	/* Those cannot be references since size < sizeof (void*) */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);

		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);

		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 * If @native is TRUE, the native (marshalled) size/layout is used and the
 * struct must contain no references. Under gsharedvt the size and the memcpy
 * helper are fetched from the runtime generic context.
 * NOTE(review): this extract is missing some structural lines (braces,
 * else branches); code left untouched.
 */
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
	MonoInst *iargs [4];
	MonoMethod *memcpy_method;
	/* non-NULL only for gsharedvt types, whose size is a runtime value */
	MonoInst *size_ins = NULL;
	MonoInst *memcpy_ins = NULL;

	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));

	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (mini_is_gsharedvt_klass (klass)) {
		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);

		n = mono_class_native_size (klass, &align);
		n = mono_class_value_size (klass, &align);

	/* if native is true there should be no references in the struct */
	if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {

			context_used = mini_class_check_context_used (cfg, klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
			} else if (context_used) {
				/* iargs [2] is the klass argument for the value-copy icall */
				iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
				if (!cfg->compile_aot)
					mono_class_compute_gc_descriptor (klass);

				mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
				mono_emit_jit_icall (cfg, mono_value_copy, iargs);

	/* No write barriers needed: plain memcpy, inlined when small enough. */
	if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);

		iargs [2] = size_ins;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

	memcpy_method = get_memcpy_method ();
		mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3218 get_memset_method (void)
3220 static MonoMethod *memset_method = NULL;
3221 if (!memset_method) {
3222 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3224 g_error ("Old corlib found. Install a new one");
3226 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the valuetype of type @klass at address
 * @dest->dreg. Under gsharedvt, the size and a bzero helper are fetched from
 * the runtime generic context; otherwise small values are zeroed inline and
 * larger ones via the corlib memset helper.
 * NOTE(review): this extract is missing some structural lines (braces,
 * returns); code left untouched.
 */
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
	MonoInst *iargs [3];
	MonoMethod *memset_method;
	/* non-NULL only for gsharedvt types */
	MonoInst *size_ins = NULL;
	MonoInst *bzero_ins = NULL;
	static MonoMethod *bzero_method;

	/* FIXME: Optimize this for the case when dest is an LDADDR */
	mono_class_init (klass);
	if (mini_is_gsharedvt_klass (klass)) {
		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
			/* lazily cached; alignment-1 variant makes no alignment assumptions */
			bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
		g_assert (bzero_method);
		iargs [1] = size_ins;
		mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);

	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));

	n = mono_class_value_size (klass, &align);

	if (n <= sizeof (gpointer) * 8) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);

		memset_method = get_memset_method ();
		EMIT_NEW_ICONST (cfg, iargs [1], 0);
		EMIT_NEW_ICONST (cfg, iargs [2], n);
		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit IR to return either the this pointer for instance method,
 * or the mrgctx for static methods.
 * Generic-shared code only (asserts cfg->gshared). For static/valuetype
 * methods the vtable (or mrgctx) is read from the vtable variable; for
 * instance methods it is loaded from the receiver.
 * NOTE(review): this extract is missing some structural lines (braces,
 * returns, else); code left untouched.
 */
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
	MonoInst *this_ins = NULL;

	g_assert (cfg->gshared);

	/* instance method sharing only over the class context: use 'this' */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		/* method context used: the context is the MRGCTX */
		MonoInst *mrgctx_loc, *mrgctx_var;

		g_assert (!this_ins);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this_ins);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* the var actually holds an mrgctx; deref to reach the class vtable */
			MonoInst *mrgctx_var = vtable_var;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;

		/* instance method: load the vtable from the receiver */
		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3326 static MonoJumpInfoRgctxEntry *
3327 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3329 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3330 res->method = method;
3331 res->in_mrgctx = in_mrgctx;
3332 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3333 res->data->type = patch_type;
3334 res->data->data.target = patch_data;
3335 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch the rgctx entry ENTRY from RGCTX: walk the rgctx
 * array chain to the slot; if any link or the slot itself is NULL, fall back
 * to the mono_fill_{method,class}_rgctx icalls which allocate and fill it.
 * llvm-only also uses the icall path unconditionally (no compile-time slot).
 * NOTE(review): this extract is missing some structural lines (braces,
 * returns, else); code left untouched.
 */
static inline MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
	MonoInst *args [16];

	// FIXME: No fastpath since the slot is not a compile time constant
	EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
	if (entry->in_mrgctx)
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);

	/*
	 * FIXME: This can be called during decompose, which is a problem since it creates
	 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
	 */
	int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
	MonoBasicBlock *is_null_bb, *end_bb;
	MonoInst *res, *ins, *call;

	slot = mini_get_rgctx_entry_slot (entry);

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
		/* mrgctx slots sit after the fixed MRGCTX header */
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	/* each successive rgctx array doubles; find the depth holding 'index' */
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, is_null_bb);

		rgctx_reg = rgctx->dreg;

		rgctx_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
		// FIXME: Avoid this check by allocating the table when the vtable is created etc.
		NEW_BBLOCK (cfg, is_null_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	for (i = 0; i < depth; ++i) {
		int array_reg = alloc_preg (cfg);

		/* load ptr to next array */
		if (mrgctx && i == 0)
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
		rgctx_reg = array_reg;
		/* is the ptr null? */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
		/* if yes, jump to actual trampoline */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	/* slot 0 of each array is the link to the next array, data starts at 1 */
	val_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
	/* is the slot null? */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
	/* if yes, jump to actual trampoline */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	/* fastpath hit: move the slot value to the result register */
	res_reg = alloc_preg (cfg);
	MONO_INST_NEW (cfg, ins, OP_MOVE);
	ins->dreg = res_reg;
	ins->sreg1 = val_reg;
	MONO_ADD_INS (cfg->cbb, ins);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* slowpath: ask the runtime to fill the slot */
	MONO_START_BB (cfg, is_null_bb);

	EMIT_NEW_ICONST (cfg, args [1], index);
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
	MONO_INST_NEW (cfg, ins, OP_MOVE);
	ins->dreg = res_reg;
	ins->sreg1 = call->dreg;
	MONO_ADD_INS (cfg->cbb, ins);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, end_bb);
3451 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3454 static inline MonoInst*
3455 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3458 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3460 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3464 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3465 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3467 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3468 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3470 return emit_rgctx_fetch (cfg, rgctx, entry);
3474 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3475 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3477 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3478 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3480 return emit_rgctx_fetch (cfg, rgctx, entry);
3484 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3485 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3487 MonoJumpInfoGSharedVtCall *call_info;
3488 MonoJumpInfoRgctxEntry *entry;
3491 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3492 call_info->sig = sig;
3493 call_info->method = cmethod;
3495 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3496 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3498 return emit_rgctx_fetch (cfg, rgctx, entry);
3502 * emit_get_rgctx_virt_method:
3504 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3507 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3508 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3510 MonoJumpInfoVirtMethod *info;
3511 MonoJumpInfoRgctxEntry *entry;
3514 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3515 info->klass = klass;
3516 info->method = virt_method;
3518 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3519 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3521 return emit_rgctx_fetch (cfg, rgctx, entry);
3525 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3526 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3528 MonoJumpInfoRgctxEntry *entry;
3531 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3532 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3534 return emit_rgctx_fetch (cfg, rgctx, entry);
3538 * emit_get_rgctx_method:
3540 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3541 * normal constants, else emit a load from the rgctx.
3544 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3545 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3547 if (!context_used) {
3550 switch (rgctx_type) {
3551 case MONO_RGCTX_INFO_METHOD:
3552 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3554 case MONO_RGCTX_INFO_METHOD_RGCTX:
3555 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3558 g_assert_not_reached ();
3561 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3562 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3564 return emit_rgctx_fetch (cfg, rgctx, entry);
3569 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3570 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3572 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3573 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3575 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt info entry for (DATA, RGCTX_TYPE) in the
 * method's gsharedvt info template, registering a new entry (growing the table
 * as needed) if none matches. LOCAL_OFFSET entries are never deduplicated.
 * NOTE(review): this extract is missing some structural lines (braces,
 * returns); code left untouched.
 */
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
	MonoRuntimeGenericContextInfoTemplate *template_;

	/* look for an existing matching entry */
	for (i = 0; i < info->num_entries; ++i) {
		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];

		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)

	/* grow the entry table (doubling, starting at 16) */
	if (info->num_entries == info->count_entries) {
		MonoRuntimeGenericContextInfoTemplate *new_entries;
		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;

		new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);

		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
		info->entries = new_entries;
		info->count_entries = new_count_entries;

	/* append the new entry */
	idx = info->num_entries;
	template_ = &info->entries [idx];
	template_->info_type = rgctx_type;
	template_->data = data;

	info->num_entries ++;
/*
 * emit_get_gsharedvt_info:
 *
 *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
 * NOTE(review): some structural lines (braces, declarations, return) are
 * missing from this extract; code left untouched.
 */
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)

	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
	/* Load info->entries [idx] */
	dreg = alloc_preg (cfg);
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3635 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3637 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor of KLASS if it hasn't executed yet.
 * On return the caller must check @klass for load errors.
 * NOTE(review): some structural lines (braces, else) are missing from this
 * extract; code left untouched.
 */
emit_class_init (MonoCompile *cfg, MonoClass *klass)
	MonoInst *vtable_arg;

	context_used = mini_class_check_context_used (cfg, klass);

		/* shared code: fetch the vtable through the rgctx */
		vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
							 klass, MONO_RGCTX_INFO_VTABLE);
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);

	if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {

		/*
		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
		 */
		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
		ins->sreg1 = vtable_arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		MonoBasicBlock *inited_bb;
		MonoInst *args [16];

		/* fastpath: skip the icall when vtable->initialized is already set */
		inited_reg = alloc_ireg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));

		NEW_BBLOCK (cfg, inited_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);

		args [0] = vtable_arg;
		mono_emit_jit_icall (cfg, mono_generic_class_init, args);

		MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at native offset (ip - header->code),
 * but only when sequence points are enabled and METHOD is the method being
 * compiled (not an inlined callee). NONEMPTY_STACK presumably flags seq
 * points taken with values on the IL stack — TODO confirm against
 * seq-points.h; this excerpt is elided.
 */
3694 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3698 if (cfg->gen_seq_points && cfg->method == method) {
3699 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3701 ins->flags |= MONO_INST_NONEMPTY_STACK;
3702 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When the --debug=casts option (better_cast_details) is active, emit IR
 * that records the source class (from OBJ_REG's vtable) and the target
 * KLASS into the per-thread MonoJitTlsData (class_cast_from/class_cast_to),
 * so a failing cast can report both types. A null OBJ_REG skips the
 * recording via the is_null_bb branch. No-op when the debug option is off.
 * NOTE(review): elided excerpt — the NULL_CHECK parameter's use and the
 * tls_get error path's control flow are not fully visible here.
 */
3707 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3709 if (mini_get_debug_options ()->better_cast_details) {
3710 int vtable_reg = alloc_preg (cfg);
3711 int klass_reg = alloc_preg (cfg);
3712 MonoBasicBlock *is_null_bb = NULL;
3714 int to_klass_reg, context_used;
3717 NEW_BBLOCK (cfg, is_null_bb);
3719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3720 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3723 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3725 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* class_cast_from = obj->vtable->klass */
3729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3732 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3734 context_used = mini_class_check_context_used (cfg, klass);
3736 MonoInst *class_ins;
/* Shared code: look the target class up through the RGCTX. */
3738 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3739 to_klass_reg = class_ins->dreg;
3741 to_klass_reg = alloc_preg (cfg);
3742 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3747 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Counterpart of mini_save_cast_details (): clear the saved cast details
 * in the per-thread MonoJitTlsData once the cast has succeeded. Clearing
 * class_cast_from alone is sufficient, since the consumer checks that
 * field first. No-op unless better_cast_details is enabled.
 */
3752 mini_reset_cast_details (MonoCompile *cfg)
3754 /* Reset the variables holding the cast details */
3755 if (mini_get_debug_options ()->better_cast_details) {
3756 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3757 /* It is enough to reset the from field */
3758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR verifying that OBJ's runtime type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for covariant array
 * stores). The comparison strategy depends on compilation mode:
 *   - MONO_OPT_SHARED: compare obj->vtable->klass against a runtime class
 *     constant (patch info);
 *   - shared generics (context_used): compare vtables via an RGCTX lookup;
 *   - AOT: compare against a vtable constant;
 *   - JIT: compare against the vtable pointer as an immediate.
 * The vtable load is a faulting load, so a null OBJ raises NRE here.
 * NOTE(review): elided excerpt — error-return statements after the failed
 * mono_class_vtable () calls are not visible.
 */
3763 * On return the caller must check @array_class for load errors
3766 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3768 int vtable_reg = alloc_preg (cfg);
3771 context_used = mini_class_check_context_used (cfg, array_class);
3773 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
3775 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3777 if (cfg->opt & MONO_OPT_SHARED) {
3778 int class_reg = alloc_preg (cfg);
3781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3782 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3784 } else if (context_used) {
3785 MonoInst *vtable_ins;
3787 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3788 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3790 if (cfg->compile_aot) {
3794 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3796 vt_reg = alloc_preg (cfg);
3797 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3798 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3801 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3807 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3809 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox (VAL). In shared generic code
 * (context_used != 0) the method address comes from the RGCTX and the call
 * is an indirect calli (emit_llvmonly_calli under llvm-only, which also
 * records the signature in cfg->signatures); otherwise a direct call is
 * emitted, passing the vtable as the extra argument when
 * check_method_sharing () says the method takes one.
 */
3813 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3814 * generic code is generated.
3817 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3819 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3822 MonoInst *rgctx, *addr;
3824 /* FIXME: What if the class is shared? We might not
3825 have to get the address of the method from the
3827 addr = emit_get_rgctx_method (cfg, context_used, method,
3828 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3829 if (cfg->llvm_only) {
3830 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3831 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3833 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3835 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3838 gboolean pass_vtable, pass_mrgctx;
3839 MonoInst *rgctx_arg = NULL;
3841 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3842 g_assert (!pass_mrgctx);
3845 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3848 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3851 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IR for CEE_UNBOX of sp [0] to value type KLASS:
 *   1. faulting load of obj->vtable (null check);
 *   2. check vtable->rank == 0 (arrays cannot be unboxed) →
 *      InvalidCastException;
 *   3. compare vtable->klass->element_class against KLASS's element class
 *      (via RGCTX in shared code, via mini_emit_class_check otherwise) →
 *      InvalidCastException on mismatch;
 *   4. produce the data address: obj + sizeof (MonoObject), typed STACK_MP.
 * NOTE(review): elided excerpt — the branch structure joining the shared
 * and non-shared check paths is not fully visible.
 */
3856 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3860 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3861 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3862 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3863 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3865 obj_reg = sp [0]->dreg;
3866 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3867 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3869 /* FIXME: generics */
3870 g_assert (klass->rank == 0);
3873 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3874 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3877 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3880 MonoInst *element_class;
3882 /* This assertion is from the unboxcast insn */
3883 g_assert (klass->rank == 0);
3885 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3886 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3888 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3889 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3891 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3892 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3893 mini_reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the boxed data. */
3896 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3897 MONO_ADD_INS (cfg->cbb, add);
3898 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose instantiation (ref,
 * vtype or Nullable) is only known at runtime. Emits a three-way branch on
 * the RGCTX CLASS_BOX_TYPE slot:
 *   - vtype (fallthrough): address = obj + sizeof (MonoObject);
 *   - ref (is_ref_bb): spill the reference to a temporary and use the
 *     temporary's address;
 *   - Nullable (is_nullable_bb): call the class's unbox helper through a
 *     hand-built one-argument signature (object -> T).
 * All paths join at end_bb, where the value is loaded from addr_reg.
 * NOTE(review): elided excerpt — the cast (mono_object_castclass_unbox
 * icall) setup and several declarations are only partially visible.
 */
3905 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3907 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3908 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3912 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3918 args [1] = klass_inst;
3921 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3923 NEW_BBLOCK (cfg, is_ref_bb);
3924 NEW_BBLOCK (cfg, is_nullable_bb);
3925 NEW_BBLOCK (cfg, end_bb);
3926 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3928 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3930 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3931 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3933 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3934 addr_reg = alloc_dreg (cfg, STACK_MP);
/* vtype case: data lives right after the object header */
3938 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3939 MONO_ADD_INS (cfg->cbb, addr);
3941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3944 MONO_START_BB (cfg, is_ref_bb);
3946 /* Save the ref to a temporary */
3947 dreg = alloc_ireg (cfg);
3948 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3949 addr->dreg = addr_reg;
3950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3951 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3954 MONO_START_BB (cfg, is_nullable_bb);
3957 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3958 MonoInst *unbox_call;
3959 MonoMethodSignature *unbox_sig;
/* Build the (object) -> T signature by hand in the mempool. */
3961 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3962 unbox_sig->ret = &klass->byval_arg;
3963 unbox_sig->param_count = 1;
3964 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3967 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3969 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3971 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3972 addr->dreg = addr_reg;
3975 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3978 MONO_START_BB (cfg, end_bb);
3981 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS (FOR_BOX distinguishes
 * allocations made for boxing). Strategy selection visible here:
 *   - shared generics (first branch): fetch klass/vtable via the RGCTX and
 *     call either ves_icall_object_new (MONO_OPT_SHARED, takes a domain
 *     argument) or ves_icall_object_new_specific — unless a managed
 *     allocator from the GC is available and the instance size is known,
 *     in which case it is called directly with the size as iargs [1];
 *   - MONO_OPT_SHARED: domain + class constants, ves_icall_object_new;
 *   - AOT out-of-line corlib classes: specialized
 *     mono_helper_newobj_mscorlib helper keyed by the type token, to avoid
 *     relocations;
 *   - default: vtable constant plus either the GC's managed allocator or
 *     the function returned by mono_class_get_allocation_ftn () (which may
 *     request the instance size in gpointer-words via pass_lw).
 *   On class load failure the cfg exception is set and NULL is returned
 * (per the header comment).
 * NOTE(review): elided excerpt — several returns, braces and the pass_lw
 * declaration are missing from this view.
 */
3987 * Returns NULL and set the cfg exception on error.
3990 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3992 MonoInst *iargs [2];
3997 MonoRgctxInfoType rgctx_info;
3998 MonoInst *iargs [2];
3999 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4001 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4003 if (cfg->opt & MONO_OPT_SHARED)
4004 rgctx_info = MONO_RGCTX_INFO_KLASS;
4006 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4007 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4009 if (cfg->opt & MONO_OPT_SHARED) {
4010 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4012 alloc_ftn = ves_icall_object_new;
4015 alloc_ftn = ves_icall_object_new_specific;
4018 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4019 if (known_instance_size) {
4020 int size = mono_class_instance_size (klass);
4021 if (size < sizeof (MonoObject))
4022 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4024 EMIT_NEW_ICONST (cfg, iargs [1], size);
4026 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4029 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4032 if (cfg->opt & MONO_OPT_SHARED) {
4033 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4034 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4036 alloc_ftn = ves_icall_object_new;
4037 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4038 /* This happens often in argument checking code, eg. throw new FooException... */
4039 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4040 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4041 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4043 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4044 MonoMethod *managed_alloc = NULL;
4048 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4049 cfg->exception_ptr = klass;
4053 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4055 if (managed_alloc) {
4056 int size = mono_class_instance_size (klass);
4057 if (size < sizeof (MonoObject))
4058 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4060 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4061 EMIT_NEW_ICONST (cfg, iargs [1], size);
4062 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4064 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw case: allocator wants the size in pointer-words as arg 0. */
4066 guint32 lw = vtable->klass->instance_size;
4067 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4068 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4069 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4072 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4076 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS into an object reference.
 *   - Nullable<T>: call Nullable<T>.Box, either through the RGCTX (shared
 *     code, indirect calli / llvmonly calli) or directly, passing the
 *     vtable when check_method_sharing () requires it;
 *   - gsharedvt KLASS: runtime three-way branch on CLASS_BOX_TYPE —
 *     vtype: allocate + OP_STOREV_MEMBASE copy; ref: the value already is
 *     a reference, reload it through its variable's address; Nullable:
 *     call the class's box helper through a hand-built (T) -> object
 *     signature; all paths join in end_bb producing a STACK_OBJ result;
 *   - plain case (tail of the function): allocate and store the value
 *     after the object header.
 * Returns NULL with the cfg exception set on error (per header comment).
 * NOTE(review): elided excerpt — alloc NULL-checks and some braces are
 * not visible here.
 */
4080 * Returns NULL and set the cfg exception on error.
4083 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4085 MonoInst *alloc, *ins;
4087 if (mono_class_is_nullable (klass)) {
4088 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4091 if (cfg->llvm_only && cfg->gsharedvt) {
4092 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4093 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4094 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4096 /* FIXME: What if the class is shared? We might not
4097 have to get the method address from the RGCTX. */
4098 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4099 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4100 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4102 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4105 gboolean pass_vtable, pass_mrgctx;
4106 MonoInst *rgctx_arg = NULL;
4108 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4109 g_assert (!pass_mrgctx);
4112 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4115 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4118 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4122 if (mini_is_gsharedvt_klass (klass)) {
4123 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4124 MonoInst *res, *is_ref, *src_var, *addr;
4127 dreg = alloc_ireg (cfg);
4129 NEW_BBLOCK (cfg, is_ref_bb);
4130 NEW_BBLOCK (cfg, is_nullable_bb);
4131 NEW_BBLOCK (cfg, end_bb);
4132 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype fallthrough: allocate and copy the value after the header */
4140 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4143 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4144 ins->opcode = OP_STOREV_MEMBASE;
4146 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4147 res->type = STACK_OBJ;
4149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4152 MONO_START_BB (cfg, is_ref_bb);
4154 /* val is a vtype, so has to load the value manually */
4155 src_var = get_vreg_to_inst (cfg, val->dreg);
4157 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4158 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4159 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4163 MONO_START_BB (cfg, is_nullable_bb);
4166 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4167 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4169 MonoMethodSignature *box_sig;
4172 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4173 * construct that method at JIT time, so have to do things by hand.
4175 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4176 box_sig->ret = &mono_defaults.object_class->byval_arg;
4177 box_sig->param_count = 1;
4178 box_sig->params [0] = &klass->byval_arg;
4181 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4183 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4184 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4185 res->type = STACK_OBJ;
4189 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4191 MONO_START_BB (cfg, end_bb);
4195 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4199 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls may be called
 * directly (no wrapper). Published with a memory barrier; read without
 * locking afterwards. */
4204 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) can be invoked directly instead of
 * through a wrapper. Requires direct_icalls_enabled (); then allows
 * System.Math plus a whitelist of corlib classes (Decimal, Number, Buffer,
 * Monitor) — the criterion being that the icall never reaches
 * mono_raise_exception () (per the inline comment).
 * NOTE(review): elided excerpt — the return statements are not visible.
 */
4207 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4209 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4210 if (!direct_icalls_enabled (cfg))
4214 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4215 * Whitelist a few icalls for now.
4217 if (!direct_icall_type_hash) {
4218 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4220 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4221 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4222 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4223 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Barrier so readers never observe the pointer before the inserts. */
4224 mono_memory_barrier ();
4225 direct_icall_type_hash = h;
4228 if (cmethod->klass == mono_defaults.math_class)
4230 /* No locking needed */
4231 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD inspects its caller via a stack walk — visible
 * case: System.Type.GetType (). Callers presumably must not inline or
 * tail-call such methods; confirm against the full file (excerpt elided).
 */
4237 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4239 if (cmethod->klass == mono_defaults.systemtype_class) {
4240 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion of Enum.HasFlag (): load the enum value from
 * ENUM_THIS's address, AND it with ENUM_FLAG, and compare the result back
 * against ENUM_FLAG — i.e. ((this & flag) == flag), yielding a STACK_I4
 * boolean in ceq. Uses 32-bit (I*) or 64-bit (L*) opcodes depending on the
 * underlying enum type (is_i4, chosen by the elided switch); the four
 * instructions are then run through mono_decompose_opcode ().
 */
4246 static G_GNUC_UNUSED MonoInst*
4247 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4249 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4250 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4253 switch (enum_type->type) {
4256 #if SIZEOF_REGISTER == 8
4268 MonoInst *load, *and_, *cmp, *ceq;
4269 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4270 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4271 int dest_reg = alloc_ireg (cfg);
4273 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4274 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4275 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4276 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4278 ceq->type = STACK_I4;
4281 load = mono_decompose_opcode (cfg, load);
4282 and_ = mono_decompose_opcode (cfg, and_);
4283 cmp = mono_decompose_opcode (cfg, cmp);
4284 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate
 * object, then fill its fields directly —
 *   - target (with a write barrier when gen_write_barriers; NULL targets
 *     are skipped);
 *   - method (via RGCTX in shared code);
 *   - method_code: a per-domain slot (method_code_hash, allocated under
 *     the domain lock) that the runtime fills once METHOD is compiled,
 *     avoiding a lookup in mono_delegate_trampoline ();
 *   - invoke_impl / method_ptr: either from an AOT
 *     MONO_PATCH_INFO_DELEGATE_TRAMPOLINE constant or from a trampoline
 *     created at JIT time (virtual vs. normal variants);
 *   - method_is_virtual (OP_STOREI1).
 *   Under llvm_only the field setup is replaced by a call to
 * mono_llvmonly_init_delegate(_virtual). Returns NULL with the cfg
 * exception set on error (per header comment).
 * NOTE(review): elided excerpt — early-return conditions and several
 * variable declarations (domain, code_slot, dreg, ptr) are not visible.
 */
4292 * Returns NULL and set the cfg exception on error.
4294 static G_GNUC_UNUSED MonoInst*
4295 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4299 gpointer trampoline;
4300 MonoInst *obj, *method_ins, *tramp_ins;
4304 if (virtual_ && !cfg->llvm_only) {
4305 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4308 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4312 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4316 /* Inline the contents of mono_delegate_ctor */
4318 /* Set target field */
4319 /* Optimize away setting of NULL target */
4320 if (!MONO_INS_IS_PCONST_NULL (target)) {
4321 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4322 if (cfg->gen_write_barriers) {
4323 dreg = alloc_preg (cfg);
4324 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4325 emit_write_barrier (cfg, ptr, target);
4329 /* Set method field */
4330 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4331 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4334 * To avoid looking up the compiled code belonging to the target method
4335 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4336 * store it, and we fill it after the method has been compiled.
4338 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4339 MonoInst *code_slot_ins;
4342 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4344 domain = mono_domain_get ();
4345 mono_domain_lock (domain);
4346 if (!domain_jit_info (domain)->method_code_hash)
4347 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4348 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4350 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4351 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4353 mono_domain_unlock (domain);
4355 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4357 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4360 if (cfg->llvm_only) {
4361 MonoInst *args [16];
4366 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4367 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4370 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4376 if (cfg->compile_aot) {
4377 MonoDelegateClassMethodPair *del_tramp;
4379 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4380 del_tramp->klass = klass;
4381 del_tramp->method = context_used ? NULL : method;
4382 del_tramp->is_virtual = virtual_;
4383 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4386 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4388 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4389 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4392 /* Set invoke_impl field */
4394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4396 dreg = alloc_preg (cfg);
4397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4400 dreg = alloc_preg (cfg);
4401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4402 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4405 dreg = alloc_preg (cfg);
4406 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4409 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit IR for multi-dimensional array creation (CEE_NEWARR with RANK
 * dimensions): registers the rank-specific mono_array_new_va icall so it
 * gets a wrapper, marks the cfg as using varargs (which also disables
 * LLVM, with "array-new" recorded as the reason), and emits a native call
 * to the icall wrapper with the dimension arguments in SP.
 */
4415 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4417 MonoJitICallInfo *info;
4419 /* Need to register the icall so it gets an icall wrapper */
4420 info = mono_get_array_new_va_icall (rank);
4422 cfg->flags |= MONO_CFG_HAS_VARARGS;
4424 /* mono_array_new_va () needs a vararg calling convention */
4425 cfg->exception_message = g_strdup ("array-new");
4426 cfg->disable_llvm = TRUE;
4428 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4429 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * (See the original header below.) Implementation outline of what is
 * visible in this (elided) excerpt: for a restricted set of callees
 * (methods on object, interface methods, simple signatures — the long
 * condition at the top), the receiver, method, constrained class, a
 * deref flag and an argument pointer are packed into ARGS and the whole
 * call is delegated to the mono_gsharedvt_constrained_call icall at
 * runtime, which resolves ref-vs-vtype receiver handling. The single
 * in-argument, when present, is passed through a localloc-ed array in the
 * runtime_invoke () format; gsharedvt-typed arguments additionally pass
 * deref_arg = (BOX_TYPE != VTYPE). The boxed icall result is then
 * unwrapped: gsharedvt returns go through handle_unbox_gsharedvt (),
 * primitives/structs/enums are read from just past the MonoObject header.
 * Calls outside the supported shape hit GSHAREDVT_FAILURE.
 * NOTE(review): elided — *ref_emit_widen's interaction with the result
 * paths is only partially visible.
 */
4433 * handle_constrained_gsharedvt_call:
4435 * Handle constrained calls where the receiver is a gsharedvt type.
4436 * Return the instruction representing the call. Set the cfg exception on failure.
4439 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4440 gboolean *ref_emit_widen)
4442 MonoInst *ins = NULL;
4443 gboolean emit_widen = *ref_emit_widen;
4446 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4447 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4448 * pack the arguments into an array, and do the rest of the work in in an icall.
4450 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4451 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4452 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4453 MonoInst *args [16];
4456 * This case handles calls to
4457 * - object:ToString()/Equals()/GetHashCode(),
4458 * - System.IComparable<T>:CompareTo()
4459 * - System.IEquatable<T>:Equals ()
4460 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
4464 if (mono_method_check_context_used (cmethod))
4465 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD)
;
4467 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4468 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4470 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4471 if (fsig->hasthis && fsig->param_count) {
4472 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4473 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4474 ins->dreg = alloc_preg (cfg);
4475 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4476 MONO_ADD_INS (cfg->cbb, ins);
4479 if (mini_is_gsharedvt_type (fsig->params [0])) {
4480 int addr_reg, deref_arg_reg;
4482 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4483 deref_arg_reg = alloc_preg (cfg);
4484 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4485 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4487 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4488 addr_reg = ins->dreg;
4489 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4491 EMIT_NEW_ICONST (cfg, args [3], 0);
4492 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4495 EMIT_NEW_ICONST (cfg, args [3], 0);
4496 EMIT_NEW_ICONST (cfg, args [4], 0);
4498 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
4501 if (mini_is_gsharedvt_type (fsig->ret)) {
4502 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4503 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4507 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4508 MONO_ADD_INS (cfg->cbb, add);
4510 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4511 MONO_ADD_INS (cfg->cbb, ins);
4512 /* ins represents the call result */
4515 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4518 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var, exactly once per
 * compile: insert an OP_LOAD_GOTADDR at the very start of the entry basic
 * block (prepended before any existing code), then add a dummy use in
 * bb_exit so liveness keeps got_var alive across the whole method even
 * though real uses only appear later, in the back ends. No-op when there
 * is no got_var or it was already allocated.
 */
4527 mono_emit_load_got_addr (MonoCompile *cfg)
4529 MonoInst *getaddr, *dummy_use;
4531 if (!cfg->got_var || cfg->got_var_allocated)
4534 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4535 getaddr->cil_code = cfg->header->code;
4536 getaddr->dreg = cfg->got_var->dreg;
4538 /* Add it to the start of the first bblock */
4539 if (cfg->bb_entry->code) {
4540 getaddr->next = cfg->bb_entry->code;
4541 cfg->bb_entry->code = getaddr;
4544 MONO_ADD_INS (cfg->bb_entry, getaddr);
4546 cfg->got_var_allocated = TRUE;
4549 * Add a dummy use to keep the got_var alive, since real uses might
4550 * only be generated by the back ends.
4551 * Add it to end_bblock, so the variable's lifetime covers the whole
4553 * It would be better to make the usage of the got var explicit in all
4554 * cases when the backend needs it (i.e. calls, throw etc.), so this
4555 * wouldn't be needed.
4557 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4558 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size to inline; initialized lazily from MONO_INLINELIMIT or
 * INLINE_LENGTH_LIMIT. NOTE(review): lazy init is not synchronized —
 * presumably only reached from JIT compilation paths; confirm before
 * relying on thread-safety. */
4561 static int inline_limit;
4562 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Visible rejection conditions: cfg->disable_inline, inline depth > 10,
 * no obtainable header summary, NoInlining/Synchronized flags,
 * MarshalByRef classes, IL size >= inline_limit (unless
 * AggressiveInlining), classes whose .cctor cannot be run eagerly (several
 * cases: shared generics with a cctor, non-initialized vtables — the
 * long comment below explains why), soft-float methods with R4
 * parameters/returns, and methods on cfg->dont_inline.
 * NOTE(review): heavily elided — individual return TRUE/FALSE statements
 * are missing from this view; do not infer the polarity of a branch from
 * this excerpt alone.
 */
4565 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4567 MonoMethodHeaderSummary header;
4569 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4570 MonoMethodSignature *sig = mono_method_signature (method);
4574 if (cfg->disable_inline)
4579 if (cfg->inline_depth > 10)
4582 if (!mono_method_get_header_summary (method, &header))
4585 /*runtime, icall and pinvoke are checked by summary call*/
4586 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4587 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4588 (mono_class_is_marshalbyref (method->klass)) ||
4592 /* also consider num_locals? */
4593 /* Do the size check early to avoid creating vtables */
4594 if (!inline_limit_inited) {
4595 if (g_getenv ("MONO_INLINELIMIT"))
4596 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4598 inline_limit = INLINE_LENGTH_LIMIT;
4599 inline_limit_inited = TRUE;
4601 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4605 * if we can initialize the class of the method right away, we do,
4606 * otherwise we don't allow inlining if the class needs initialization,
4607 * since it would mean inserting a call to mono_runtime_class_init()
4608 * inside the inlined code
4610 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4613 if (!(cfg->opt & MONO_OPT_SHARED)) {
4614 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4615 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4616 if (method->klass->has_cctor) {
4617 vtable = mono_class_vtable (cfg->domain, method->klass);
4620 if (!cfg->compile_aot) {
4622 if (!mono_runtime_class_init_full (vtable, &error)) {
4623 mono_error_cleanup (&error);
4628 } else if (mono_class_is_before_field_init (method->klass)) {
4629 if (cfg->run_cctors && method->klass->has_cctor) {
4630 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4631 if (!method->klass->runtime_info)
4632 /* No vtable created yet */
4634 vtable = mono_class_vtable (cfg->domain, method->klass);
4637 /* This makes so that inline cannot trigger */
4638 /* .cctors: too many apps depend on them */
4639 /* running with a specific order... */
4640 if (! vtable->initialized)
4643 if (!mono_runtime_class_init_full (vtable, &error)) {
4644 mono_error_cleanup (&error);
4648 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4649 if (!method->klass->runtime_info)
4650 /* No vtable created yet */
4652 vtable = mono_class_vtable (cfg->domain, method->klass);
4655 if (!vtable->initialized)
4660 * If we're compiling for shared code
4661 * the cctor will need to be run at aot method load time, for example,
4662 * or at the end of the compilation of the inlining method.
4664 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4668 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4669 if (mono_arch_is_soft_float ()) {
4671 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4673 for (i = 0; i < sig->param_count; ++i)
4674 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4679 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access to KLASS from METHOD requires
 * emitting a class-init check. Visible exemptions: the vtable is already
 * initialized (JIT only — not valid when AOT-compiling, hence the
 * !compile_aot guard), BeforeFieldInit classes accessed from the method
 * being compiled, classes that need no cctor at all, and instance methods
 * touching their own class (initialization already happened before the
 * method could be called).
 * NOTE(review): elided excerpt — the return values of each branch are not
 * visible here.
 */
4686 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4688 if (!cfg->compile_aot) {
4690 if (vtable->initialized)
4694 if (mono_class_is_before_field_init (klass)) {
4695 if (cfg->method == method)
4699 if (!mono_class_needs_cctor_run (klass, method))
4702 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4703 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional array
 * of element type KLASS; BCHECK controls whether a bounds check is emitted.
 * Returns the address instruction (STACK_MP).  NOTE(review): interior lines
 * are missing from this excerpt; comments describe only the visible code.
 */
4710 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4714 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt variable-size elements take a separate path below (rgctx lookup). */
4717 if (mini_is_gsharedvt_variable_klass (klass)) {
4720 mono_class_init (klass);
4721 size = mono_class_array_element_size (klass);
4724 mult_reg = alloc_preg (cfg);
4725 array_reg = arr->dreg;
4726 index_reg = index->dreg;
4728 #if SIZEOF_REGISTER == 8
4729 /* The array reg is 64 bits but the index reg is only 32 */
4730 if (COMPILE_LLVM (cfg)) {
4732 index2_reg = index_reg;
4734 index2_reg = alloc_preg (cfg);
/* Sign-extend the 32 bit index to pointer width for the address arithmetic. */
4735 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4738 if (index->type == STACK_I8) {
4739 index2_reg = alloc_preg (cfg);
4740 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4742 index2_reg = index_reg;
/* Bounds check against MonoArray::max_length (visible in the bcheck path). */
4747 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4749 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can use a single LEA on x86/amd64. */
4750 if (size == 1 || size == 2 || size == 4 || size == 8) {
4751 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4753 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4754 ins->klass = mono_class_get_element_class (klass);
4755 ins->type = STACK_MP;
4761 add_reg = alloc_ireg_mp (cfg);
4764 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it via the rgctx. */
4767 g_assert (cfg->gshared);
4768 context_used = mini_class_check_context_used (cfg, klass);
4769 g_assert (context_used);
4770 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4771 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
/* Fixed-size path: index * size, then add the array base and vector offset. */
4773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4775 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4776 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4777 ins->klass = mono_class_get_element_class (klass);
4778 ins->type = STACK_MP;
4779 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of a two-dimensional array element
 * ARR [INDEX1, INDEX2], including per-dimension lower-bound adjustment and
 * range checks against the MonoArrayBounds records.  Returns the address
 * instruction (STACK_MP).  NOTE(review): interior lines are missing from
 * this excerpt; comments describe only the visible code.
 */
4785 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4787 int bounds_reg = alloc_preg (cfg);
4788 int add_reg = alloc_ireg_mp (cfg);
4789 int mult_reg = alloc_preg (cfg);
4790 int mult2_reg = alloc_preg (cfg);
4791 int low1_reg = alloc_preg (cfg);
4792 int low2_reg = alloc_preg (cfg);
4793 int high1_reg = alloc_preg (cfg);
4794 int high2_reg = alloc_preg (cfg);
4795 int realidx1_reg = alloc_preg (cfg);
4796 int realidx2_reg = alloc_preg (cfg);
4797 int sum_reg = alloc_preg (cfg);
4798 int index1, index2, tmpreg;
4802 mono_class_init (klass);
4803 size = mono_class_array_element_size (klass);
4805 index1 = index_ins1->dreg;
4806 index2 = index_ins2->dreg;
4808 #if SIZEOF_REGISTER == 8
4809 /* The array reg is 64 bits but the index reg is only 32 */
4810 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32 bit indexes to pointer width. */
4813 tmpreg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4816 tmpreg = alloc_preg (cfg);
4817 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4821 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4825 /* range checking */
4826 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4827 		       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, then unsigned-compare to length. */
4829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4830 		       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4831 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4833 		       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4834 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4835 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds record lives sizeof (MonoArrayBounds) further. */
4837 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4838 		       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4839 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4840 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4841 		       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4842 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4843 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address: ((realidx1 * dim1_length) + realidx2) * size + base + vector. */
4845 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4846 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4847 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4848 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4849 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4851 ins->type = STACK_MP;
4853 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for the array Address/Get/Set
 * helpers: rank 1 and rank 2 get inline fast paths, everything else calls
 * the marshalled array-address helper method.  NOTE(review): interior lines
 * are missing from this excerpt.
 */
4859 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4863 MonoMethod *addr_method;
4865 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter the trailing value argument is not an index. */
4867 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4870 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4872 /* emit_ldelema_2 depends on OP_LMUL */
4873 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4874 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4877 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the generated array-address wrapper. */
4880 element_size = mono_class_array_element_size (eclass);
4881 addr_method = mono_marshal_get_array_address (rank, element_size);
4882 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint/Debugger.Break request. */
4887 static MonoBreakPolicy
4888 always_insert_breakpoint (MonoMethod *method)
4890 return MONO_BREAK_POLICY_ALWAYS;
/* Current break policy callback; replaced via mono_set_break_policy (). */
4893 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4896 * mono_set_break_policy:
4897 * \param policy_callback the new callback function
4899 * Allow embedders to decide whether to actually obey breakpoint instructions
4900 * (both break IL instructions and \c Debugger.Break method calls), for example
4901 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4902 * untrusted or semi-trusted code.
4904 * \p policy_callback will be called every time a break point instruction needs to
4905 * be inserted with the method argument being the method that calls \c Debugger.Break
4906 * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4907 * if it wants the breakpoint to not be effective in the given method.
4908 * \c MONO_BREAK_POLICY_ALWAYS is the default.
4911 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
4913 if (policy_callback)
4914 break_policy_func = policy_callback;
4916 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:   (sic — historical name, kept for callers)
 *
 *   Consult the installed break policy to decide whether a breakpoint
 * instruction should actually be emitted for METHOD.
 */
4920 should_insert_brekpoint (MonoMethod *method) {
4921 switch (break_policy_func (method)) {
4922 case MONO_BREAK_POLICY_ALWAYS:
4924 case MONO_BREAK_POLICY_NEVER:
/* mdb-specific policy is obsolete; warn and fall through to the default handling. */
4926 case MONO_BREAK_POLICY_ON_DBG:
4927 g_warning ("mdb no longer supported");
4930 g_warning ("Incorrect value returned from break policy callback");
4935 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store; IS_SET selects the store direction.  The element address comes
 * from mini_emit_ldelema_1_ins without a bounds check.
 */
4937 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4939 MonoInst *addr, *store, *load;
4940 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4942 /* the bounds check is already done by the callers */
4943 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Store path: copy *args[2] into the array slot, with a write barrier for refs. */
4945 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4946 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4947 if (mini_type_is_reference (&eklass->byval_arg))
4948 emit_write_barrier (cfg, addr, load);
/* Load path: copy the array slot out into *args[2]. */
4950 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4951 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is a reference type once generic sharing is taken into account. */
4958 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4960 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into SP [0] [SP [1]] (stelem).  Reference
 * element types with a non-null value go through the virtual stelemref
 * helper so covariance is checked; value types are stored directly, with a
 * constant-index fast path.  NOTE(review): interior lines are missing from
 * this excerpt.
 */
4964 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a possibly-non-null object reference needs the covariance check. */
4966 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4967 		!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4968 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4969 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4970 MonoInst *iargs [3];
4973 mono_class_setup_vtable (obj_array);
4974 g_assert (helper->slot);
4976 if (sp [0]->type != STACK_OBJ)
4978 if (sp [2]->type != STACK_OBJ)
4985 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size gsharedvt elements must go through the generic address path. */
4989 if (mini_is_gsharedvt_variable_klass (klass)) {
4992 // FIXME-VT: OP_ICONST optimization
4993 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4994 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4995 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset into the store instruction. */
4996 } else if (sp [1]->opcode == OP_ICONST) {
4997 int array_reg = sp [0]->dreg;
4998 int index_reg = sp [1]->dreg;
4999 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5001 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5002 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5005 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5006 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, and barrier for refs. */
5008 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5009 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5010 if (generic_class_is_reference_type (cfg, klass))
5011 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeStore/UnsafeLoad intrinsics: an element store
 * (via emit_array_store without safety checks) or an unchecked element load.
 */
5018 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element type comes from the value parameter for stores, the return type for loads. */
5023 eklass = mono_class_from_mono_type (fsig->params [2]);
5025 eklass = mono_class_from_mono_type (fsig->ret);
5028 return emit_array_store (cfg, eklass, args, FALSE);
5030 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5031 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5037 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5040 int param_size, return_size;
5042 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5043 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5045 if (cfg->verbose_level > 3)
5046 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5048 //Don't allow mixing reference types with value types
5049 if (param_klass->valuetype != return_klass->valuetype) {
5050 if (cfg->verbose_level > 3)
5051 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5055 if (!param_klass->valuetype) {
5056 if (cfg->verbose_level > 3)
5057 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5062 if (param_klass->has_references || return_klass->has_references)
5065 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5066 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5067 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5068 if (cfg->verbose_level > 3)
5069 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5073 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5074 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5075 if (cfg->verbose_level > 3)
5076 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5080 param_size = mono_class_value_size (param_klass, &align);
5081 return_size = mono_class_value_size (return_klass, &align);
5083 //We can do it if sizes match
5084 if (param_size == return_size) {
5085 if (cfg->verbose_level > 3)
5086 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5090 //No simple way to handle struct if sizes don't match
5091 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5092 if (cfg->verbose_level > 3)
5093 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5098 * Same reg size category.
5099 * A quick note on why we don't require widening here.
5100 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5102 * Since the source value comes from a function argument, the JIT will already have
5103 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5105 if (param_size <= 4 && return_size <= 4) {
5106 if (cfg->verbose_level > 3)
5107 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov<S,R> as a register move when the two types are
 * move-compatible (see is_unsafe_mov_compatible); gsharedvt variable return
 * types are rejected.  NOTE(review): interior lines are missing from this
 * excerpt.
 */
5115 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5117 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5118 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5120 if (mini_is_gsharedvt_variable_type (fsig->ret))
5123 //Valuetypes that are semantically equivalent or numbers than can be widened to
5124 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5127 //Arrays of valuetypes that are semantically equivalent
5128 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR: SIMD ctors first
 * (when MONO_OPT_SIMD is enabled), then native-type intrinsics.  Returns
 * NULL-or-instruction per the usual intrinsic convention — presumably; the
 * return paths are not fully visible in this excerpt.
 */
5135 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5137 #ifdef MONO_ARCH_SIMD_INTRINSICS
5138 MonoInst *ins = NULL;
5140 if (cfg->opt & MONO_OPT_SIMD) {
5141 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5147 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5151 emit_memory_barrier (MonoCompile *cfg, int kind)
5153 MonoInst *ins = NULL;
5154 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5155 MONO_ADD_INS (cfg->cbb, ins);
5156 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics only used when compiling with the LLVM backend: System.Math
 * Sin/Cos/Sqrt/Abs(double) as unary R8 opcodes, and Min/Max as CMOV-style
 * IMIN/IMAX/LMIN/LMAX opcodes when MONO_OPT_CMOV is enabled.  NOTE(review):
 * interior lines (opcode assignments, returns) are missing from this excerpt.
 */
5162 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5164 MonoInst *ins = NULL;
5167 /* The LLVM backend supports these intrinsics */
5168 if (cmethod->klass == mono_defaults.math_class) {
5169 if (strcmp (cmethod->name, "Sin") == 0) {
5171 } else if (strcmp (cmethod->name, "Cos") == 0) {
5173 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5175 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary math intrinsic: one R8 argument, R8 result. */
5179 if (opcode && fsig->param_count == 1) {
5180 MONO_INST_NEW (cfg, ins, opcode);
5181 ins->type = STACK_R8;
5182 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5183 ins->sreg1 = args [0]->dreg;
5184 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map to conditional-move opcodes, signed/unsigned, 32/64 bit. */
5188 if (cfg->opt & MONO_OPT_CMOV) {
5189 if (strcmp (cmethod->name, "Min") == 0) {
5190 if (fsig->params [0]->type == MONO_TYPE_I4)
5192 if (fsig->params [0]->type == MONO_TYPE_U4)
5193 opcode = OP_IMIN_UN;
5194 else if (fsig->params [0]->type == MONO_TYPE_I8)
5196 else if (fsig->params [0]->type == MONO_TYPE_U8)
5197 opcode = OP_LMIN_UN;
5198 } else if (strcmp (cmethod->name, "Max") == 0) {
5199 if (fsig->params [0]->type == MONO_TYPE_I4)
5201 if (fsig->params [0]->type == MONO_TYPE_U4)
5202 opcode = OP_IMAX_UN;
5203 else if (fsig->params [0]->type == MONO_TYPE_I8)
5205 else if (fsig->params [0]->type == MONO_TYPE_U8)
5206 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: two arguments of the same integer type. */
5210 if (opcode && fsig->param_count == 2) {
5211 MONO_INST_NEW (cfg, ins, opcode);
5212 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5213 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5214 ins->sreg1 = args [0]->dreg;
5215 ins->sreg2 = args [1]->dreg;
5216 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array
 * UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5224 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5226 if (cmethod->klass == mono_defaults.array_class) {
5227 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5228 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5229 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5230 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5231 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5232 return emit_array_unsafe_mov (cfg, fsig, args);
5239 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5241 MonoInst *ins = NULL;
5242 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5244 if (cmethod->klass == mono_defaults.string_class) {
5245 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5246 int dreg = alloc_ireg (cfg);
5247 int index_reg = alloc_preg (cfg);
5248 int add_reg = alloc_preg (cfg);
5250 #if SIZEOF_REGISTER == 8
5251 if (COMPILE_LLVM (cfg)) {
5252 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5254 /* The array reg is 64 bits but the index reg is only 32 */
5255 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5258 index_reg = args [1]->dreg;
5260 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5262 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5263 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5264 add_reg = ins->dreg;
5265 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5268 int mult_reg = alloc_preg (cfg);
5269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5270 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5271 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5272 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5274 type_from_op (cfg, ins, NULL, NULL);
5276 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5277 int dreg = alloc_ireg (cfg);
5278 /* Decompose later to allow more optimizations */
5279 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5280 ins->type = STACK_I4;
5281 ins->flags |= MONO_INST_FAULT;
5282 cfg->cbb->has_array_access = TRUE;
5283 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5288 } else if (cmethod->klass == mono_defaults.object_class) {
5289 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5290 int dreg = alloc_ireg_ref (cfg);
5291 int vt_reg = alloc_preg (cfg);
5292 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5293 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5294 type_from_op (cfg, ins, NULL, NULL);
5297 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5298 int dreg = alloc_ireg (cfg);
5299 int t1 = alloc_ireg (cfg);
5301 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5302 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5303 ins->type = STACK_I4;
5306 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5307 MONO_INST_NEW (cfg, ins, OP_NOP);
5308 MONO_ADD_INS (cfg->cbb, ins);
5312 } else if (cmethod->klass == mono_defaults.array_class) {
5313 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5314 return emit_array_generic_access (cfg, fsig, args, FALSE);
5315 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5316 return emit_array_generic_access (cfg, fsig, args, TRUE);
5318 #ifndef MONO_BIG_ARRAYS
5320 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5323 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5324 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5325 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5326 int dreg = alloc_ireg (cfg);
5327 int bounds_reg = alloc_ireg_mp (cfg);
5328 MonoBasicBlock *end_bb, *szarray_bb;
5329 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5331 NEW_BBLOCK (cfg, end_bb);
5332 NEW_BBLOCK (cfg, szarray_bb);
5334 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5335 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5336 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5337 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5338 /* Non-szarray case */
5340 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5341 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5343 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5344 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5345 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5346 MONO_START_BB (cfg, szarray_bb);
5349 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5350 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5352 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5353 MONO_START_BB (cfg, end_bb);
5355 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5356 ins->type = STACK_I4;
5362 if (cmethod->name [0] != 'g')
5365 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5366 int dreg = alloc_ireg (cfg);
5367 int vtable_reg = alloc_preg (cfg);
5368 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5369 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5370 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5371 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5372 type_from_op (cfg, ins, NULL, NULL);
5375 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5376 int dreg = alloc_ireg (cfg);
5378 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5379 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5380 type_from_op (cfg, ins, NULL, NULL);
5385 } else if (cmethod->klass == runtime_helpers_class) {
5386 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5387 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5389 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5390 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5392 g_assert (ctx->method_inst);
5393 g_assert (ctx->method_inst->type_argc == 1);
5394 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5395 MonoClass *klass = mono_class_from_mono_type (t);
5399 mono_class_init (klass);
5400 if (MONO_TYPE_IS_REFERENCE (t))
5401 EMIT_NEW_ICONST (cfg, ins, 1);
5402 else if (MONO_TYPE_IS_PRIMITIVE (t))
5403 EMIT_NEW_ICONST (cfg, ins, 0);
5404 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5405 EMIT_NEW_ICONST (cfg, ins, 1);
5406 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5407 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5409 g_assert (cfg->gshared);
5411 int context_used = mini_class_check_context_used (cfg, klass);
5413 /* This returns 1 or 2 */
5414 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5415 int dreg = alloc_ireg (cfg);
5416 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5422 } else if (cmethod->klass == mono_defaults.monitor_class) {
5423 gboolean is_enter = FALSE;
5424 gboolean is_v4 = FALSE;
5426 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5430 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5435 * To make async stack traces work, icalls which can block should have a wrapper.
5436 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5438 MonoBasicBlock *end_bb;
5440 NEW_BBLOCK (cfg, end_bb);
5442 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5444 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5445 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5446 MONO_START_BB (cfg, end_bb);
5449 } else if (cmethod->klass == mono_defaults.thread_class) {
5450 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5451 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5452 MONO_ADD_INS (cfg->cbb, ins);
5454 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5455 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5456 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5458 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5460 if (fsig->params [0]->type == MONO_TYPE_I1)
5461 opcode = OP_LOADI1_MEMBASE;
5462 else if (fsig->params [0]->type == MONO_TYPE_U1)
5463 opcode = OP_LOADU1_MEMBASE;
5464 else if (fsig->params [0]->type == MONO_TYPE_I2)
5465 opcode = OP_LOADI2_MEMBASE;
5466 else if (fsig->params [0]->type == MONO_TYPE_U2)
5467 opcode = OP_LOADU2_MEMBASE;
5468 else if (fsig->params [0]->type == MONO_TYPE_I4)
5469 opcode = OP_LOADI4_MEMBASE;
5470 else if (fsig->params [0]->type == MONO_TYPE_U4)
5471 opcode = OP_LOADU4_MEMBASE;
5472 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5473 opcode = OP_LOADI8_MEMBASE;
5474 else if (fsig->params [0]->type == MONO_TYPE_R4)
5475 opcode = OP_LOADR4_MEMBASE;
5476 else if (fsig->params [0]->type == MONO_TYPE_R8)
5477 opcode = OP_LOADR8_MEMBASE;
5478 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5479 opcode = OP_LOAD_MEMBASE;
5482 MONO_INST_NEW (cfg, ins, opcode);
5483 ins->inst_basereg = args [0]->dreg;
5484 ins->inst_offset = 0;
5485 MONO_ADD_INS (cfg->cbb, ins);
5487 switch (fsig->params [0]->type) {
5494 ins->dreg = mono_alloc_ireg (cfg);
5495 ins->type = STACK_I4;
5499 ins->dreg = mono_alloc_lreg (cfg);
5500 ins->type = STACK_I8;
5504 ins->dreg = mono_alloc_ireg (cfg);
5505 #if SIZEOF_REGISTER == 8
5506 ins->type = STACK_I8;
5508 ins->type = STACK_I4;
5513 ins->dreg = mono_alloc_freg (cfg);
5514 ins->type = STACK_R8;
5517 g_assert (mini_type_is_reference (fsig->params [0]));
5518 ins->dreg = mono_alloc_ireg_ref (cfg);
5519 ins->type = STACK_OBJ;
5523 if (opcode == OP_LOADI8_MEMBASE)
5524 ins = mono_decompose_opcode (cfg, ins);
5526 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5530 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5532 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5534 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5535 opcode = OP_STOREI1_MEMBASE_REG;
5536 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5537 opcode = OP_STOREI2_MEMBASE_REG;
5538 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5539 opcode = OP_STOREI4_MEMBASE_REG;
5540 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5541 opcode = OP_STOREI8_MEMBASE_REG;
5542 else if (fsig->params [0]->type == MONO_TYPE_R4)
5543 opcode = OP_STORER4_MEMBASE_REG;
5544 else if (fsig->params [0]->type == MONO_TYPE_R8)
5545 opcode = OP_STORER8_MEMBASE_REG;
5546 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5547 opcode = OP_STORE_MEMBASE_REG;
5550 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5552 MONO_INST_NEW (cfg, ins, opcode);
5553 ins->sreg1 = args [1]->dreg;
5554 ins->inst_destbasereg = args [0]->dreg;
5555 ins->inst_offset = 0;
5556 MONO_ADD_INS (cfg->cbb, ins);
5558 if (opcode == OP_STOREI8_MEMBASE_REG)
5559 ins = mono_decompose_opcode (cfg, ins);
5564 } else if (cmethod->klass->image == mono_defaults.corlib &&
5565 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5566 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5569 #if SIZEOF_REGISTER == 8
5570 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5571 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5572 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5573 ins->dreg = mono_alloc_preg (cfg);
5574 ins->sreg1 = args [0]->dreg;
5575 ins->type = STACK_I8;
5576 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5577 MONO_ADD_INS (cfg->cbb, ins);
5581 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5583 /* 64 bit reads are already atomic */
5584 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5585 load_ins->dreg = mono_alloc_preg (cfg);
5586 load_ins->inst_basereg = args [0]->dreg;
5587 load_ins->inst_offset = 0;
5588 load_ins->type = STACK_I8;
5589 MONO_ADD_INS (cfg->cbb, load_ins);
5591 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5598 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5599 MonoInst *ins_iconst;
5602 if (fsig->params [0]->type == MONO_TYPE_I4) {
5603 opcode = OP_ATOMIC_ADD_I4;
5604 cfg->has_atomic_add_i4 = TRUE;
5606 #if SIZEOF_REGISTER == 8
5607 else if (fsig->params [0]->type == MONO_TYPE_I8)
5608 opcode = OP_ATOMIC_ADD_I8;
5611 if (!mono_arch_opcode_supported (opcode))
5613 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5614 ins_iconst->inst_c0 = 1;
5615 ins_iconst->dreg = mono_alloc_ireg (cfg);
5616 MONO_ADD_INS (cfg->cbb, ins_iconst);
5618 MONO_INST_NEW (cfg, ins, opcode);
5619 ins->dreg = mono_alloc_ireg (cfg);
5620 ins->inst_basereg = args [0]->dreg;
5621 ins->inst_offset = 0;
5622 ins->sreg2 = ins_iconst->dreg;
5623 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5624 MONO_ADD_INS (cfg->cbb, ins);
5626 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5627 MonoInst *ins_iconst;
5630 if (fsig->params [0]->type == MONO_TYPE_I4) {
5631 opcode = OP_ATOMIC_ADD_I4;
5632 cfg->has_atomic_add_i4 = TRUE;
5634 #if SIZEOF_REGISTER == 8
5635 else if (fsig->params [0]->type == MONO_TYPE_I8)
5636 opcode = OP_ATOMIC_ADD_I8;
5639 if (!mono_arch_opcode_supported (opcode))
5641 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5642 ins_iconst->inst_c0 = -1;
5643 ins_iconst->dreg = mono_alloc_ireg (cfg);
5644 MONO_ADD_INS (cfg->cbb, ins_iconst);
5646 MONO_INST_NEW (cfg, ins, opcode);
5647 ins->dreg = mono_alloc_ireg (cfg);
5648 ins->inst_basereg = args [0]->dreg;
5649 ins->inst_offset = 0;
5650 ins->sreg2 = ins_iconst->dreg;
5651 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5652 MONO_ADD_INS (cfg->cbb, ins);
5654 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5657 if (fsig->params [0]->type == MONO_TYPE_I4) {
5658 opcode = OP_ATOMIC_ADD_I4;
5659 cfg->has_atomic_add_i4 = TRUE;
5661 #if SIZEOF_REGISTER == 8
5662 else if (fsig->params [0]->type == MONO_TYPE_I8)
5663 opcode = OP_ATOMIC_ADD_I8;
5666 if (!mono_arch_opcode_supported (opcode))
5668 MONO_INST_NEW (cfg, ins, opcode);
5669 ins->dreg = mono_alloc_ireg (cfg);
5670 ins->inst_basereg = args [0]->dreg;
5671 ins->inst_offset = 0;
5672 ins->sreg2 = args [1]->dreg;
5673 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5674 MONO_ADD_INS (cfg->cbb, ins);
5677 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5678 MonoInst *f2i = NULL, *i2f;
5679 guint32 opcode, f2i_opcode, i2f_opcode;
5680 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5681 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5683 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5684 fsig->params [0]->type == MONO_TYPE_R4) {
5685 opcode = OP_ATOMIC_EXCHANGE_I4;
5686 f2i_opcode = OP_MOVE_F_TO_I4;
5687 i2f_opcode = OP_MOVE_I4_TO_F;
5688 cfg->has_atomic_exchange_i4 = TRUE;
5690 #if SIZEOF_REGISTER == 8
5692 fsig->params [0]->type == MONO_TYPE_I8 ||
5693 fsig->params [0]->type == MONO_TYPE_R8 ||
5694 fsig->params [0]->type == MONO_TYPE_I) {
5695 opcode = OP_ATOMIC_EXCHANGE_I8;
5696 f2i_opcode = OP_MOVE_F_TO_I8;
5697 i2f_opcode = OP_MOVE_I8_TO_F;
5700 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5701 opcode = OP_ATOMIC_EXCHANGE_I4;
5702 cfg->has_atomic_exchange_i4 = TRUE;
5708 if (!mono_arch_opcode_supported (opcode))
5712 /* TODO: Decompose these opcodes instead of bailing here. */
5713 if (COMPILE_SOFT_FLOAT (cfg))
5716 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5717 f2i->dreg = mono_alloc_ireg (cfg);
5718 f2i->sreg1 = args [1]->dreg;
5719 if (f2i_opcode == OP_MOVE_F_TO_I4)
5720 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5721 MONO_ADD_INS (cfg->cbb, f2i);
5724 MONO_INST_NEW (cfg, ins, opcode);
5725 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5726 ins->inst_basereg = args [0]->dreg;
5727 ins->inst_offset = 0;
5728 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5729 MONO_ADD_INS (cfg->cbb, ins);
5731 switch (fsig->params [0]->type) {
5733 ins->type = STACK_I4;
5736 ins->type = STACK_I8;
5739 #if SIZEOF_REGISTER == 8
5740 ins->type = STACK_I8;
5742 ins->type = STACK_I4;
5747 ins->type = STACK_R8;
5750 g_assert (mini_type_is_reference (fsig->params [0]));
5751 ins->type = STACK_OBJ;
5756 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5757 i2f->dreg = mono_alloc_freg (cfg);
5758 i2f->sreg1 = ins->dreg;
5759 i2f->type = STACK_R8;
5760 if (i2f_opcode == OP_MOVE_I4_TO_F)
5761 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5762 MONO_ADD_INS (cfg->cbb, i2f);
5767 if (cfg->gen_write_barriers && is_ref)
5768 emit_write_barrier (cfg, args [0], args [1]);
5770 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5771 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5772 guint32 opcode, f2i_opcode, i2f_opcode;
5773 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5774 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5776 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5777 fsig->params [1]->type == MONO_TYPE_R4) {
5778 opcode = OP_ATOMIC_CAS_I4;
5779 f2i_opcode = OP_MOVE_F_TO_I4;
5780 i2f_opcode = OP_MOVE_I4_TO_F;
5781 cfg->has_atomic_cas_i4 = TRUE;
5783 #if SIZEOF_REGISTER == 8
5785 fsig->params [1]->type == MONO_TYPE_I8 ||
5786 fsig->params [1]->type == MONO_TYPE_R8 ||
5787 fsig->params [1]->type == MONO_TYPE_I) {
5788 opcode = OP_ATOMIC_CAS_I8;
5789 f2i_opcode = OP_MOVE_F_TO_I8;
5790 i2f_opcode = OP_MOVE_I8_TO_F;
5793 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5794 opcode = OP_ATOMIC_CAS_I4;
5795 cfg->has_atomic_cas_i4 = TRUE;
5801 if (!mono_arch_opcode_supported (opcode))
5805 /* TODO: Decompose these opcodes instead of bailing here. */
5806 if (COMPILE_SOFT_FLOAT (cfg))
5809 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5810 f2i_new->dreg = mono_alloc_ireg (cfg);
5811 f2i_new->sreg1 = args [1]->dreg;
5812 if (f2i_opcode == OP_MOVE_F_TO_I4)
5813 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5814 MONO_ADD_INS (cfg->cbb, f2i_new);
5816 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5817 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5818 f2i_cmp->sreg1 = args [2]->dreg;
5819 if (f2i_opcode == OP_MOVE_F_TO_I4)
5820 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5821 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5824 MONO_INST_NEW (cfg, ins, opcode);
5825 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5826 ins->sreg1 = args [0]->dreg;
5827 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5828 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5829 MONO_ADD_INS (cfg->cbb, ins);
5831 switch (fsig->params [1]->type) {
5833 ins->type = STACK_I4;
5836 ins->type = STACK_I8;
5839 #if SIZEOF_REGISTER == 8
5840 ins->type = STACK_I8;
5842 ins->type = STACK_I4;
5846 ins->type = cfg->r4_stack_type;
5849 ins->type = STACK_R8;
5852 g_assert (mini_type_is_reference (fsig->params [1]));
5853 ins->type = STACK_OBJ;
5858 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5859 i2f->dreg = mono_alloc_freg (cfg);
5860 i2f->sreg1 = ins->dreg;
5861 i2f->type = STACK_R8;
5862 if (i2f_opcode == OP_MOVE_I4_TO_F)
5863 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5864 MONO_ADD_INS (cfg->cbb, i2f);
5869 if (cfg->gen_write_barriers && is_ref)
5870 emit_write_barrier (cfg, args [0], args [1]);
5872 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5873 fsig->params [1]->type == MONO_TYPE_I4) {
5874 MonoInst *cmp, *ceq;
5876 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5879 /* int32 r = CAS (location, value, comparand); */
5880 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5881 ins->dreg = alloc_ireg (cfg);
5882 ins->sreg1 = args [0]->dreg;
5883 ins->sreg2 = args [1]->dreg;
5884 ins->sreg3 = args [2]->dreg;
5885 ins->type = STACK_I4;
5886 MONO_ADD_INS (cfg->cbb, ins);
5888 /* bool result = r == comparand; */
5889 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5890 cmp->sreg1 = ins->dreg;
5891 cmp->sreg2 = args [2]->dreg;
5892 cmp->type = STACK_I4;
5893 MONO_ADD_INS (cfg->cbb, cmp);
5895 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5896 ceq->dreg = alloc_ireg (cfg);
5897 ceq->type = STACK_I4;
5898 MONO_ADD_INS (cfg->cbb, ceq);
5900 /* *success = result; */
5901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5903 cfg->has_atomic_cas_i4 = TRUE;
5905 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5906 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5910 } else if (cmethod->klass->image == mono_defaults.corlib &&
5911 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5912 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5915 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5917 MonoType *t = fsig->params [0];
5919 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5921 g_assert (t->byref);
5922 /* t is a byref type, so the reference check is more complicated */
5923 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5924 if (t->type == MONO_TYPE_I1)
5925 opcode = OP_ATOMIC_LOAD_I1;
5926 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5927 opcode = OP_ATOMIC_LOAD_U1;
5928 else if (t->type == MONO_TYPE_I2)
5929 opcode = OP_ATOMIC_LOAD_I2;
5930 else if (t->type == MONO_TYPE_U2)
5931 opcode = OP_ATOMIC_LOAD_U2;
5932 else if (t->type == MONO_TYPE_I4)
5933 opcode = OP_ATOMIC_LOAD_I4;
5934 else if (t->type == MONO_TYPE_U4)
5935 opcode = OP_ATOMIC_LOAD_U4;
5936 else if (t->type == MONO_TYPE_R4)
5937 opcode = OP_ATOMIC_LOAD_R4;
5938 else if (t->type == MONO_TYPE_R8)
5939 opcode = OP_ATOMIC_LOAD_R8;
5940 #if SIZEOF_REGISTER == 8
5941 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5942 opcode = OP_ATOMIC_LOAD_I8;
5943 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5944 opcode = OP_ATOMIC_LOAD_U8;
5946 else if (t->type == MONO_TYPE_I)
5947 opcode = OP_ATOMIC_LOAD_I4;
5948 else if (is_ref || t->type == MONO_TYPE_U)
5949 opcode = OP_ATOMIC_LOAD_U4;
5953 if (!mono_arch_opcode_supported (opcode))
5956 MONO_INST_NEW (cfg, ins, opcode);
5957 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5958 ins->sreg1 = args [0]->dreg;
5959 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5960 MONO_ADD_INS (cfg->cbb, ins);
5963 case MONO_TYPE_BOOLEAN:
5970 ins->type = STACK_I4;
5974 ins->type = STACK_I8;
5978 #if SIZEOF_REGISTER == 8
5979 ins->type = STACK_I8;
5981 ins->type = STACK_I4;
5985 ins->type = cfg->r4_stack_type;
5988 ins->type = STACK_R8;
5992 ins->type = STACK_OBJ;
5998 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6000 MonoType *t = fsig->params [0];
6003 g_assert (t->byref);
6004 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6005 if (t->type == MONO_TYPE_I1)
6006 opcode = OP_ATOMIC_STORE_I1;
6007 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6008 opcode = OP_ATOMIC_STORE_U1;
6009 else if (t->type == MONO_TYPE_I2)
6010 opcode = OP_ATOMIC_STORE_I2;
6011 else if (t->type == MONO_TYPE_U2)
6012 opcode = OP_ATOMIC_STORE_U2;
6013 else if (t->type == MONO_TYPE_I4)
6014 opcode = OP_ATOMIC_STORE_I4;
6015 else if (t->type == MONO_TYPE_U4)
6016 opcode = OP_ATOMIC_STORE_U4;
6017 else if (t->type == MONO_TYPE_R4)
6018 opcode = OP_ATOMIC_STORE_R4;
6019 else if (t->type == MONO_TYPE_R8)
6020 opcode = OP_ATOMIC_STORE_R8;
6021 #if SIZEOF_REGISTER == 8
6022 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6023 opcode = OP_ATOMIC_STORE_I8;
6024 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6025 opcode = OP_ATOMIC_STORE_U8;
6027 else if (t->type == MONO_TYPE_I)
6028 opcode = OP_ATOMIC_STORE_I4;
6029 else if (is_ref || t->type == MONO_TYPE_U)
6030 opcode = OP_ATOMIC_STORE_U4;
6034 if (!mono_arch_opcode_supported (opcode))
6037 MONO_INST_NEW (cfg, ins, opcode);
6038 ins->dreg = args [0]->dreg;
6039 ins->sreg1 = args [1]->dreg;
6040 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6041 MONO_ADD_INS (cfg->cbb, ins);
6043 if (cfg->gen_write_barriers && is_ref)
6044 emit_write_barrier (cfg, args [0], args [1]);
6050 } else if (cmethod->klass->image == mono_defaults.corlib &&
6051 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6052 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6053 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6054 if (should_insert_brekpoint (cfg->method)) {
6055 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6057 MONO_INST_NEW (cfg, ins, OP_NOP);
6058 MONO_ADD_INS (cfg->cbb, ins);
6062 } else if (cmethod->klass->image == mono_defaults.corlib &&
6063 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6064 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6065 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6067 EMIT_NEW_ICONST (cfg, ins, 1);
6069 EMIT_NEW_ICONST (cfg, ins, 0);
6072 } else if (cmethod->klass->image == mono_defaults.corlib &&
6073 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6074 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6075 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6076 /* No stack walks are currently available, so implement this as an intrinsic */
6077 MonoInst *assembly_ins;
6079 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6080 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6083 } else if (cmethod->klass->image == mono_defaults.corlib &&
6084 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6085 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6086 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6087 /* No stack walks are currently available, so implement this as an intrinsic */
6088 MonoInst *method_ins;
6089 MonoMethod *declaring = cfg->method;
6091 /* This returns the declaring generic method */
6092 if (declaring->is_inflated)
6093 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6094 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6095 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6096 cfg->no_inline = TRUE;
6097 if (cfg->method != cfg->current_method)
6098 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6101 } else if (cmethod->klass == mono_defaults.math_class) {
6103 * There is general branchless code for Min/Max, but it does not work for
6105 * http://everything2.com/?node_id=1051618
6107 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6108 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6109 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6110 ins->dreg = alloc_preg (cfg);
6111 ins->type = STACK_I4;
6112 MONO_ADD_INS (cfg->cbb, ins);
6114 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6115 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6116 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6117 !strcmp (cmethod->klass->name, "Selector")) ||
6118 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6119 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6120 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6121 !strcmp (cmethod->klass->name, "Selector"))
6123 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6124 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6125 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6128 MonoJumpInfoToken *ji;
6131 if (args [0]->opcode == OP_GOT_ENTRY) {
6132 pi = (MonoInst *)args [0]->inst_p1;
6133 g_assert (pi->opcode == OP_PATCH_INFO);
6134 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6135 ji = (MonoJumpInfoToken *)pi->inst_p0;
6137 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6138 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6141 NULLIFY_INS (args [0]);
6143 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6144 return_val_if_nok (&cfg->error, NULL);
6146 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6147 ins->dreg = mono_alloc_ireg (cfg);
6150 MONO_ADD_INS (cfg->cbb, ins);
6155 #ifdef MONO_ARCH_SIMD_INTRINSICS
6156 if (cfg->opt & MONO_OPT_SIMD) {
6157 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6163 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6167 if (COMPILE_LLVM (cfg)) {
6168 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6173 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6177 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to faster JIT-level replacements.  The
 * only case visible here replaces String.InternalAllocateStr with a direct
 * call to the GC's managed allocator, but only when allocation profiling is
 * off and shared (AOT-shared) code is not being generated.
 * NOTE(review): several original lines are elided in this excerpt; the
 * non-redirected fall-through path is not visible here.
 */
6180 inline static MonoInst*
6181 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6182 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6184 if (method->klass == mono_defaults.string_class) {
6185 /* managed string allocation support */
6186 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6187 MonoInst *iargs [2];
6188 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6189 MonoMethod *managed_alloc = NULL;
6191 g_assert (vtable); /* Should not fail since it is System.String */
6192 #ifndef MONO_CROSS_COMPILE
6193 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
6197 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6198 iargs [1] = args [0];
6199 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for every argument (including an implicit
 * 'this') of the method being inlined and emit stores of the values on the
 * evaluation stack SP into those variables.  For the 'this' argument the type
 * is derived from the stack slot; for the rest it comes from the signature.
 */
6206 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6208 MonoInst *store, *temp;
6211 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6212 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6215 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6216 * would be different than the MonoInst's used to represent arguments, and
6217 * the ldelema implementation can't deal with that.
6218 * Solution: When ldelema is used on an inline argument, create a var for
6219 * it, emit ldelema on that var, and emit the saving code below in
6220 * inline_method () if needed.
6222 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6223 cfg->args [i] = temp;
6224 /* This uses cfg->args [i] which is set by the preceding line */
6225 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6226 store->cil_code = sp [0]->cil_code;
6231 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6232 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6234 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging aid: only allow inlining of callees whose full name matches the
 * prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The env var is read once and cached in a static.  Returns TRUE
 * when the callee is allowed to be inlined.
 */
6236 check_inline_called_method_name_limit (MonoMethod *called_method)
6239 static const char *limit = NULL;
6241 if (limit == NULL) {
6242 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6244 if (limit_string != NULL)
6245 limit = limit_string;
/* An empty limit means "no restriction". */
6250 if (limit [0] != '\0') {
6251 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are checked. */
6253 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6254 g_free (called_method_name);
6256 //return (strncmp_result <= 0);
6257 return (strncmp_result == 0);
6264 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid: only allow inlining into callers whose full name matches the
 * prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable.  Mirrors check_inline_called_method_name_limit () but for the
 * caller side.  Returns TRUE when inlining into this caller is allowed.
 */
6266 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6269 static const char *limit = NULL;
6271 if (limit == NULL) {
6272 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6273 if (limit_string != NULL) {
6274 limit = limit_string;
/* An empty limit means "no restriction". */
6280 if (limit [0] != '\0') {
6281 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are checked. */
6283 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6284 g_free (caller_method_name);
6286 //return (strncmp_result <= 0);
6287 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register/variable DREG to the zero value of
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats
 * (R4CONST only when the backend keeps R4 in float regs, i.e. cfg->r4fp),
 * and VZERO for value types (including generic instances and type variables
 * constrained to valuetypes).
 */
6295 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zero constants: R4CONST/R8CONST reference them by address. */
6297 static double r8_0 = 0.0;
6298 static float r4_0 = 0.0;
6302 rtype = mini_get_underlying_type (rtype);
6306 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6307 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6308 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6309 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6310 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6311 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6312 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6313 ins->type = STACK_R4;
6314 ins->inst_p0 = (void*)&r4_0;
6316 MONO_ADD_INS (cfg->cbb, ins);
6317 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6318 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6319 ins->type = STACK_R8;
6320 ins->inst_p0 = (void*)&r8_0;
6322 MONO_ADD_INS (cfg->cbb, ins);
6323 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6324 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6325 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6326 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6327 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (references etc.) is initialized to NULL. */
6329 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations,
 * which keep the IR/SSA form valid without generating real code.  Type
 * dispatch mirrors emit_init_rvar (); unhandled types fall back to the real
 * initialization at the end.
 */
6334 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6338 rtype = mini_get_underlying_type (rtype);
6342 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6343 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6344 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6345 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6346 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6347 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6348 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6349 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6350 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6351 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6352 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6353 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6354 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6355 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero-init instead. */
6357 emit_init_rvar (cfg, dreg, rtype);
6361 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize the IL local LOCAL of type TYPE, using real zero stores when
 * INIT is TRUE and dummy placeholders otherwise.  Under soft-float the value
 * is built in a fresh register and stored into the local afterwards.
 * NOTE(review): the if/else structure joining these branches is partly elided
 * in this excerpt.
 */
6363 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6365 MonoInst *var = cfg->locals [local];
6366 if (COMPILE_SOFT_FLOAT (cfg)) {
6368 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6369 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6370 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6373 emit_init_rvar (cfg, var->dreg, type);
6375 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below; same contract. */
6380 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6382 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6388 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * Inlining works by saving the relevant per-method compilation state of CFG,
 * recursively invoking mono_method_to_ir () on CMETHOD between a fresh start
 * block (sbblock) and end block (ebblock), restoring the state, and then
 * either stitching the new blocks into the caller's CFG (on success) or
 * discarding them (on abort).
 */
6391 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6392 guchar *ip, guint real_offset, gboolean inline_always)
6395 MonoInst *ins, *rvar = NULL;
6396 MonoMethodHeader *cheader;
6397 MonoBasicBlock *ebblock, *sbblock;
6399 MonoMethod *prev_inlined_method;
6400 MonoInst **prev_locals, **prev_args;
6401 MonoType **prev_arg_types;
6402 guint prev_real_offset;
6403 GHashTable *prev_cbb_hash;
6404 MonoBasicBlock **prev_cil_offset_to_bb;
6405 MonoBasicBlock *prev_cbb;
6406 const unsigned char *prev_ip;
6407 unsigned char *prev_cil_start;
6408 guint32 prev_cil_offset_to_bb_len;
6409 MonoMethod *prev_current_method;
6410 MonoGenericContext *prev_generic_context;
6411 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6413 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var-driven filters for debugging inlining decisions. */
6415 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6416 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6419 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6420 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6425 fsig = mono_method_signature (cmethod);
6427 if (cfg->verbose_level > 2)
6428 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6430 if (!cmethod->inline_info) {
6431 cfg->stat_inlineable_methods++;
6432 cmethod->inline_info = 1;
6435 /* allocate local variables */
6436 cheader = mono_method_get_header_checked (cmethod, &error);
6438 if (inline_always) {
/* Forced inlines must report the failure; best-effort ones swallow it. */
6439 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6440 mono_error_move (&cfg->error, &error);
6442 mono_error_cleanup (&error);
6447 /*Must verify before creating locals as it can cause the JIT to assert.*/
6448 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6449 mono_metadata_free_mh (cheader);
6453 /* allocate space to store the return value */
6454 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6455 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6458 prev_locals = cfg->locals;
6459 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6460 for (i = 0; i < cheader->num_locals; ++i)
6461 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6463 /* allocate start and end blocks */
6464 /* This is needed so if the inline is aborted, we can clean up */
6465 NEW_BBLOCK (cfg, sbblock);
6466 sbblock->real_offset = real_offset;
6468 NEW_BBLOCK (cfg, ebblock);
6469 ebblock->block_num = cfg->num_bblocks++;
6470 ebblock->real_offset = real_offset;
/* Save the caller's per-method compilation state before recursing. */
6472 prev_args = cfg->args;
6473 prev_arg_types = cfg->arg_types;
6474 prev_inlined_method = cfg->inlined_method;
6475 cfg->inlined_method = cmethod;
6476 cfg->ret_var_set = FALSE;
6477 cfg->inline_depth ++;
6478 prev_real_offset = cfg->real_offset;
6479 prev_cbb_hash = cfg->cbb_hash;
6480 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6481 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6482 prev_cil_start = cfg->cil_start;
6484 prev_cbb = cfg->cbb;
6485 prev_current_method = cfg->current_method;
6486 prev_generic_context = cfg->generic_context;
6487 prev_ret_var_set = cfg->ret_var_set;
6488 prev_disable_inline = cfg->disable_inline;
6490 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively build IR for the callee; costs < 0 means it aborted. */
6493 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6495 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
6497 cfg->inlined_method = prev_inlined_method;
6498 cfg->real_offset = prev_real_offset;
6499 cfg->cbb_hash = prev_cbb_hash;
6500 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6501 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6502 cfg->cil_start = prev_cil_start;
6504 cfg->locals = prev_locals;
6505 cfg->args = prev_args;
6506 cfg->arg_types = prev_arg_types;
6507 cfg->current_method = prev_current_method;
6508 cfg->generic_context = prev_generic_context;
6509 cfg->ret_var_set = prev_ret_var_set;
6510 cfg->disable_inline = prev_disable_inline;
6511 cfg->inline_depth --;
/* Accept the inline when cheap enough, forced, or marked AggressiveInlining. */
6513 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6514 if (cfg->verbose_level > 2)
6515 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6517 cfg->stat_inlined_methods++;
6519 /* always add some code to avoid block split failures */
6520 MONO_INST_NEW (cfg, ins, OP_NOP);
6521 MONO_ADD_INS (prev_cbb, ins);
6523 prev_cbb->next_bb = sbblock;
6524 link_bblock (cfg, prev_cbb, sbblock);
6527 * Get rid of the begin and end bblocks if possible to aid local
6530 if (prev_cbb->out_count == 1)
6531 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6533 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6534 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6536 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6537 MonoBasicBlock *prev = ebblock->in_bb [0];
6539 if (prev->next_bb == ebblock) {
6540 mono_merge_basic_blocks (cfg, prev, ebblock);
6542 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6543 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6544 cfg->cbb = prev_cbb;
6547 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6552 * It's possible that the rvar is set in some prev bblock, but not in others.
6558 for (i = 0; i < ebblock->in_count; ++i) {
6559 bb = ebblock->in_bb [i];
6561 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6564 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6574 * If the inlined method contains only a throw, then the ret var is not
6575 * set, so set it to a dummy value.
6578 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6580 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6583 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: too expensive or the recursive build failed. */
6586 if (cfg->verbose_level > 2)
6587 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6588 cfg->exception_type = MONO_EXCEPTION_NONE;
6590 /* This gets rid of the newly added bblocks */
6591 cfg->cbb = prev_cbb;
6593 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6598 * Some of these comments may well be out-of-date.
6599 * Design decisions: we do a single pass over the IL code (and we do bblock
6600 * splitting/merging in the few cases when it's required: a back jump to an IL
6601 * address that was not already seen as bblock starting point).
6602 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6603 * Complex operations are decomposed in simpler ones right away. We need to let the
6604 * arch-specific code peek and poke inside this process somehow (except when the
6605 * optimizations can take advantage of the full semantic info of coarse opcodes).
6606 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6607 * MonoInst->opcode initially is the IL opcode or some simplification of that
6608 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6609 * opcode with value bigger than OP_LAST.
6610 * At this point the IR can be handed over to an interpreter, a dumb code generator
6611 * or to the optimizing code generator that will translate it to SSA form.
6613 * Profiling directed optimizations.
6614 * We may compile by default with few or no optimizations and instrument the code
6615 * or the user may indicate what methods to optimize the most either in a config file
6616 * or through repeated runs where the compiler applies offline the optimizations to
6617 * each method and then decides if it was worth it.
/*
 * Lightweight verification helpers used throughout mono_method_to_ir ().
 * Each bails out via UNVERIFIED (or TYPE_LOAD_ERROR) when the condition
 * fails; they rely on locals (sp, stack_start, header, ip, end, num_args)
 * being in scope at the expansion site.
 */
6620 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6621 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6622 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6623 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6624 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6625 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6626 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6627 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6629 /* offset from br.s -> br like opcodes */
6630 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether CIL address IP belongs to basic block BB, i.e. the
 * offset-to-bblock map has either no entry (no block starts there) or maps
 * it to BB itself.
 */
6633 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6635 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6637 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the CIL stream [START, END): decode each opcode, and at
 * every branch target and fall-through point create (or look up) a basic
 * block via GET_BBLOCK.  Also marks blocks containing a throw as
 * out-of-line so they can be laid out cold.
 * NOTE(review): several case bodies and the ip-advance lines are elided in
 * this excerpt.
 */
6641 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6643 unsigned char *ip = start;
6644 unsigned char *target;
6647 MonoBasicBlock *bblock;
6648 const MonoOpcode *opcode;
6651 cli_addr = ip - start;
6652 i = mono_opcode_value ((const guint8 **)&ip, end);
6655 opcode = &mono_opcodes [i];
/* Advance ip past the operand; branches additionally register targets. */
6656 switch (opcode->argument) {
6657 case MonoInlineNone:
6660 case MonoInlineString:
6661 case MonoInlineType:
6662 case MonoInlineField:
6663 case MonoInlineMethod:
6666 case MonoShortInlineR:
6673 case MonoShortInlineVar:
6674 case MonoShortInlineI:
6677 case MonoShortInlineBrTarget:
/* 1-byte signed relative target; instruction is 2 bytes long. */
6678 target = start + cli_addr + 2 + (signed char)ip [1];
6679 GET_BBLOCK (cfg, bblock, target);
6682 GET_BBLOCK (cfg, bblock, ip);
6684 case MonoInlineBrTarget:
/* 4-byte signed relative target; instruction is 5 bytes long. */
6685 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6686 GET_BBLOCK (cfg, bblock, target);
6689 GET_BBLOCK (cfg, bblock, ip);
6691 case MonoInlineSwitch: {
6692 guint32 n = read32 (ip + 1);
/* Targets are relative to the end of the switch instruction. */
6695 cli_addr += 5 + 4 * n;
6696 target = start + cli_addr;
6697 GET_BBLOCK (cfg, bblock, target);
6699 for (j = 0; j < n; ++j) {
6700 target = start + cli_addr + (gint32)read32 (ip);
6701 GET_BBLOCK (cfg, bblock, target);
6711 g_assert_not_reached ();
6714 if (i == CEE_THROW) {
6715 unsigned char *bb_start = ip - 1;
6717 /* Find the start of the bblock containing the throw */
6719 while ((bb_start >= start) && !bblock) {
6720 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6724 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M.  Wrapper methods keep
 * their resolved data in the wrapper-info table (then inflated against
 * CONTEXT); otherwise the token is looked up in M's image.  May return an
 * open (uninstantiated generic) method; errors are reported via ERROR.
 */
6734 static inline MonoMethod *
6735 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6741 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6742 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6744 method = mono_class_inflate_generic_method_checked (method, context, error);
6747 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open () but additionally rejects methods on
 * open constructed types when not compiling gshared code, recording a
 * bad-image error on CFG.  CFG may be NULL, in which case a local error is
 * used and cleaned up.
 */
6753 static inline MonoMethod *
6754 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6757 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6759 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6760 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6764 if (!method && !cfg)
6765 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating against
 * CONTEXT.  Wrapper methods resolve through the wrapper-info table; otherwise
 * the typespec/typedef token is looked up in the method's image.  The class
 * is initialized before returning.
 */
6770 static inline MonoClass*
6771 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6776 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6777 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6779 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6780 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6783 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6784 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6787 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature relative to METHOD, inflating it
 * with CONTEXT. Wrapper methods take the signature from their wrapper data;
 * otherwise the signature is parsed from METHOD's image. Returns NULL on
 * error, with the failure recorded in ERROR.
 */
6791 static inline MonoMethodSignature*
6792 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6794 MonoMethodSignature *fsig;
6797 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6798 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6800 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6801 return_val_if_nok (error, NULL);
/* Inflate generic parameters in the signature with the caller's context. */
6804 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException (1 arg) method, cached
 * in a function-local static after the first lookup (init guard elided in
 * this excerpt). Used by emit_throw_exception () below.
 */
6810 throw_exception (void)
6812 static MonoMethod *method = NULL;
6815 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6816 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-constructed exception object EX at runtime by
 * calling the managed SecurityManager.ThrowException helper with EX embedded
 * as a pointer constant.
 */
6823 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6825 MonoMethod *thrower = throw_exception ();
6828 EMIT_NEW_PCONST (cfg, args [0], ex);
6829 mono_emit_method_call (cfg, thrower, args, NULL);
6833 * Return the original method if a wrapper is specified. We can only access
6834 * the custom attributes from the original method.
6837 get_original_method (MonoMethod *method)
/* Non-wrapper methods are already "original" (return elided in this excerpt). */
6839 if (method->wrapper_type == MONO_WRAPPER_NONE)
6842 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6843 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6846 /* in other cases we need to find the original method */
6847 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit IR that
 * throws the security exception at runtime instead of failing compilation.
 */
6851 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6853 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6854 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
/* A non-NULL exception means access is denied (guard elided in this excerpt). */
6856 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit IR that
 * throws the security exception at runtime. Mirrors the field-access check
 * above.
 */
6860 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6862 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6863 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
/* A non-NULL exception means the call is denied (guard elided in this excerpt). */
6865 emit_throw_exception (cfg, ex);
6869 * Check that the IL instructions at ip are the array initialization
6870 * sequence and return the pointer to the data and the size.
/*
 * Recognized pattern (after the newarr whose element class is KLASS):
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray (...)
 * On success the raw field data pointer is returned (or, for AOT, the RVA
 * encoded as a pointer so the lookup can happen at load time), *out_size is
 * the byte size and *out_field_token the field token. Returns NULL when the
 * pattern does not match. NOTE(review): several lines (size computation,
 * early returns) are elided in this excerpt.
 */
6873 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6876 * newarr[System.Int32]
6878 * ldtoken field valuetype ...
6879 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4: the ldtoken operand must be a Field token (table 0x04). */
6881 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6883 guint32 token = read32 (ip + 7);
6884 guint32 field_token = read32 (ip + 2);
6885 guint32 field_index = field_token & 0xffffff;
6887 const char *data_ptr;
6889 MonoMethod *cmethod;
6890 MonoClass *dummy_class;
6891 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6895 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6899 *out_field_token = field_token;
/* Resolve the call target and verify it really is corlib's RuntimeHelpers.InitializeArray. */
6901 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6904 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element size depends on the underlying element type (cases elided in this excerpt). */
6906 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6910 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6911 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Sanity check: the computed blob size must fit inside the RVA field. */
6928 if (size > mono_type_size (field->type, &dummy_align))
6931 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6932 if (!image_is_dynamic (method->klass->image)) {
6933 field_index = read32 (ip + 2) & 0xffffff;
6934 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6935 data_ptr = mono_image_rva_map (method->klass->image, rva);
6936 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6937 /* for aot code we do the lookup on load */
6938 if (aot && data_ptr)
6939 return (const char *)GUINT_TO_POINTER (rva);
6941 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the data straight from the field. */
6943 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD. The message includes either the header-parse failure, a note
 * that the body is empty, or a disassembly of the offending instruction.
 * The header is queued on cfg->headers_to_free so it is released with the
 * compile's mempool.
 */
6951 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6954 char *method_fname = mono_method_full_name (method, TRUE);
6956 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6959 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6960 mono_error_cleanup (&error);
6961 } else if (header->code_size == 0)
6962 method_code = g_strdup ("method body is empty.");
/* Otherwise disassemble the single instruction at IP for the message (else branch elided). */
6964 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6965 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6966 g_free (method_fname);
6967 g_free (method_code);
6968 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N. When the store
 * would be a plain reg-reg move and SP [0] is the constant instruction just
 * emitted into the current bblock, retarget that instruction's dreg to the
 * local's register instead of emitting a separate move.
 */
6972 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6975 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6976 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6977 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6978 /* Optimize reg-reg moves away */
6980 * Can't optimize other opcodes, since sp[0] might point to
6981 * the last ins of a decomposed opcode.
6983 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* Fallback: ordinary local store (else branch elided in this excerpt). */
6985 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6990 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj T": instead of taking the local's
 * address, initialize the local directly, which keeps it eligible for
 * register allocation. Returns the IP to continue decoding from, or
 * (presumably) NULL when the pattern does not match — tail elided in this
 * excerpt. SIZE selects the ldloca encoding (short form vs. read16 below).
 */
6993 static inline unsigned char *
6994 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7004 local = read16 (ip + 2);
/* Only fold when the following initobj is in the same bblock, so no branch can intervene. */
7008 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7009 /* From the INITOBJ case */
7010 token = read32 (ip + 2);
7011 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7012 CHECK_TYPELOAD (klass);
7013 type = mini_get_underlying_type (&klass->byval_arg);
7014 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call for llvm-only mode, where vtable and IMT
 * slots hold function descriptors (address + arg pairs) rather than raw code
 * addresses or trampolines. Four strategies, from fastest to slowest:
 *   1. plain virtual call: load the ftndesc from the vtable slot, lazily
 *      initializing the slot via an icall when it is still NULL;
 *   2. simple interface call: call through the IMT thunk descriptor stored
 *      in the (negatively indexed) IMT slot;
 *   3. generic-virtual / variant-interface / special-array-interface call:
 *      like (2) but with a slow path, since the thunk may not know the
 *      instantiation yet;
 *   4. fully dynamic resolution via runtime icalls (gsharedvt).
 * SP [0] is the receiver; returns the call instruction.
 * NOTE(review): several lines (ifdefs, braces) are elided in this excerpt.
 */
7022 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7024 MonoInst *icall_args [16];
7025 MonoInst *call_target, *ins, *vtable_ins;
7026 int arg_reg, this_reg, vtable_reg;
7027 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7028 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7029 gboolean variant_iface = FALSE;
7032 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7035 * In llvm-only mode, vtables contain function descriptors instead of
7036 * method addresses/trampolines.
/* Explicit null check on the receiver before any vtable load. */
7038 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods use an IMT slot, class methods a vtable index (guard elided). */
7041 slot = mono_method_get_imt_slot (cmethod);
7043 slot = mono_method_get_vtable_index (cmethod);
7045 this_reg = sp [0]->dreg;
7047 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7048 variant_iface = TRUE;
/* --- Case 1: plain (non-generic, non-interface, non-gsharedvt) virtual call. --- */
7050 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7052 * The simplest case, a normal virtual call.
7054 int slot_reg = alloc_preg (cfg);
7055 int addr_reg = alloc_preg (cfg);
7056 int arg_reg = alloc_preg (cfg);
7057 MonoBasicBlock *non_null_bb;
7059 vtable_reg = alloc_preg (cfg);
7060 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7061 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7063 /* Load the vtable slot, which contains a function descriptor. */
7064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7066 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-NULL descriptor). */
7068 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7069 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7070 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7073 // FIXME: Make the wrapper use the preserveall cconv
7074 // FIXME: Use one icall per slot for small slot numbers ?
7075 icall_args [0] = vtable_ins;
7076 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7077 /* Make the icall return the vtable slot value to save some code space */
7078 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7079 ins->dreg = slot_reg;
7080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7083 MONO_START_BB (cfg, non_null_bb);
7084 /* Load the address + arg from the vtable slot */
7085 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7088 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* --- Case 2: simple (non-variant, non-generic) interface call through the IMT. --- */
7091 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7093 * A simple interface call
7095 * We make a call through an imt slot to obtain the function descriptor we need to call.
7096 * The imt slot contains a function descriptor for a runtime function + arg.
7098 int slot_reg = alloc_preg (cfg);
7099 int addr_reg = alloc_preg (cfg);
7100 int arg_reg = alloc_preg (cfg);
7101 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7103 vtable_reg = alloc_preg (cfg);
7104 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7105 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7108 * The slot is already initialized when the vtable is created so there is no need
7112 /* Load the imt slot, which contains a function descriptor. */
7113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7115 /* Load the address + arg of the imt thunk from the imt slot */
7116 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7117 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7119 * IMT thunks in llvm-only mode are C functions which take an info argument
7120 * plus the imt method and return the ftndesc to call.
7122 icall_args [0] = thunk_arg_ins;
7123 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7124 cmethod, MONO_RGCTX_INFO_METHOD);
7125 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7127 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* --- Case 3: generic virtual / variant iface / special array iface (with slow path). --- */
7130 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7132 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7133 * dynamically extended as more instantiations are discovered.
7134 * This handles generic virtual methods both on classes and interfaces.
7136 int slot_reg = alloc_preg (cfg);
7137 int addr_reg = alloc_preg (cfg);
7138 int arg_reg = alloc_preg (cfg);
7139 int ftndesc_reg = alloc_preg (cfg);
7140 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7141 MonoBasicBlock *slowpath_bb, *end_bb;
7143 NEW_BBLOCK (cfg, slowpath_bb);
7144 NEW_BBLOCK (cfg, end_bb);
7146 vtable_reg = alloc_preg (cfg);
7147 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7149 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7151 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7153 /* Load the slot, which contains a function descriptor. */
7154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7156 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7157 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7162 /* Same as with iface calls */
7163 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7164 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7165 icall_args [0] = thunk_arg_ins;
7166 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7167 cmethod, MONO_RGCTX_INFO_METHOD);
7168 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7169 ftndesc_ins->dreg = ftndesc_reg;
7171 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7172 * they don't know about yet. Fall back to the slowpath in that case.
7174 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7177 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve through the runtime, which also registers the instantiation. */
7180 MONO_START_BB (cfg, slowpath_bb);
7181 icall_args [0] = vtable_ins;
7182 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7183 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7184 cmethod, MONO_RGCTX_INFO_METHOD);
7186 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7188 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7189 ftndesc_ins->dreg = ftndesc_reg;
7190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7193 MONO_START_BB (cfg, end_bb);
7194 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* --- Case 4: fully dynamic resolution via icalls (gsharedvt). --- */
7198 * Non-optimized cases
7200 icall_args [0] = sp [0];
7201 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7203 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7204 cmethod, MONO_RGCTX_INFO_METHOD);
/* The icall returns the extra call argument through this stack slot's address. */
7206 arg_reg = alloc_preg (cfg);
7207 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7208 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7210 g_assert (is_gsharedvt);
7212 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7214 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7217 * Pass the extra argument even if the callee doesn't receive it, most
7218 * calling conventions allow this.
7220 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS derives from (or is) System.Exception, by walking
 * up the parent chain (loop structure elided in this excerpt).
 */
7224 is_exception_class (MonoClass *klass)
7227 if (klass == mono_defaults.exception_class)
7229 klass = klass->parent;
7235 * is_jit_optimizer_disabled:
7237 * Determine whether M's assembly has a DebuggableAttribute with the
7238 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per-assembly in ass->jit_optimizer_disabled; the
 * memory barriers below make the cached value visible before the
 * "..._inited" flag so racing readers never see the flag without the value.
 */
7241 is_jit_optimizer_disabled (MonoMethod *m)
7244 MonoAssembly *ass = m->klass->image->assembly;
7245 MonoCustomAttrInfo* attrs;
7248 gboolean val = FALSE;
/* Fast path: answer already computed for this assembly. */
7251 if (ass->jit_optimizer_disabled_inited)
7252 return ass->jit_optimizer_disabled;
7254 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class not present: optimizer cannot be disabled. */
7258 ass->jit_optimizer_disabled = FALSE;
7259 mono_memory_barrier ();
7260 ass->jit_optimizer_disabled_inited = TRUE;
7264 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7265 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7267 for (i = 0; i < attrs->num_attrs; ++i) {
7268 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7270 MonoMethodSignature *sig;
7272 if (!attr->ctor || attr->ctor->klass != klass)
7274 /* Decode the attribute. See reflection.c */
7275 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog mandated by the metadata format. */
7276 g_assert (read16 (p) == 0x0001);
7279 // FIXME: Support named parameters
7280 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
7281 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7283 /* Two boolean arguments */
7287 mono_custom_attrs_free (attrs);
7290 ass->jit_optimizer_disabled = val;
7291 mono_memory_barrier ();
7292 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG, IL
 * opcode CALL_OPCODE) can be compiled as a real tail call. Starts from the
 * architecture's verdict and then vetoes anything that could leave dangling
 * references into the current frame or that needs wrapper/LMF handling.
 */
7298 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7300 gboolean supported_tail_call;
7303 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7305 for (i = 0; i < fsig->param_count; ++i) {
7306 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7307 /* These can point to the current method's stack */
7308 supported_tail_call = FALSE;
7310 if (fsig->hasthis && cmethod->klass->valuetype)
7311 /* this might point to the current method's stack */
7312 supported_tail_call = FALSE;
7313 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7314 supported_tail_call = FALSE;
/* An LMF frame must stay live for the duration of the call. */
7315 if (cfg->method->save_lmf)
7316 supported_tail_call = FALSE;
7317 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7318 supported_tail_call = FALSE;
/* Only plain CEE_CALL is eligible (no callvirt/calli here). */
7319 if (call_opcode != CEE_CALL)
7320 supported_tail_call = FALSE;
7322 /* Debugging support */
7324 if (supported_tail_call) {
7325 if (!mono_debug_count ())
7326 supported_tail_call = FALSE;
7330 return supported_tail_call;
7336 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * Emits the ctor invocation for a freshly allocated object: picks up an
 * rgctx/vtable argument for shared generic valuetype ctors, then tries, in
 * order, an intrinsic, inlining, a gsharedvt indirect call, a
 * context-dependent indirect call, and finally a plain direct call.
 * NOTE(review): several lines (braces, else arms) are elided in this excerpt.
 */
7339 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7340 MonoInst **sp, guint8 *ip, int *inline_costs)
7342 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an extra rgctx/vtable argument. */
7344 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7345 mono_method_is_generic_sharable (cmethod, TRUE)) {
7346 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7347 mono_class_vtable (cfg->domain, cmethod->klass);
7348 CHECK_TYPELOAD (cmethod->klass);
7350 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7351 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7354 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7355 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* No context in use: embed the concrete vtable pointer directly. */
7357 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7359 CHECK_TYPELOAD (cmethod->klass);
7360 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7365 /* Avoid virtual calls to ctors if possible */
7366 if (mono_class_is_marshalbyref (cmethod->klass))
7367 callvirt_this_arg = sp [0];
/* 1) intrinsic ctor implementation, if the backend provides one. */
7369 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7370 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7371 CHECK_CFG_EXCEPTION;
/* 2) try inlining the ctor body (exception-derived classes excluded). */
7372 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7373 mono_method_check_inlining (cfg, cmethod) &&
7374 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7377 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7378 cfg->real_offset += 5;
/* -5 compensates for the cost already charged for the call site itself. */
7380 *inline_costs += costs - 5;
7382 INLINE_FAILURE ("inline failure");
7383 // FIXME-VT: Clean this up
7384 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7385 GSHAREDVT_FAILURE(*ip);
7386 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signature: call through an out trampoline fetched from the rgctx. */
7388 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7391 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7393 if (cfg->llvm_only) {
7394 // FIXME: Avoid initializing vtable_arg
7395 emit_llvmonly_calli (cfg, fsig, sp, addr);
7397 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) context-dependent target that cannot be patched: indirect call. */
7399 } else if (context_used &&
7400 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7401 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7402 MonoInst *cmethod_addr;
7404 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7406 if (cfg->llvm_only) {
7407 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7408 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7409 emit_llvmonly_calli (cfg, fsig, sp, addr);
7411 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7412 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7414 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) fallback: plain direct call. */
7417 INLINE_FAILURE ("ctor call");
7418 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7419 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that moves VAL into the current method's return location.
 * Valuetype returns (CEE_STOBJ) are stored either into the return variable
 * or through the hidden vret address argument; on soft-float targets an R4
 * return is converted through an icall first; everything else goes through
 * the architecture's setret hook.
 */
7426 emit_setret (MonoCompile *cfg, MonoInst *val)
7428 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7431 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden vret argument: store into the dedicated return variable. */
7434 if (!cfg->vret_addr) {
7435 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Hidden vret argument: store the valuetype through its address. */
7437 EMIT_NEW_RETLOADA (cfg, ret_addr);
7439 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7440 ins->klass = mono_class_from_mono_type (ret_type);
7443 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7444 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7445 MonoInst *iargs [1];
/* Soft-float: convert the R4 value via icall before handing it to the backend. */
7449 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7450 mono_arch_emit_setret (cfg, cfg->method, conv);
7452 mono_arch_emit_setret (cfg, cfg->method, val);
7455 mono_arch_emit_setret (cfg, cfg->method, val);
7461 * mono_method_to_ir:
7463 * Translate the .net IL into linear IR.
7465 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7466 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7467 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7468 * @inline_args: if not NULL, contains the arguments to the inline call
7469 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7470 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7472 * This method is used to turn ECMA IL into Mono's internal Linear IR
7473 representation. It is used both for entire methods, as well as
7474 * inlining existing methods. In the former case, the @start_bblock,
7475 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7476 * inline_offset is set to zero.
7478 * Returns: the inline cost, or -1 if there was an error processing this method.
7481 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7482 MonoInst *return_var, MonoInst **inline_args,
7483 guint inline_offset, gboolean is_virtual_call)
7486 MonoInst *ins, **sp, **stack_start;
7487 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7488 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7489 MonoMethod *cmethod, *method_definition;
7490 MonoInst **arg_array;
7491 MonoMethodHeader *header;
7493 guint32 token, ins_flag;
7495 MonoClass *constrained_class = NULL;
7496 unsigned char *ip, *end, *target, *err_pos;
7497 MonoMethodSignature *sig;
7498 MonoGenericContext *generic_context = NULL;
7499 MonoGenericContainer *generic_container = NULL;
7500 MonoType **param_types;
7501 int i, n, start_new_bblock, dreg;
7502 int num_calls = 0, inline_costs = 0;
7503 int breakpoint_id = 0;
7505 GSList *class_inits = NULL;
7506 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7508 gboolean init_locals, seq_points, skip_dead_blocks;
7509 gboolean sym_seq_points = FALSE;
7510 MonoDebugMethodInfo *minfo;
7511 MonoBitSet *seq_point_locs = NULL;
7512 MonoBitSet *seq_point_set_locs = NULL;
7514 cfg->disable_inline = is_jit_optimizer_disabled (method);
7516 /* serialization and xdomain stuff may need access to private fields and methods */
7517 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7518 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7519 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7520 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7521 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7522 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7524 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7525 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7526 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7527 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7528 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7530 image = method->klass->image;
7531 header = mono_method_get_header_checked (method, &cfg->error);
7533 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7534 goto exception_exit;
7536 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7539 generic_container = mono_method_get_generic_container (method);
7540 sig = mono_method_signature (method);
7541 num_args = sig->hasthis + sig->param_count;
7542 ip = (unsigned char*)header->code;
7543 cfg->cil_start = ip;
7544 end = ip + header->code_size;
7545 cfg->stat_cil_code_size += header->code_size;
7547 seq_points = cfg->gen_seq_points && cfg->method == method;
7549 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7550 /* We could hit a seq point before attaching to the JIT (#8338) */
7554 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7555 minfo = mono_debug_lookup_method (method);
7557 MonoSymSeqPoint *sps;
7558 int i, n_il_offsets;
7560 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7561 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7562 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7563 sym_seq_points = TRUE;
7564 for (i = 0; i < n_il_offsets; ++i) {
7565 if (sps [i].il_offset < header->code_size)
7566 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7570 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7572 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7574 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7575 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7577 mono_debug_free_method_async_debug_info (asyncMethod);
7579 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7580 /* Methods without line number info like auto-generated property accessors */
7581 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7582 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7583 sym_seq_points = TRUE;
7588 * Methods without init_locals set could cause asserts in various passes
7589 * (#497220). To work around this, we emit dummy initialization opcodes
7590 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7591 * on some platforms.
7593 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7594 init_locals = header->init_locals;
7598 method_definition = method;
7599 while (method_definition->is_inflated) {
7600 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7601 method_definition = imethod->declaring;
7604 /* SkipVerification is not allowed if core-clr is enabled */
7605 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7607 dont_verify_stloc = TRUE;
7610 if (sig->is_inflated)
7611 generic_context = mono_method_get_context (method);
7612 else if (generic_container)
7613 generic_context = &generic_container->context;
7614 cfg->generic_context = generic_context;
7617 g_assert (!sig->has_type_parameters);
7619 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7620 g_assert (method->is_inflated);
7621 g_assert (mono_method_get_context (method)->method_inst);
7623 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7624 g_assert (sig->generic_param_count);
7626 if (cfg->method == method) {
7627 cfg->real_offset = 0;
7629 cfg->real_offset = inline_offset;
7632 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7633 cfg->cil_offset_to_bb_len = header->code_size;
7635 cfg->current_method = method;
7637 if (cfg->verbose_level > 2)
7638 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7640 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7642 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7643 for (n = 0; n < sig->param_count; ++n)
7644 param_types [n + sig->hasthis] = sig->params [n];
7645 cfg->arg_types = param_types;
7647 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7648 if (cfg->method == method) {
7650 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7651 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7654 NEW_BBLOCK (cfg, start_bblock);
7655 cfg->bb_entry = start_bblock;
7656 start_bblock->cil_code = NULL;
7657 start_bblock->cil_length = 0;
7660 NEW_BBLOCK (cfg, end_bblock);
7661 cfg->bb_exit = end_bblock;
7662 end_bblock->cil_code = NULL;
7663 end_bblock->cil_length = 0;
7664 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7665 g_assert (cfg->num_bblocks == 2);
7667 arg_array = cfg->args;
7669 if (header->num_clauses) {
7670 cfg->spvars = g_hash_table_new (NULL, NULL);
7671 cfg->exvars = g_hash_table_new (NULL, NULL);
7673 /* handle exception clauses */
7674 for (i = 0; i < header->num_clauses; ++i) {
7675 MonoBasicBlock *try_bb;
7676 MonoExceptionClause *clause = &header->clauses [i];
7677 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7679 try_bb->real_offset = clause->try_offset;
7680 try_bb->try_start = TRUE;
7681 try_bb->region = ((i + 1) << 8) | clause->flags;
7682 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7683 tblock->real_offset = clause->handler_offset;
7684 tblock->flags |= BB_EXCEPTION_HANDLER;
7687 * Linking the try block with the EH block hinders inlining as we won't be able to
7688 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7690 if (COMPILE_LLVM (cfg))
7691 link_bblock (cfg, try_bb, tblock);
7693 if (*(ip + clause->handler_offset) == CEE_POP)
7694 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7696 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7697 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7698 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7699 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7700 MONO_ADD_INS (tblock, ins);
7702 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7703 /* finally clauses already have a seq point */
7704 /* seq points for filter clauses are emitted below */
7705 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7706 MONO_ADD_INS (tblock, ins);
7709 /* todo: is a fault block unsafe to optimize? */
7710 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7711 tblock->flags |= BB_EXCEPTION_UNSAFE;
7714 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7716 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7718 /* catch and filter blocks get the exception object on the stack */
7719 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7720 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7722 /* mostly like handle_stack_args (), but just sets the input args */
7723 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7724 tblock->in_scount = 1;
7725 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7726 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7730 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7731 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7732 if (!cfg->compile_llvm) {
7733 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7734 ins->dreg = tblock->in_stack [0]->dreg;
7735 MONO_ADD_INS (tblock, ins);
7738 MonoInst *dummy_use;
7741 * Add a dummy use for the exvar so its liveness info will be
7744 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7747 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7748 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7749 MONO_ADD_INS (tblock, ins);
7752 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7753 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7754 tblock->flags |= BB_EXCEPTION_HANDLER;
7755 tblock->real_offset = clause->data.filter_offset;
7756 tblock->in_scount = 1;
7757 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7758 /* The filter block shares the exvar with the handler block */
7759 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7760 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7761 MONO_ADD_INS (tblock, ins);
7765 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7766 clause->data.catch_class &&
7768 mono_class_check_context_used (clause->data.catch_class)) {
7770 * In shared generic code with catch
7771 * clauses containing type variables
7772 * the exception handling code has to
7773 * be able to get to the rgctx.
7774 * Therefore we have to make sure that
7775 * the vtable/mrgctx argument (for
7776 * static or generic methods) or the
7777 * "this" argument (for non-static
7778 * methods) are live.
7780 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7781 mini_method_get_context (method)->method_inst ||
7782 method->klass->valuetype) {
7783 mono_get_vtable_var (cfg);
7785 MonoInst *dummy_use;
7787 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7792 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7793 cfg->cbb = start_bblock;
7794 cfg->args = arg_array;
7795 mono_save_args (cfg, sig, inline_args);
7798 /* FIRST CODE BLOCK */
7799 NEW_BBLOCK (cfg, tblock);
7800 tblock->cil_code = ip;
7804 ADD_BBLOCK (cfg, tblock);
7806 if (cfg->method == method) {
7807 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7808 if (breakpoint_id) {
7809 MONO_INST_NEW (cfg, ins, OP_BREAK);
7810 MONO_ADD_INS (cfg->cbb, ins);
7814 /* we use a separate basic block for the initialization code */
7815 NEW_BBLOCK (cfg, init_localsbb);
7816 if (cfg->method == method)
7817 cfg->bb_init = init_localsbb;
7818 init_localsbb->real_offset = cfg->real_offset;
7819 start_bblock->next_bb = init_localsbb;
7820 init_localsbb->next_bb = cfg->cbb;
7821 link_bblock (cfg, start_bblock, init_localsbb);
7822 link_bblock (cfg, init_localsbb, cfg->cbb);
7824 cfg->cbb = init_localsbb;
7826 if (cfg->gsharedvt && cfg->method == method) {
7827 MonoGSharedVtMethodInfo *info;
7828 MonoInst *var, *locals_var;
7831 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7832 info->method = cfg->method;
7833 info->count_entries = 16;
7834 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7835 cfg->gsharedvt_info = info;
7837 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7838 /* prevent it from being register allocated */
7839 //var->flags |= MONO_INST_VOLATILE;
7840 cfg->gsharedvt_info_var = var;
7842 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7843 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7845 /* Allocate locals */
7846 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7847 /* prevent it from being register allocated */
7848 //locals_var->flags |= MONO_INST_VOLATILE;
7849 cfg->gsharedvt_locals_var = locals_var;
7851 dreg = alloc_ireg (cfg);
7852 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7854 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7855 ins->dreg = locals_var->dreg;
7857 MONO_ADD_INS (cfg->cbb, ins);
7858 cfg->gsharedvt_locals_var_ins = ins;
7860 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7863 ins->flags |= MONO_INST_INIT;
7867 if (mono_security_core_clr_enabled ()) {
7868 /* check if this is native code, e.g. an icall or a p/invoke */
7869 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7870 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7872 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7873 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7875 /* if this ia a native call then it can only be JITted from platform code */
7876 if ((icall || pinvk) && method->klass && method->klass->image) {
7877 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7878 MonoException *ex = icall ? mono_get_exception_security () :
7879 mono_get_exception_method_access ();
7880 emit_throw_exception (cfg, ex);
7887 CHECK_CFG_EXCEPTION;
7889 if (header->code_size == 0)
7892 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7897 if (cfg->method == method)
7898 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7900 for (n = 0; n < header->num_locals; ++n) {
7901 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7906 /* We force the vtable variable here for all shared methods
7907 for the possibility that they might show up in a stack
7908 trace where their exact instantiation is needed. */
7909 if (cfg->gshared && method == cfg->method) {
7910 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7911 mini_method_get_context (method)->method_inst ||
7912 method->klass->valuetype) {
7913 mono_get_vtable_var (cfg);
7915 /* FIXME: Is there a better way to do this?
7916 We need the variable live for the duration
7917 of the whole method. */
7918 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7922 /* add a check for this != NULL to inlined methods */
7923 if (is_virtual_call) {
7926 NEW_ARGLOAD (cfg, arg_ins, 0);
7927 MONO_ADD_INS (cfg->cbb, arg_ins);
7928 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7931 skip_dead_blocks = !dont_verify;
7932 if (skip_dead_blocks) {
7933 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7938 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7939 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7942 start_new_bblock = 0;
7944 if (cfg->method == method)
7945 cfg->real_offset = ip - header->code;
7947 cfg->real_offset = inline_offset;
7952 if (start_new_bblock) {
7953 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7954 if (start_new_bblock == 2) {
7955 g_assert (ip == tblock->cil_code);
7957 GET_BBLOCK (cfg, tblock, ip);
7959 cfg->cbb->next_bb = tblock;
7961 start_new_bblock = 0;
7962 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7963 if (cfg->verbose_level > 3)
7964 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7965 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7969 g_slist_free (class_inits);
7972 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7973 link_bblock (cfg, cfg->cbb, tblock);
7974 if (sp != stack_start) {
7975 handle_stack_args (cfg, stack_start, sp - stack_start);
7977 CHECK_UNVERIFIABLE (cfg);
7979 cfg->cbb->next_bb = tblock;
7981 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7982 if (cfg->verbose_level > 3)
7983 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7984 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7987 g_slist_free (class_inits);
7992 if (skip_dead_blocks) {
7993 int ip_offset = ip - header->code;
7995 if (ip_offset == bb->end)
7999 int op_size = mono_opcode_size (ip, end);
8000 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8002 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8004 if (ip_offset + op_size == bb->end) {
8005 MONO_INST_NEW (cfg, ins, OP_NOP);
8006 MONO_ADD_INS (cfg->cbb, ins);
8007 start_new_bblock = 1;
8015 * Sequence points are points where the debugger can place a breakpoint.
8016 * Currently, we generate these automatically at points where the IL
8019 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8021 * Make methods interruptable at the beginning, and at the targets of
8022 * backward branches.
8023 * Also, do this at the start of every bblock in methods with clauses too,
8024 * to be able to handle instructions with inprecise control flow like
8026 * Backward branches are handled at the end of method-to-ir ().
8028 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8029 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8031 /* Avoid sequence points on empty IL like .volatile */
8032 // FIXME: Enable this
8033 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8034 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8035 if ((sp != stack_start) && !sym_seq_point)
8036 ins->flags |= MONO_INST_NONEMPTY_STACK;
8037 MONO_ADD_INS (cfg->cbb, ins);
8040 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8043 cfg->cbb->real_offset = cfg->real_offset;
8045 if ((cfg->method == method) && cfg->coverage_info) {
8046 guint32 cil_offset = ip - header->code;
8047 cfg->coverage_info->data [cil_offset].cil_code = ip;
8049 /* TODO: Use an increment here */
8050 #if defined(TARGET_X86)
8051 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8052 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8054 MONO_ADD_INS (cfg->cbb, ins);
8056 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8057 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8061 if (cfg->verbose_level > 3)
8062 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8066 if (seq_points && !sym_seq_points && sp != stack_start) {
8068 * The C# compiler uses these nops to notify the JIT that it should
8069 * insert seq points.
8071 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8072 MONO_ADD_INS (cfg->cbb, ins);
8074 if (cfg->keep_cil_nops)
8075 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8077 MONO_INST_NEW (cfg, ins, OP_NOP);
8079 MONO_ADD_INS (cfg->cbb, ins);
8082 if (should_insert_brekpoint (cfg->method)) {
8083 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8085 MONO_INST_NEW (cfg, ins, OP_NOP);
8088 MONO_ADD_INS (cfg->cbb, ins);
8094 CHECK_STACK_OVF (1);
8095 n = (*ip)-CEE_LDARG_0;
8097 EMIT_NEW_ARGLOAD (cfg, ins, n);
8105 CHECK_STACK_OVF (1);
8106 n = (*ip)-CEE_LDLOC_0;
8108 EMIT_NEW_LOCLOAD (cfg, ins, n);
8117 n = (*ip)-CEE_STLOC_0;
8120 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8122 emit_stloc_ir (cfg, sp, header, n);
8129 CHECK_STACK_OVF (1);
8132 EMIT_NEW_ARGLOAD (cfg, ins, n);
8138 CHECK_STACK_OVF (1);
8141 NEW_ARGLOADA (cfg, ins, n);
8142 MONO_ADD_INS (cfg->cbb, ins);
8152 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8154 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8159 CHECK_STACK_OVF (1);
8162 EMIT_NEW_LOCLOAD (cfg, ins, n);
8166 case CEE_LDLOCA_S: {
8167 unsigned char *tmp_ip;
8169 CHECK_STACK_OVF (1);
8170 CHECK_LOCAL (ip [1]);
8172 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8178 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8187 CHECK_LOCAL (ip [1]);
8188 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8190 emit_stloc_ir (cfg, sp, header, ip [1]);
8195 CHECK_STACK_OVF (1);
8196 EMIT_NEW_PCONST (cfg, ins, NULL);
8197 ins->type = STACK_OBJ;
8202 CHECK_STACK_OVF (1);
8203 EMIT_NEW_ICONST (cfg, ins, -1);
8216 CHECK_STACK_OVF (1);
8217 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8223 CHECK_STACK_OVF (1);
8225 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8231 CHECK_STACK_OVF (1);
8232 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8238 CHECK_STACK_OVF (1);
8239 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8240 ins->type = STACK_I8;
8241 ins->dreg = alloc_dreg (cfg, STACK_I8);
8243 ins->inst_l = (gint64)read64 (ip);
8244 MONO_ADD_INS (cfg->cbb, ins);
8250 gboolean use_aotconst = FALSE;
8252 #ifdef TARGET_POWERPC
8253 /* FIXME: Clean this up */
8254 if (cfg->compile_aot)
8255 use_aotconst = TRUE;
8258 /* FIXME: we should really allocate this only late in the compilation process */
8259 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8261 CHECK_STACK_OVF (1);
8267 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8269 dreg = alloc_freg (cfg);
8270 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8271 ins->type = cfg->r4_stack_type;
8273 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8274 ins->type = cfg->r4_stack_type;
8275 ins->dreg = alloc_dreg (cfg, STACK_R8);
8277 MONO_ADD_INS (cfg->cbb, ins);
8287 gboolean use_aotconst = FALSE;
8289 #ifdef TARGET_POWERPC
8290 /* FIXME: Clean this up */
8291 if (cfg->compile_aot)
8292 use_aotconst = TRUE;
8295 /* FIXME: we should really allocate this only late in the compilation process */
8296 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8298 CHECK_STACK_OVF (1);
8304 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8306 dreg = alloc_freg (cfg);
8307 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8308 ins->type = STACK_R8;
8310 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8311 ins->type = STACK_R8;
8312 ins->dreg = alloc_dreg (cfg, STACK_R8);
8314 MONO_ADD_INS (cfg->cbb, ins);
8323 MonoInst *temp, *store;
8325 CHECK_STACK_OVF (1);
8329 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8330 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8332 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8335 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8348 if (sp [0]->type == STACK_R8)
8349 /* we need to pop the value from the x86 FP stack */
8350 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8355 MonoMethodSignature *fsig;
8358 INLINE_FAILURE ("jmp");
8359 GSHAREDVT_FAILURE (*ip);
8362 if (stack_start != sp)
8364 token = read32 (ip + 1);
8365 /* FIXME: check the signature matches */
8366 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8369 if (cfg->gshared && mono_method_check_context_used (cmethod))
8370 GENERIC_SHARING_FAILURE (CEE_JMP);
8372 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8374 fsig = mono_method_signature (cmethod);
8375 n = fsig->param_count + fsig->hasthis;
8376 if (cfg->llvm_only) {
8379 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8380 for (i = 0; i < n; ++i)
8381 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8382 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8384 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8385 * have to emit a normal return since llvm expects it.
8388 emit_setret (cfg, ins);
8389 MONO_INST_NEW (cfg, ins, OP_BR);
8390 ins->inst_target_bb = end_bblock;
8391 MONO_ADD_INS (cfg->cbb, ins);
8392 link_bblock (cfg, cfg->cbb, end_bblock);
8395 } else if (cfg->backend->have_op_tail_call) {
8396 /* Handle tail calls similarly to calls */
8399 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8400 call->method = cmethod;
8401 call->tail_call = TRUE;
8402 call->signature = mono_method_signature (cmethod);
8403 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8404 call->inst.inst_p0 = cmethod;
8405 for (i = 0; i < n; ++i)
8406 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8408 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8409 call->vret_var = cfg->vret_addr;
8411 mono_arch_emit_call (cfg, call);
8412 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8413 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8415 for (i = 0; i < num_args; ++i)
8416 /* Prevent arguments from being optimized away */
8417 arg_array [i]->flags |= MONO_INST_VOLATILE;
8419 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8420 ins = (MonoInst*)call;
8421 ins->inst_p0 = cmethod;
8422 MONO_ADD_INS (cfg->cbb, ins);
8426 start_new_bblock = 1;
8431 MonoMethodSignature *fsig;
8434 token = read32 (ip + 1);
8438 //GSHAREDVT_FAILURE (*ip);
8443 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8446 if (method->dynamic && fsig->pinvoke) {
8450 * This is a call through a function pointer using a pinvoke
8451 * signature. Have to create a wrapper and call that instead.
8452 * FIXME: This is very slow, need to create a wrapper at JIT time
8453 * instead based on the signature.
8455 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8456 EMIT_NEW_PCONST (cfg, args [1], fsig);
8458 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8461 n = fsig->param_count + fsig->hasthis;
8465 //g_assert (!virtual_ || fsig->hasthis);
8469 inline_costs += 10 * num_calls++;
8472 * Making generic calls out of gsharedvt methods.
8473 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8474 * patching gshared method addresses into a gsharedvt method.
8476 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8478 * We pass the address to the gsharedvt trampoline in the rgctx reg
8480 MonoInst *callee = addr;
8482 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8484 GSHAREDVT_FAILURE (*ip);
8488 GSHAREDVT_FAILURE (*ip);
8490 addr = emit_get_rgctx_sig (cfg, context_used,
8491 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8492 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8496 /* Prevent inlining of methods with indirect calls */
8497 INLINE_FAILURE ("indirect call");
8499 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8500 MonoJumpInfoType info_type;
8504 * Instead of emitting an indirect call, emit a direct call
8505 * with the contents of the aotconst as the patch info.
8507 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8508 info_type = (MonoJumpInfoType)addr->inst_c1;
8509 info_data = addr->inst_p0;
8511 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8512 info_data = addr->inst_right->inst_left;
8515 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8516 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8519 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8520 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8525 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8529 /* End of call, INS should contain the result of the call, if any */
8531 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8533 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8536 CHECK_CFG_EXCEPTION;
8540 constrained_class = NULL;
8544 case CEE_CALLVIRT: {
8545 MonoInst *addr = NULL;
8546 MonoMethodSignature *fsig = NULL;
8548 int virtual_ = *ip == CEE_CALLVIRT;
8549 gboolean pass_imt_from_rgctx = FALSE;
8550 MonoInst *imt_arg = NULL;
8551 MonoInst *keep_this_alive = NULL;
8552 gboolean pass_vtable = FALSE;
8553 gboolean pass_mrgctx = FALSE;
8554 MonoInst *vtable_arg = NULL;
8555 gboolean check_this = FALSE;
8556 gboolean supported_tail_call = FALSE;
8557 gboolean tail_call = FALSE;
8558 gboolean need_seq_point = FALSE;
8559 guint32 call_opcode = *ip;
8560 gboolean emit_widen = TRUE;
8561 gboolean push_res = TRUE;
8562 gboolean skip_ret = FALSE;
8563 gboolean delegate_invoke = FALSE;
8564 gboolean direct_icall = FALSE;
8565 gboolean constrained_partial_call = FALSE;
8566 MonoMethod *cil_method;
8569 token = read32 (ip + 1);
8573 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8576 cil_method = cmethod;
8578 if (constrained_class) {
8579 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8580 if (!mini_is_gsharedvt_klass (constrained_class)) {
8581 g_assert (!cmethod->klass->valuetype);
8582 if (!mini_type_is_reference (&constrained_class->byval_arg))
8583 constrained_partial_call = TRUE;
8587 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8588 if (cfg->verbose_level > 2)
8589 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8590 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8591 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8593 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8597 if (cfg->verbose_level > 2)
8598 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8600 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8602 * This is needed since get_method_constrained can't find
8603 * the method in klass representing a type var.
8604 * The type var is guaranteed to be a reference type in this
8607 if (!mini_is_gsharedvt_klass (constrained_class))
8608 g_assert (!cmethod->klass->valuetype);
8610 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8615 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8616 /* Use the corresponding method from the base type to avoid boxing */
8617 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8618 g_assert (base_type);
8619 constrained_class = mono_class_from_mono_type (base_type);
8620 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8625 if (!dont_verify && !cfg->skip_visibility) {
8626 MonoMethod *target_method = cil_method;
8627 if (method->is_inflated) {
8628 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8631 if (!mono_method_can_access_method (method_definition, target_method) &&
8632 !mono_method_can_access_method (method, cil_method))
8633 emit_method_access_failure (cfg, method, cil_method);
8636 if (mono_security_core_clr_enabled ())
8637 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8639 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8640 /* MS.NET seems to silently convert this to a callvirt */
8645 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8646 * converts to a callvirt.
8648 * tests/bug-515884.il is an example of this behavior
8650 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8651 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8652 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8656 if (!cmethod->klass->inited)
8657 if (!mono_class_init (cmethod->klass))
8658 TYPE_LOAD_ERROR (cmethod->klass);
8660 fsig = mono_method_signature (cmethod);
8663 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8664 mini_class_is_system_array (cmethod->klass)) {
8665 array_rank = cmethod->klass->rank;
8666 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8667 direct_icall = TRUE;
8668 } else if (fsig->pinvoke) {
8669 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8670 fsig = mono_method_signature (wrapper);
8671 } else if (constrained_class) {
8673 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8677 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8678 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8680 /* See code below */
8681 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8682 MonoBasicBlock *tbb;
8684 GET_BBLOCK (cfg, tbb, ip + 5);
8685 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8687 * We want to extend the try block to cover the call, but we can't do it if the
8688 * call is made directly since its followed by an exception check.
8690 direct_icall = FALSE;
8694 mono_save_token_info (cfg, image, token, cil_method);
8696 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8697 need_seq_point = TRUE;
8699 /* Don't support calls made using type arguments for now */
8701 if (cfg->gsharedvt) {
8702 if (mini_is_gsharedvt_signature (fsig))
8703 GSHAREDVT_FAILURE (*ip);
8707 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8708 g_assert_not_reached ();
8710 n = fsig->param_count + fsig->hasthis;
8712 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8716 g_assert (!mono_method_check_context_used (cmethod));
8720 //g_assert (!virtual_ || fsig->hasthis);
8725 * We have the `constrained.' prefix opcode.
8727 if (constrained_class) {
8728 if (mini_is_gsharedvt_klass (constrained_class)) {
8729 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8730 /* The 'Own method' case below */
8731 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8732 /* 'The type parameter is instantiated as a reference type' case below. */
8734 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8735 CHECK_CFG_EXCEPTION;
8741 if (constrained_partial_call) {
8742 gboolean need_box = TRUE;
8745 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8746 * called method is not known at compile time either. The called method could end up being
8747 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8748 * to box the receiver.
8749 * A simple solution would be to box always and make a normal virtual call, but that would
8750 * be bad performance wise.
8752 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8754 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
8759 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8760 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8761 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8762 ins->klass = constrained_class;
8763 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8764 CHECK_CFG_EXCEPTION;
8765 } else if (need_box) {
8767 MonoBasicBlock *is_ref_bb, *end_bb;
8768 MonoInst *nonbox_call;
8771 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8773 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8774 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8776 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8778 NEW_BBLOCK (cfg, is_ref_bb);
8779 NEW_BBLOCK (cfg, end_bb);
8781 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8783 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8786 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8791 MONO_START_BB (cfg, is_ref_bb);
8792 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8793 ins->klass = constrained_class;
8794 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8795 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8797 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8799 MONO_START_BB (cfg, end_bb);
8802 nonbox_call->dreg = ins->dreg;
8805 g_assert (mono_class_is_interface (cmethod->klass));
8806 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8807 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8810 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8812 * The type parameter is instantiated as a valuetype,
8813 * but that type doesn't override the method we're
8814 * calling, so we need to box `this'.
8816 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8817 ins->klass = constrained_class;
8818 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8819 CHECK_CFG_EXCEPTION;
8820 } else if (!constrained_class->valuetype) {
8821 int dreg = alloc_ireg_ref (cfg);
8824 * The type parameter is instantiated as a reference
8825 * type. We have a managed pointer on the stack, so
8826 * we need to dereference it here.
8828 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8829 ins->type = STACK_OBJ;
8832 if (cmethod->klass->valuetype) {
8835 /* Interface method */
8838 mono_class_setup_vtable (constrained_class);
8839 CHECK_TYPELOAD (constrained_class);
8840 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8842 TYPE_LOAD_ERROR (constrained_class);
8843 slot = mono_method_get_vtable_slot (cmethod);
8845 TYPE_LOAD_ERROR (cmethod->klass);
8846 cmethod = constrained_class->vtable [ioffset + slot];
8848 if (cmethod->klass == mono_defaults.enum_class) {
8849 /* Enum implements some interfaces, so treat this as the first case */
8850 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8851 ins->klass = constrained_class;
8852 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8853 CHECK_CFG_EXCEPTION;
8858 constrained_class = NULL;
8861 if (check_call_signature (cfg, fsig, sp))
8864 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8865 delegate_invoke = TRUE;
8867 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8868 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8869 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8877 * If the callee is a shared method, then its static cctor
8878 * might not get called after the call was patched.
8880 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8881 emit_class_init (cfg, cmethod->klass);
8882 CHECK_TYPELOAD (cmethod->klass);
8885 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8888 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8890 context_used = mini_method_check_context_used (cfg, cmethod);
8892 if (context_used && mono_class_is_interface (cmethod->klass)) {
8893 /* Generic method interface
8894 calls are resolved via a
8895 helper function and don't
8897 if (!cmethod_context || !cmethod_context->method_inst)
8898 pass_imt_from_rgctx = TRUE;
8902 * If a shared method calls another
8903 * shared method then the caller must
8904 * have a generic sharing context
8905 * because the magic trampoline
8906 * requires it. FIXME: We shouldn't
8907 * have to force the vtable/mrgctx
8908 * variable here. Instead there
8909 * should be a flag in the cfg to
8910 * request a generic sharing context.
8913 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8914 mono_get_vtable_var (cfg);
8919 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8921 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8923 CHECK_TYPELOAD (cmethod->klass);
8924 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8929 g_assert (!vtable_arg);
8931 if (!cfg->compile_aot) {
8933 * emit_get_rgctx_method () calls mono_class_vtable () so check
8934 * for type load errors before.
8936 mono_class_setup_vtable (cmethod->klass);
8937 CHECK_TYPELOAD (cmethod->klass);
8940 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8942 /* !marshalbyref is needed to properly handle generic methods + remoting */
8943 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8944 MONO_METHOD_IS_FINAL (cmethod)) &&
8945 !mono_class_is_marshalbyref (cmethod->klass)) {
8952 if (pass_imt_from_rgctx) {
8953 g_assert (!pass_vtable);
8955 imt_arg = emit_get_rgctx_method (cfg, context_used,
8956 cmethod, MONO_RGCTX_INFO_METHOD);
8960 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8962 /* Calling virtual generic methods */
8963 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8964 !(MONO_METHOD_IS_FINAL (cmethod) &&
8965 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8966 fsig->generic_param_count &&
8967 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8969 MonoInst *this_temp, *this_arg_temp, *store;
8970 MonoInst *iargs [4];
8972 g_assert (fsig->is_inflated);
8974 /* Prevent inlining of methods that contain indirect calls */
8975 INLINE_FAILURE ("virtual generic call");
8977 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8978 GSHAREDVT_FAILURE (*ip);
8980 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8981 g_assert (!imt_arg);
8983 g_assert (cmethod->is_inflated);
8984 imt_arg = emit_get_rgctx_method (cfg, context_used,
8985 cmethod, MONO_RGCTX_INFO_METHOD);
8986 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8988 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8989 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8990 MONO_ADD_INS (cfg->cbb, store);
8992 /* FIXME: This should be a managed pointer */
8993 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8995 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8996 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8997 cmethod, MONO_RGCTX_INFO_METHOD);
8998 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8999 addr = mono_emit_jit_icall (cfg,
9000 mono_helper_compile_generic_method, iargs);
9002 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9004 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9011 * Implement a workaround for the inherent races involved in locking:
9017 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9018 * try block, the Exit () won't be executed, see:
9019 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9020 * To work around this, we extend such try blocks to include the last x bytes
9021 * of the Monitor.Enter () call.
9023 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9024 MonoBasicBlock *tbb;
9026 GET_BBLOCK (cfg, tbb, ip + 5);
9028 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9029 * from Monitor.Enter like ArgumentNullException.
9031 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9032 /* Mark this bblock as needing to be extended */
9033 tbb->extend_try_block = TRUE;
9037 /* Conversion to a JIT intrinsic */
9038 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9039 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9040 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9048 if ((cfg->opt & MONO_OPT_INLINE) &&
9049 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9050 mono_method_check_inlining (cfg, cmethod)) {
9052 gboolean always = FALSE;
9054 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9055 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9056 /* Prevent inlining of methods that call wrappers */
9057 INLINE_FAILURE ("wrapper call");
9058 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9062 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9064 cfg->real_offset += 5;
9066 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9067 /* *sp is already set by inline_method */
9072 inline_costs += costs;
9078 /* Tail recursion elimination */
9079 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9080 gboolean has_vtargs = FALSE;
9083 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9084 INLINE_FAILURE ("tail call");
9086 /* keep it simple */
9087 for (i = fsig->param_count - 1; i >= 0; i--) {
9088 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9093 if (need_seq_point) {
9094 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9095 need_seq_point = FALSE;
9097 for (i = 0; i < n; ++i)
9098 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9099 MONO_INST_NEW (cfg, ins, OP_BR);
9100 MONO_ADD_INS (cfg->cbb, ins);
9101 tblock = start_bblock->out_bb [0];
9102 link_bblock (cfg, cfg->cbb, tblock);
9103 ins->inst_target_bb = tblock;
9104 start_new_bblock = 1;
9106 /* skip the CEE_RET, too */
9107 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9114 inline_costs += 10 * num_calls++;
9117 * Synchronized wrappers.
9118 * It's hard to determine where to replace a method with its synchronized
9119 * wrapper without causing an infinite recursion. The current solution is
9120 * to add the synchronized wrapper in the trampolines, and to
9121 * change the called method to a dummy wrapper, and resolve that wrapper
9122 * to the real method in mono_jit_compile_method ().
9124 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9125 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9126 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9127 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9131 * Making generic calls out of gsharedvt methods.
9132 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9133 * patching gshared method addresses into a gsharedvt method.
9135 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9136 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9137 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9138 MonoRgctxInfoType info_type;
9141 //if (mono_class_is_interface (cmethod->klass))
9142 //GSHAREDVT_FAILURE (*ip);
9143 // disable for possible remoting calls
9144 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9145 GSHAREDVT_FAILURE (*ip);
9146 if (fsig->generic_param_count) {
9147 /* virtual generic call */
9148 g_assert (!imt_arg);
9149 /* Same as the virtual generic case above */
9150 imt_arg = emit_get_rgctx_method (cfg, context_used,
9151 cmethod, MONO_RGCTX_INFO_METHOD);
9152 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9154 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9155 /* This can happen when we call a fully instantiated iface method */
9156 imt_arg = emit_get_rgctx_method (cfg, context_used,
9157 cmethod, MONO_RGCTX_INFO_METHOD);
9162 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9163 keep_this_alive = sp [0];
9165 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9166 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9168 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9169 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9171 if (cfg->llvm_only) {
9172 // FIXME: Avoid initializing vtable_arg
9173 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9175 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9180 /* Generic sharing */
9183 * Use this if the callee is gsharedvt sharable too, since
9184 * at runtime we might find an instantiation so the call cannot
9185 * be patched (the 'no_patch' code path in mini-trampolines.c).
9187 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9188 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9189 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9190 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9191 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9192 INLINE_FAILURE ("gshared");
9194 g_assert (cfg->gshared && cmethod);
9198 * We are compiling a call to a
9199 * generic method from shared code,
9200 * which means that we have to look up
9201 * the method in the rgctx and do an
9205 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9207 if (cfg->llvm_only) {
9208 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9209 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9211 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9212 // FIXME: Avoid initializing imt_arg/vtable_arg
9213 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9215 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9216 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9221 /* Direct calls to icalls */
9223 MonoMethod *wrapper;
9226 /* Inline the wrapper */
9227 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9229 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9230 g_assert (costs > 0);
9231 cfg->real_offset += 5;
9233 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9234 /* *sp is already set by inline_method */
9239 inline_costs += costs;
9248 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9249 MonoInst *val = sp [fsig->param_count];
9251 if (val->type == STACK_OBJ) {
9252 MonoInst *iargs [2];
9257 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9260 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9261 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9262 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9263 emit_write_barrier (cfg, addr, val);
9264 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9265 GSHAREDVT_FAILURE (*ip);
9266 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9267 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9269 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9270 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9271 if (!cmethod->klass->element_class->valuetype && !readonly)
9272 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9273 CHECK_TYPELOAD (cmethod->klass);
9276 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9279 g_assert_not_reached ();
9286 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9290 /* Tail prefix / tail call optimization */
9292 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9293 /* FIXME: runtime generic context pointer for jumps? */
9294 /* FIXME: handle this for generic sharing eventually */
9295 if ((ins_flag & MONO_INST_TAILCALL) &&
9296 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9297 supported_tail_call = TRUE;
9299 if (supported_tail_call) {
9302 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9303 INLINE_FAILURE ("tail call");
9305 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9307 if (cfg->backend->have_op_tail_call) {
9308 /* Handle tail calls similarly to normal calls */
9311 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9313 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9314 call->tail_call = TRUE;
9315 call->method = cmethod;
9316 call->signature = mono_method_signature (cmethod);
9319 * We implement tail calls by storing the actual arguments into the
9320 * argument variables, then emitting a CEE_JMP.
9322 for (i = 0; i < n; ++i) {
9323 /* Prevent argument from being register allocated */
9324 arg_array [i]->flags |= MONO_INST_VOLATILE;
9325 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9327 ins = (MonoInst*)call;
9328 ins->inst_p0 = cmethod;
9329 ins->inst_p1 = arg_array [0];
9330 MONO_ADD_INS (cfg->cbb, ins);
9331 link_bblock (cfg, cfg->cbb, end_bblock);
9332 start_new_bblock = 1;
9334 // FIXME: Eliminate unreachable epilogs
9337 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9338 * only reachable from this call.
9340 GET_BBLOCK (cfg, tblock, ip + 5);
9341 if (tblock == cfg->cbb || tblock->in_count == 0)
9350 * Virtual calls in llvm-only mode.
9352 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9353 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9358 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9359 INLINE_FAILURE ("call");
9360 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9361 imt_arg, vtable_arg);
9363 if (tail_call && !cfg->llvm_only) {
9364 link_bblock (cfg, cfg->cbb, end_bblock);
9365 start_new_bblock = 1;
9367 // FIXME: Eliminate unreachable epilogs
9370 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9371 * only reachable from this call.
9373 GET_BBLOCK (cfg, tblock, ip + 5);
9374 if (tblock == cfg->cbb || tblock->in_count == 0)
9381 /* End of call, INS should contain the result of the call, if any */
9383 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9386 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9391 if (keep_this_alive) {
9392 MonoInst *dummy_use;
9394 /* See mono_emit_method_call_full () */
9395 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9398 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9400 * Clang can convert these calls to tail calls which screw up the stack
9401 * walk. This happens even when the -fno-optimize-sibling-calls
9402 * option is passed to clang.
9403 * Work around this by emitting a dummy call.
9405 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9408 CHECK_CFG_EXCEPTION;
9412 g_assert (*ip == CEE_RET);
9416 constrained_class = NULL;
9418 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9422 if (cfg->method != method) {
9423 /* return from inlined method */
9425 * If in_count == 0, that means the ret is unreachable due to
9426 * being preceded by a throw. In that case, inline_method () will
9427 * handle setting the return value
9428 * (test case: test_0_inline_throw ()).
9430 if (return_var && cfg->cbb->in_count) {
9431 MonoType *ret_type = mono_method_signature (method)->ret;
9437 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9440 //g_assert (returnvar != -1);
9441 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9442 cfg->ret_var_set = TRUE;
9445 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9447 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9451 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9453 if (seq_points && !sym_seq_points) {
9455 * Place a seq point here too even though the IL stack is not
9456 * empty, so a step over on
9459 * will work correctly.
9461 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9462 MONO_ADD_INS (cfg->cbb, ins);
9465 g_assert (!return_var);
9469 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9472 emit_setret (cfg, *sp);
9475 if (sp != stack_start)
9477 MONO_INST_NEW (cfg, ins, OP_BR);
9479 ins->inst_target_bb = end_bblock;
9480 MONO_ADD_INS (cfg->cbb, ins);
9481 link_bblock (cfg, cfg->cbb, end_bblock);
9482 start_new_bblock = 1;
9486 MONO_INST_NEW (cfg, ins, OP_BR);
9488 target = ip + 1 + (signed char)(*ip);
9490 GET_BBLOCK (cfg, tblock, target);
9491 link_bblock (cfg, cfg->cbb, tblock);
9492 ins->inst_target_bb = tblock;
9493 if (sp != stack_start) {
9494 handle_stack_args (cfg, stack_start, sp - stack_start);
9496 CHECK_UNVERIFIABLE (cfg);
9498 MONO_ADD_INS (cfg->cbb, ins);
9499 start_new_bblock = 1;
9500 inline_costs += BRANCH_COST;
9514 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9516 target = ip + 1 + *(signed char*)ip;
9522 inline_costs += BRANCH_COST;
9526 MONO_INST_NEW (cfg, ins, OP_BR);
9529 target = ip + 4 + (gint32)read32(ip);
9531 GET_BBLOCK (cfg, tblock, target);
9532 link_bblock (cfg, cfg->cbb, tblock);
9533 ins->inst_target_bb = tblock;
9534 if (sp != stack_start) {
9535 handle_stack_args (cfg, stack_start, sp - stack_start);
9537 CHECK_UNVERIFIABLE (cfg);
9540 MONO_ADD_INS (cfg->cbb, ins);
9542 start_new_bblock = 1;
9543 inline_costs += BRANCH_COST;
9550 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9551 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9552 guint32 opsize = is_short ? 1 : 4;
9554 CHECK_OPSIZE (opsize);
9556 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9559 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9564 GET_BBLOCK (cfg, tblock, target);
9565 link_bblock (cfg, cfg->cbb, tblock);
9566 GET_BBLOCK (cfg, tblock, ip);
9567 link_bblock (cfg, cfg->cbb, tblock);
9569 if (sp != stack_start) {
9570 handle_stack_args (cfg, stack_start, sp - stack_start);
9571 CHECK_UNVERIFIABLE (cfg);
9574 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9575 cmp->sreg1 = sp [0]->dreg;
9576 type_from_op (cfg, cmp, sp [0], NULL);
9579 #if SIZEOF_REGISTER == 4
9580 if (cmp->opcode == OP_LCOMPARE_IMM) {
9581 /* Convert it to OP_LCOMPARE */
9582 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9583 ins->type = STACK_I8;
9584 ins->dreg = alloc_dreg (cfg, STACK_I8);
9586 MONO_ADD_INS (cfg->cbb, ins);
9587 cmp->opcode = OP_LCOMPARE;
9588 cmp->sreg2 = ins->dreg;
9591 MONO_ADD_INS (cfg->cbb, cmp);
9593 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9594 type_from_op (cfg, ins, sp [0], NULL);
9595 MONO_ADD_INS (cfg->cbb, ins);
9596 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9597 GET_BBLOCK (cfg, tblock, target);
9598 ins->inst_true_bb = tblock;
9599 GET_BBLOCK (cfg, tblock, ip);
9600 ins->inst_false_bb = tblock;
9601 start_new_bblock = 2;
9604 inline_costs += BRANCH_COST;
9619 MONO_INST_NEW (cfg, ins, *ip);
9621 target = ip + 4 + (gint32)read32(ip);
9627 inline_costs += BRANCH_COST;
9631 MonoBasicBlock **targets;
9632 MonoBasicBlock *default_bblock;
9633 MonoJumpInfoBBTable *table;
9634 int offset_reg = alloc_preg (cfg);
9635 int target_reg = alloc_preg (cfg);
9636 int table_reg = alloc_preg (cfg);
9637 int sum_reg = alloc_preg (cfg);
9638 gboolean use_op_switch;
9642 n = read32 (ip + 1);
9645 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9649 CHECK_OPSIZE (n * sizeof (guint32));
9650 target = ip + n * sizeof (guint32);
9652 GET_BBLOCK (cfg, default_bblock, target);
9653 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9655 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9656 for (i = 0; i < n; ++i) {
9657 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9658 targets [i] = tblock;
9659 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9663 if (sp != stack_start) {
9665 * Link the current bb with the targets as well, so handle_stack_args
9666 * will set their in_stack correctly.
9668 link_bblock (cfg, cfg->cbb, default_bblock);
9669 for (i = 0; i < n; ++i)
9670 link_bblock (cfg, cfg->cbb, targets [i]);
9672 handle_stack_args (cfg, stack_start, sp - stack_start);
9674 CHECK_UNVERIFIABLE (cfg);
9676 /* Undo the links */
9677 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9678 for (i = 0; i < n; ++i)
9679 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9685 for (i = 0; i < n; ++i)
9686 link_bblock (cfg, cfg->cbb, targets [i]);
9688 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9689 table->table = targets;
9690 table->table_size = n;
9692 use_op_switch = FALSE;
9694 /* ARM implements SWITCH statements differently */
9695 /* FIXME: Make it use the generic implementation */
9696 if (!cfg->compile_aot)
9697 use_op_switch = TRUE;
9700 if (COMPILE_LLVM (cfg))
9701 use_op_switch = TRUE;
9703 cfg->cbb->has_jump_table = 1;
9705 if (use_op_switch) {
9706 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9707 ins->sreg1 = src1->dreg;
9708 ins->inst_p0 = table;
9709 ins->inst_many_bb = targets;
9710 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9711 MONO_ADD_INS (cfg->cbb, ins);
9713 if (sizeof (gpointer) == 8)
9714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9718 #if SIZEOF_REGISTER == 8
9719 /* The upper word might not be zero, and we add it to a 64 bit address later */
9720 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9723 if (cfg->compile_aot) {
9724 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9726 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9727 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9728 ins->inst_p0 = table;
9729 ins->dreg = table_reg;
9730 MONO_ADD_INS (cfg->cbb, ins);
9733 /* FIXME: Use load_memindex */
9734 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9736 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9738 start_new_bblock = 1;
9739 inline_costs += (BRANCH_COST * 2);
9759 dreg = alloc_freg (cfg);
9762 dreg = alloc_lreg (cfg);
9765 dreg = alloc_ireg_ref (cfg);
9768 dreg = alloc_preg (cfg);
9771 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9772 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9773 if (*ip == CEE_LDIND_R4)
9774 ins->type = cfg->r4_stack_type;
9775 ins->flags |= ins_flag;
9776 MONO_ADD_INS (cfg->cbb, ins);
9778 if (ins_flag & MONO_INST_VOLATILE) {
9779 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9780 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9796 if (ins_flag & MONO_INST_VOLATILE) {
9797 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9798 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9801 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9802 ins->flags |= ins_flag;
9805 MONO_ADD_INS (cfg->cbb, ins);
9807 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9808 emit_write_barrier (cfg, sp [0], sp [1]);
9817 MONO_INST_NEW (cfg, ins, (*ip));
9819 ins->sreg1 = sp [0]->dreg;
9820 ins->sreg2 = sp [1]->dreg;
9821 type_from_op (cfg, ins, sp [0], sp [1]);
9823 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9825 /* Use the immediate opcodes if possible */
9826 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9827 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9828 if (imm_opcode != -1) {
9829 ins->opcode = imm_opcode;
9830 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9833 NULLIFY_INS (sp [1]);
9837 MONO_ADD_INS ((cfg)->cbb, (ins));
9839 *sp++ = mono_decompose_opcode (cfg, ins);
9856 MONO_INST_NEW (cfg, ins, (*ip));
9858 ins->sreg1 = sp [0]->dreg;
9859 ins->sreg2 = sp [1]->dreg;
9860 type_from_op (cfg, ins, sp [0], sp [1]);
9862 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9863 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9865 /* FIXME: Pass opcode to is_inst_imm */
9867 /* Use the immediate opcodes if possible */
9868 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9869 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9870 if (imm_opcode != -1) {
9871 ins->opcode = imm_opcode;
9872 if (sp [1]->opcode == OP_I8CONST) {
9873 #if SIZEOF_REGISTER == 8
9874 ins->inst_imm = sp [1]->inst_l;
9876 ins->inst_ls_word = sp [1]->inst_ls_word;
9877 ins->inst_ms_word = sp [1]->inst_ms_word;
9881 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9884 /* Might be followed by an instruction added by add_widen_op */
9885 if (sp [1]->next == NULL)
9886 NULLIFY_INS (sp [1]);
9889 MONO_ADD_INS ((cfg)->cbb, (ins));
9891 *sp++ = mono_decompose_opcode (cfg, ins);
9904 case CEE_CONV_OVF_I8:
9905 case CEE_CONV_OVF_U8:
9909 /* Special case this earlier so we have long constants in the IR */
9910 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9911 int data = sp [-1]->inst_c0;
9912 sp [-1]->opcode = OP_I8CONST;
9913 sp [-1]->type = STACK_I8;
9914 #if SIZEOF_REGISTER == 8
9915 if ((*ip) == CEE_CONV_U8)
9916 sp [-1]->inst_c0 = (guint32)data;
9918 sp [-1]->inst_c0 = data;
9920 sp [-1]->inst_ls_word = data;
9921 if ((*ip) == CEE_CONV_U8)
9922 sp [-1]->inst_ms_word = 0;
9924 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9926 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9933 case CEE_CONV_OVF_I4:
9934 case CEE_CONV_OVF_I1:
9935 case CEE_CONV_OVF_I2:
9936 case CEE_CONV_OVF_I:
9937 case CEE_CONV_OVF_U:
9940 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9941 ADD_UNOP (CEE_CONV_OVF_I8);
9948 case CEE_CONV_OVF_U1:
9949 case CEE_CONV_OVF_U2:
9950 case CEE_CONV_OVF_U4:
9953 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9954 ADD_UNOP (CEE_CONV_OVF_U8);
9961 case CEE_CONV_OVF_I1_UN:
9962 case CEE_CONV_OVF_I2_UN:
9963 case CEE_CONV_OVF_I4_UN:
9964 case CEE_CONV_OVF_I8_UN:
9965 case CEE_CONV_OVF_U1_UN:
9966 case CEE_CONV_OVF_U2_UN:
9967 case CEE_CONV_OVF_U4_UN:
9968 case CEE_CONV_OVF_U8_UN:
9969 case CEE_CONV_OVF_I_UN:
9970 case CEE_CONV_OVF_U_UN:
9977 CHECK_CFG_EXCEPTION;
9981 case CEE_ADD_OVF_UN:
9983 case CEE_MUL_OVF_UN:
9985 case CEE_SUB_OVF_UN:
9991 GSHAREDVT_FAILURE (*ip);
9994 token = read32 (ip + 1);
9995 klass = mini_get_class (method, token, generic_context);
9996 CHECK_TYPELOAD (klass);
9998 if (generic_class_is_reference_type (cfg, klass)) {
9999 MonoInst *store, *load;
10000 int dreg = alloc_ireg_ref (cfg);
10002 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10003 load->flags |= ins_flag;
10004 MONO_ADD_INS (cfg->cbb, load);
10006 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10007 store->flags |= ins_flag;
10008 MONO_ADD_INS (cfg->cbb, store);
10010 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10011 emit_write_barrier (cfg, sp [0], sp [1]);
10013 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10019 int loc_index = -1;
10025 token = read32 (ip + 1);
10026 klass = mini_get_class (method, token, generic_context);
10027 CHECK_TYPELOAD (klass);
10029 /* Optimize the common ldobj+stloc combination */
10032 loc_index = ip [6];
10039 loc_index = ip [5] - CEE_STLOC_0;
10046 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10047 CHECK_LOCAL (loc_index);
10049 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10050 ins->dreg = cfg->locals [loc_index]->dreg;
10051 ins->flags |= ins_flag;
10054 if (ins_flag & MONO_INST_VOLATILE) {
10055 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10056 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10062 /* Optimize the ldobj+stobj combination */
10063 /* The reference case ends up being a load+store anyway */
10064 /* Skip this if the operation is volatile. */
10065 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10070 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10077 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10078 ins->flags |= ins_flag;
10081 if (ins_flag & MONO_INST_VOLATILE) {
10082 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10083 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10092 CHECK_STACK_OVF (1);
10094 n = read32 (ip + 1);
10096 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10097 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10098 ins->type = STACK_OBJ;
10101 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10102 MonoInst *iargs [1];
10103 char *str = (char *)mono_method_get_wrapper_data (method, n);
10105 if (cfg->compile_aot)
10106 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10108 EMIT_NEW_PCONST (cfg, iargs [0], str);
10109 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10111 if (cfg->opt & MONO_OPT_SHARED) {
10112 MonoInst *iargs [3];
10114 if (cfg->compile_aot) {
10115 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10117 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10118 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10119 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10120 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10121 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10124 if (cfg->cbb->out_of_line) {
10125 MonoInst *iargs [2];
10127 if (image == mono_defaults.corlib) {
10129 * Avoid relocations in AOT and save some space by using a
10130 * version of helper_ldstr specialized to mscorlib.
10132 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10133 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10135 /* Avoid creating the string object */
10136 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10137 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10138 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10142 if (cfg->compile_aot) {
10143 NEW_LDSTRCONST (cfg, ins, image, n);
10145 MONO_ADD_INS (cfg->cbb, ins);
10148 NEW_PCONST (cfg, ins, NULL);
10149 ins->type = STACK_OBJ;
10150 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10154 OUT_OF_MEMORY_FAILURE;
10157 MONO_ADD_INS (cfg->cbb, ins);
10166 MonoInst *iargs [2];
10167 MonoMethodSignature *fsig;
10170 MonoInst *vtable_arg = NULL;
10173 token = read32 (ip + 1);
10174 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10177 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10180 mono_save_token_info (cfg, image, token, cmethod);
10182 if (!mono_class_init (cmethod->klass))
10183 TYPE_LOAD_ERROR (cmethod->klass);
10185 context_used = mini_method_check_context_used (cfg, cmethod);
10187 if (mono_security_core_clr_enabled ())
10188 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10190 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10191 emit_class_init (cfg, cmethod->klass);
10192 CHECK_TYPELOAD (cmethod->klass);
10196 if (cfg->gsharedvt) {
10197 if (mini_is_gsharedvt_variable_signature (sig))
10198 GSHAREDVT_FAILURE (*ip);
10202 n = fsig->param_count;
10206 * Generate smaller code for the common newobj <exception> instruction in
10207 * argument checking code.
10209 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10210 is_exception_class (cmethod->klass) && n <= 2 &&
10211 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10212 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10213 MonoInst *iargs [3];
10217 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10220 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10223 iargs [1] = sp [0];
10224 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10227 iargs [1] = sp [0];
10228 iargs [2] = sp [1];
10229 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10232 g_assert_not_reached ();
10240 /* move the args to allow room for 'this' in the first position */
10246 /* check_call_signature () requires sp[0] to be set */
10247 this_ins.type = STACK_OBJ;
10248 sp [0] = &this_ins;
10249 if (check_call_signature (cfg, fsig, sp))
10254 if (mini_class_is_system_array (cmethod->klass)) {
10255 *sp = emit_get_rgctx_method (cfg, context_used,
10256 cmethod, MONO_RGCTX_INFO_METHOD);
10258 /* Avoid varargs in the common case */
10259 if (fsig->param_count == 1)
10260 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10261 else if (fsig->param_count == 2)
10262 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10263 else if (fsig->param_count == 3)
10264 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10265 else if (fsig->param_count == 4)
10266 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10268 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10269 } else if (cmethod->string_ctor) {
10270 g_assert (!context_used);
10271 g_assert (!vtable_arg);
10272 /* we simply pass a null pointer */
10273 EMIT_NEW_PCONST (cfg, *sp, NULL);
10274 /* now call the string ctor */
10275 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10277 if (cmethod->klass->valuetype) {
10278 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10279 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10280 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10285 * The code generated by mini_emit_virtual_call () expects
10286 * iargs [0] to be a boxed instance, but luckily the vcall
10287 * will be transformed into a normal call there.
10289 } else if (context_used) {
10290 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10293 MonoVTable *vtable = NULL;
10295 if (!cfg->compile_aot)
10296 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10297 CHECK_TYPELOAD (cmethod->klass);
10300 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10301 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10302 * As a workaround, we call class cctors before allocating objects.
10304 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10305 emit_class_init (cfg, cmethod->klass);
10306 if (cfg->verbose_level > 2)
10307 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10308 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10311 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10314 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10317 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10319 /* Now call the actual ctor */
10320 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10321 CHECK_CFG_EXCEPTION;
10324 if (alloc == NULL) {
10326 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10327 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10335 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10336 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10339 case CEE_CASTCLASS:
10344 token = read32 (ip + 1);
10345 klass = mini_get_class (method, token, generic_context);
10346 CHECK_TYPELOAD (klass);
10347 if (sp [0]->type != STACK_OBJ)
10350 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10351 ins->dreg = alloc_preg (cfg);
10352 ins->sreg1 = (*sp)->dreg;
10353 ins->klass = klass;
10354 ins->type = STACK_OBJ;
10355 MONO_ADD_INS (cfg->cbb, ins);
10357 CHECK_CFG_EXCEPTION;
10361 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10364 case CEE_UNBOX_ANY: {
10365 MonoInst *res, *addr;
10370 token = read32 (ip + 1);
10371 klass = mini_get_class (method, token, generic_context);
10372 CHECK_TYPELOAD (klass);
10374 mono_save_token_info (cfg, image, token, klass);
10376 context_used = mini_class_check_context_used (cfg, klass);
10378 if (mini_is_gsharedvt_klass (klass)) {
10379 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10381 } else if (generic_class_is_reference_type (cfg, klass)) {
10382 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10383 EMIT_NEW_PCONST (cfg, res, NULL);
10384 res->type = STACK_OBJ;
10386 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10387 res->dreg = alloc_preg (cfg);
10388 res->sreg1 = (*sp)->dreg;
10389 res->klass = klass;
10390 res->type = STACK_OBJ;
10391 MONO_ADD_INS (cfg->cbb, res);
10392 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10394 } else if (mono_class_is_nullable (klass)) {
10395 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10397 addr = handle_unbox (cfg, klass, sp, context_used);
10399 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10410 MonoClass *enum_class;
10411 MonoMethod *has_flag;
10417 token = read32 (ip + 1);
10418 klass = mini_get_class (method, token, generic_context);
10419 CHECK_TYPELOAD (klass);
10421 mono_save_token_info (cfg, image, token, klass);
10423 context_used = mini_class_check_context_used (cfg, klass);
10425 if (generic_class_is_reference_type (cfg, klass)) {
10431 if (klass == mono_defaults.void_class)
10433 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10435 /* frequent check in generic code: box (struct), brtrue */
10440 * <push int/long ptr>
10443 * constrained. MyFlags
10444 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10446 * If we find this sequence and the operand types on box and constrained
10447 * are equal, we can emit a specialized instruction sequence instead of
10448 * the very slow HasFlag () call.
10450 if ((cfg->opt & MONO_OPT_INTRINS) &&
10451 /* Cheap checks first. */
10452 ip + 5 + 6 + 5 < end &&
10453 ip [5] == CEE_PREFIX1 &&
10454 ip [6] == CEE_CONSTRAINED_ &&
10455 ip [11] == CEE_CALLVIRT &&
10456 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10457 mono_class_is_enum (klass) &&
10458 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10459 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10460 has_flag->klass == mono_defaults.enum_class &&
10461 !strcmp (has_flag->name, "HasFlag") &&
10462 has_flag->signature->hasthis &&
10463 has_flag->signature->param_count == 1) {
10464 CHECK_TYPELOAD (enum_class);
10466 if (enum_class == klass) {
10467 MonoInst *enum_this, *enum_flag;
10472 enum_this = sp [0];
10473 enum_flag = sp [1];
10475 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10480 // FIXME: LLVM can't handle the inconsistent bb linking
10481 if (!mono_class_is_nullable (klass) &&
10482 !mini_is_gsharedvt_klass (klass) &&
10483 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10484 (ip [5] == CEE_BRTRUE ||
10485 ip [5] == CEE_BRTRUE_S ||
10486 ip [5] == CEE_BRFALSE ||
10487 ip [5] == CEE_BRFALSE_S)) {
10488 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10490 MonoBasicBlock *true_bb, *false_bb;
10494 if (cfg->verbose_level > 3) {
10495 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10496 printf ("<box+brtrue opt>\n");
10501 case CEE_BRFALSE_S:
10504 target = ip + 1 + (signed char)(*ip);
10511 target = ip + 4 + (gint)(read32 (ip));
10515 g_assert_not_reached ();
10519 * We need to link both bblocks, since it is needed for handling stack
10520 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10521 * Branching to only one of them would lead to inconsistencies, so
10522 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10524 GET_BBLOCK (cfg, true_bb, target);
10525 GET_BBLOCK (cfg, false_bb, ip);
10527 mono_link_bblock (cfg, cfg->cbb, true_bb);
10528 mono_link_bblock (cfg, cfg->cbb, false_bb);
10530 if (sp != stack_start) {
10531 handle_stack_args (cfg, stack_start, sp - stack_start);
10533 CHECK_UNVERIFIABLE (cfg);
10536 if (COMPILE_LLVM (cfg)) {
10537 dreg = alloc_ireg (cfg);
10538 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10541 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10543 /* The JIT can't eliminate the iconst+compare */
10544 MONO_INST_NEW (cfg, ins, OP_BR);
10545 ins->inst_target_bb = is_true ? true_bb : false_bb;
10546 MONO_ADD_INS (cfg->cbb, ins);
10549 start_new_bblock = 1;
10553 *sp++ = handle_box (cfg, val, klass, context_used);
10555 CHECK_CFG_EXCEPTION;
10564 token = read32 (ip + 1);
10565 klass = mini_get_class (method, token, generic_context);
10566 CHECK_TYPELOAD (klass);
10568 mono_save_token_info (cfg, image, token, klass);
10570 context_used = mini_class_check_context_used (cfg, klass);
10572 if (mono_class_is_nullable (klass)) {
10575 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10576 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10580 ins = handle_unbox (cfg, klass, sp, context_used);
10593 MonoClassField *field;
10594 #ifndef DISABLE_REMOTING
10598 gboolean is_instance;
10600 gpointer addr = NULL;
10601 gboolean is_special_static;
10603 MonoInst *store_val = NULL;
10604 MonoInst *thread_ins;
10607 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10609 if (op == CEE_STFLD) {
10612 store_val = sp [1];
10617 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10619 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10622 if (op == CEE_STSFLD) {
10625 store_val = sp [0];
10630 token = read32 (ip + 1);
10631 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10632 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10633 klass = field->parent;
10636 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10639 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10640 FIELD_ACCESS_FAILURE (method, field);
10641 mono_class_init (klass);
10643 /* if the class is Critical then transparent code cannot access its fields */
10644 if (!is_instance && mono_security_core_clr_enabled ())
10645 ensure_method_is_allowed_to_access_field (cfg, method, field);
10647 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10648 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10649 if (mono_security_core_clr_enabled ())
10650 ensure_method_is_allowed_to_access_field (cfg, method, field);
10653 ftype = mono_field_get_type (field);
10656 * LDFLD etc. is usable on static fields as well, so convert those cases to
10659 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10671 g_assert_not_reached ();
10673 is_instance = FALSE;
10676 context_used = mini_class_check_context_used (cfg, klass);
10678 /* INSTANCE CASE */
10680 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10681 if (op == CEE_STFLD) {
10682 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10684 #ifndef DISABLE_REMOTING
10685 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10686 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10687 MonoInst *iargs [5];
10689 GSHAREDVT_FAILURE (op);
10691 iargs [0] = sp [0];
10692 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10693 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10694 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10696 iargs [4] = sp [1];
10698 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10699 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10700 iargs, ip, cfg->real_offset, TRUE);
10701 CHECK_CFG_EXCEPTION;
10702 g_assert (costs > 0);
10704 cfg->real_offset += 5;
10706 inline_costs += costs;
10708 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10713 MonoInst *store, *wbarrier_ptr_ins = NULL;
10715 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10717 if (ins_flag & MONO_INST_VOLATILE) {
10718 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10719 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10722 if (mini_is_gsharedvt_klass (klass)) {
10723 MonoInst *offset_ins;
10725 context_used = mini_class_check_context_used (cfg, klass);
10727 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10728 /* The value is offset by 1 */
10729 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10730 dreg = alloc_ireg_mp (cfg);
10731 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10732 wbarrier_ptr_ins = ins;
10733 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10734 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10736 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10738 if (sp [0]->opcode != OP_LDADDR)
10739 store->flags |= MONO_INST_FAULT;
10741 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10742 if (mini_is_gsharedvt_klass (klass)) {
10743 g_assert (wbarrier_ptr_ins);
10744 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10746 /* insert call to write barrier */
10750 dreg = alloc_ireg_mp (cfg);
10751 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10752 emit_write_barrier (cfg, ptr, sp [1]);
10756 store->flags |= ins_flag;
10763 #ifndef DISABLE_REMOTING
10764 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10765 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10766 MonoInst *iargs [4];
10768 GSHAREDVT_FAILURE (op);
10770 iargs [0] = sp [0];
10771 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10772 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10773 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10774 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10775 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10776 iargs, ip, cfg->real_offset, TRUE);
10777 CHECK_CFG_EXCEPTION;
10778 g_assert (costs > 0);
10780 cfg->real_offset += 5;
10784 inline_costs += costs;
10786 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10792 if (sp [0]->type == STACK_VTYPE) {
10795 /* Have to compute the address of the variable */
10797 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10799 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10801 g_assert (var->klass == klass);
10803 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10807 if (op == CEE_LDFLDA) {
10808 if (sp [0]->type == STACK_OBJ) {
10809 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10810 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10813 dreg = alloc_ireg_mp (cfg);
10815 if (mini_is_gsharedvt_klass (klass)) {
10816 MonoInst *offset_ins;
10818 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10819 /* The value is offset by 1 */
10820 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10821 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10823 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10825 ins->klass = mono_class_from_mono_type (field->type);
10826 ins->type = STACK_MP;
10831 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10833 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10834 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10843 if (mini_is_gsharedvt_klass (klass)) {
10844 MonoInst *offset_ins;
10846 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10847 /* The value is offset by 1 */
10848 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10849 dreg = alloc_ireg_mp (cfg);
10850 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10851 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10853 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10855 load->flags |= ins_flag;
10856 if (sp [0]->opcode != OP_LDADDR)
10857 load->flags |= MONO_INST_FAULT;
10869 context_used = mini_class_check_context_used (cfg, klass);
10871 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10872 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10876 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10877 * to be called here.
10879 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10880 mono_class_vtable (cfg->domain, klass);
10881 CHECK_TYPELOAD (klass);
10883 mono_domain_lock (cfg->domain);
10884 if (cfg->domain->special_static_fields)
10885 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10886 mono_domain_unlock (cfg->domain);
10888 is_special_static = mono_class_field_is_special_static (field);
10890 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10891 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10895 /* Generate IR to compute the field address */
10896 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10898 * Fast access to TLS data
10899 * Inline version of get_thread_static_data () in
10903 int idx, static_data_reg, array_reg, dreg;
10905 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10906 GSHAREDVT_FAILURE (op);
10908 static_data_reg = alloc_ireg (cfg);
10909 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10911 if (cfg->compile_aot) {
10912 int offset_reg, offset2_reg, idx_reg;
10914 /* For TLS variables, this will return the TLS offset */
10915 EMIT_NEW_SFLDACONST (cfg, ins, field);
10916 offset_reg = ins->dreg;
10917 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10918 idx_reg = alloc_ireg (cfg);
10919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10921 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10922 array_reg = alloc_ireg (cfg);
10923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10924 offset2_reg = alloc_ireg (cfg);
10925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10927 dreg = alloc_ireg (cfg);
10928 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10930 offset = (gsize)addr & 0x7fffffff;
10931 idx = offset & 0x3f;
10933 array_reg = alloc_ireg (cfg);
10934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10935 dreg = alloc_ireg (cfg);
10936 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10938 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10939 (cfg->compile_aot && is_special_static) ||
10940 (context_used && is_special_static)) {
10941 MonoInst *iargs [2];
10943 g_assert (field->parent);
10944 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10945 if (context_used) {
10946 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10947 field, MONO_RGCTX_INFO_CLASS_FIELD);
10949 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10951 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10952 } else if (context_used) {
10953 MonoInst *static_data;
10956 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10957 method->klass->name_space, method->klass->name, method->name,
10958 depth, field->offset);
10961 if (mono_class_needs_cctor_run (klass, method))
10962 emit_class_init (cfg, klass);
10965 * The pointer we're computing here is
10967 * super_info.static_data + field->offset
10969 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10970 klass, MONO_RGCTX_INFO_STATIC_DATA);
10972 if (mini_is_gsharedvt_klass (klass)) {
10973 MonoInst *offset_ins;
10975 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10976 /* The value is offset by 1 */
10977 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10978 dreg = alloc_ireg_mp (cfg);
10979 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10980 } else if (field->offset == 0) {
10983 int addr_reg = mono_alloc_preg (cfg);
10984 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10986 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10987 MonoInst *iargs [2];
10989 g_assert (field->parent);
10990 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10991 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10992 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10994 MonoVTable *vtable = NULL;
10996 if (!cfg->compile_aot)
10997 vtable = mono_class_vtable (cfg->domain, klass);
10998 CHECK_TYPELOAD (klass);
11001 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11002 if (!(g_slist_find (class_inits, klass))) {
11003 emit_class_init (cfg, klass);
11004 if (cfg->verbose_level > 2)
11005 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11006 class_inits = g_slist_prepend (class_inits, klass);
11009 if (cfg->run_cctors) {
11010 /* This makes so that inline cannot trigger */
11011 /* .cctors: too many apps depend on them */
11012 /* running with a specific order... */
11014 if (! vtable->initialized)
11015 INLINE_FAILURE ("class init");
11016 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11017 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11018 goto exception_exit;
11022 if (cfg->compile_aot)
11023 EMIT_NEW_SFLDACONST (cfg, ins, field);
11026 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11028 EMIT_NEW_PCONST (cfg, ins, addr);
11031 MonoInst *iargs [1];
11032 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11033 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11037 /* Generate IR to do the actual load/store operation */
11039 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11040 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11041 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11044 if (op == CEE_LDSFLDA) {
11045 ins->klass = mono_class_from_mono_type (ftype);
11046 ins->type = STACK_PTR;
11048 } else if (op == CEE_STSFLD) {
11051 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11052 store->flags |= ins_flag;
11054 gboolean is_const = FALSE;
11055 MonoVTable *vtable = NULL;
11056 gpointer addr = NULL;
11058 if (!context_used) {
11059 vtable = mono_class_vtable (cfg->domain, klass);
11060 CHECK_TYPELOAD (klass);
11062 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11063 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11064 int ro_type = ftype->type;
11066 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11067 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11068 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11071 GSHAREDVT_FAILURE (op);
11073 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11076 case MONO_TYPE_BOOLEAN:
11078 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11082 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11085 case MONO_TYPE_CHAR:
11087 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11091 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11096 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11100 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11105 case MONO_TYPE_PTR:
11106 case MONO_TYPE_FNPTR:
11107 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11108 type_to_eval_stack_type ((cfg), field->type, *sp);
11111 case MONO_TYPE_STRING:
11112 case MONO_TYPE_OBJECT:
11113 case MONO_TYPE_CLASS:
11114 case MONO_TYPE_SZARRAY:
11115 case MONO_TYPE_ARRAY:
11116 if (!mono_gc_is_moving ()) {
11117 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11118 type_to_eval_stack_type ((cfg), field->type, *sp);
11126 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11131 case MONO_TYPE_VALUETYPE:
11141 CHECK_STACK_OVF (1);
11143 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11144 load->flags |= ins_flag;
11150 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11151 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11152 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11163 token = read32 (ip + 1);
11164 klass = mini_get_class (method, token, generic_context);
11165 CHECK_TYPELOAD (klass);
11166 if (ins_flag & MONO_INST_VOLATILE) {
11167 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11168 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11170 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11172 ins->flags |= ins_flag;
11173 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11174 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11175 /* insert call to write barrier */
11176 emit_write_barrier (cfg, sp [0], sp [1]);
11188 const char *data_ptr;
11190 guint32 field_token;
11196 token = read32 (ip + 1);
11198 klass = mini_get_class (method, token, generic_context);
11199 CHECK_TYPELOAD (klass);
11201 context_used = mini_class_check_context_used (cfg, klass);
11203 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11204 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11205 ins->sreg1 = sp [0]->dreg;
11206 ins->type = STACK_I4;
11207 ins->dreg = alloc_ireg (cfg);
11208 MONO_ADD_INS (cfg->cbb, ins);
11209 *sp = mono_decompose_opcode (cfg, ins);
11212 if (context_used) {
11213 MonoInst *args [3];
11214 MonoClass *array_class = mono_array_class_get (klass, 1);
11215 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11217 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11220 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11221 array_class, MONO_RGCTX_INFO_VTABLE);
11226 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11228 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11230 if (cfg->opt & MONO_OPT_SHARED) {
11231 /* Decompose now to avoid problems with references to the domainvar */
11232 MonoInst *iargs [3];
11234 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11235 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11236 iargs [2] = sp [0];
11238 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11240 /* Decompose later since it is needed by abcrem */
11241 MonoClass *array_type = mono_array_class_get (klass, 1);
11242 mono_class_vtable (cfg->domain, array_type);
11243 CHECK_TYPELOAD (array_type);
11245 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11246 ins->dreg = alloc_ireg_ref (cfg);
11247 ins->sreg1 = sp [0]->dreg;
11248 ins->inst_newa_class = klass;
11249 ins->type = STACK_OBJ;
11250 ins->klass = array_type;
11251 MONO_ADD_INS (cfg->cbb, ins);
11252 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11253 cfg->cbb->has_array_access = TRUE;
11255 /* Needed so mono_emit_load_get_addr () gets called */
11256 mono_get_got_var (cfg);
11266 * we inline/optimize the initialization sequence if possible.
11267 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11268 * for small sizes open code the memcpy
11269 * ensure the rva field is big enough
11271 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11272 MonoMethod *memcpy_method = get_memcpy_method ();
11273 MonoInst *iargs [3];
11274 int add_reg = alloc_ireg_mp (cfg);
11276 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11277 if (cfg->compile_aot) {
11278 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11280 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11282 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11283 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11292 if (sp [0]->type != STACK_OBJ)
11295 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11296 ins->dreg = alloc_preg (cfg);
11297 ins->sreg1 = sp [0]->dreg;
11298 ins->type = STACK_I4;
11299 /* This flag will be inherited by the decomposition */
11300 ins->flags |= MONO_INST_FAULT;
11301 MONO_ADD_INS (cfg->cbb, ins);
11302 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11303 cfg->cbb->has_array_access = TRUE;
11311 if (sp [0]->type != STACK_OBJ)
11314 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11316 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11317 CHECK_TYPELOAD (klass);
11318 /* we need to make sure that this array is exactly the type it needs
11319 * to be for correctness. the wrappers are lax with their usage
11320 * so we need to ignore them here
11322 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11323 MonoClass *array_class = mono_array_class_get (klass, 1);
11324 mini_emit_check_array_type (cfg, sp [0], array_class);
11325 CHECK_TYPELOAD (array_class);
11329 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11334 case CEE_LDELEM_I1:
11335 case CEE_LDELEM_U1:
11336 case CEE_LDELEM_I2:
11337 case CEE_LDELEM_U2:
11338 case CEE_LDELEM_I4:
11339 case CEE_LDELEM_U4:
11340 case CEE_LDELEM_I8:
11342 case CEE_LDELEM_R4:
11343 case CEE_LDELEM_R8:
11344 case CEE_LDELEM_REF: {
11350 if (*ip == CEE_LDELEM) {
11352 token = read32 (ip + 1);
11353 klass = mini_get_class (method, token, generic_context);
11354 CHECK_TYPELOAD (klass);
11355 mono_class_init (klass);
11358 klass = array_access_to_klass (*ip);
11360 if (sp [0]->type != STACK_OBJ)
11363 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11365 if (mini_is_gsharedvt_variable_klass (klass)) {
11366 // FIXME-VT: OP_ICONST optimization
11367 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11368 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11369 ins->opcode = OP_LOADV_MEMBASE;
11370 } else if (sp [1]->opcode == OP_ICONST) {
11371 int array_reg = sp [0]->dreg;
11372 int index_reg = sp [1]->dreg;
11373 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11375 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11376 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11378 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11379 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11381 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11382 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11385 if (*ip == CEE_LDELEM)
11392 case CEE_STELEM_I1:
11393 case CEE_STELEM_I2:
11394 case CEE_STELEM_I4:
11395 case CEE_STELEM_I8:
11396 case CEE_STELEM_R4:
11397 case CEE_STELEM_R8:
11398 case CEE_STELEM_REF:
11403 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11405 if (*ip == CEE_STELEM) {
11407 token = read32 (ip + 1);
11408 klass = mini_get_class (method, token, generic_context);
11409 CHECK_TYPELOAD (klass);
11410 mono_class_init (klass);
11413 klass = array_access_to_klass (*ip);
11415 if (sp [0]->type != STACK_OBJ)
11418 emit_array_store (cfg, klass, sp, TRUE);
11420 if (*ip == CEE_STELEM)
11427 case CEE_CKFINITE: {
11431 if (cfg->llvm_only) {
11432 MonoInst *iargs [1];
11434 iargs [0] = sp [0];
11435 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11437 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11438 ins->sreg1 = sp [0]->dreg;
11439 ins->dreg = alloc_freg (cfg);
11440 ins->type = STACK_R8;
11441 MONO_ADD_INS (cfg->cbb, ins);
11443 *sp++ = mono_decompose_opcode (cfg, ins);
11449 case CEE_REFANYVAL: {
11450 MonoInst *src_var, *src;
11452 int klass_reg = alloc_preg (cfg);
11453 int dreg = alloc_preg (cfg);
11455 GSHAREDVT_FAILURE (*ip);
11458 MONO_INST_NEW (cfg, ins, *ip);
11461 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11462 CHECK_TYPELOAD (klass);
11464 context_used = mini_class_check_context_used (cfg, klass);
11467 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11469 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11470 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11473 if (context_used) {
11474 MonoInst *klass_ins;
11476 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11477 klass, MONO_RGCTX_INFO_KLASS);
11480 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11481 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11483 mini_emit_class_check (cfg, klass_reg, klass);
11485 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11486 ins->type = STACK_MP;
11487 ins->klass = klass;
11492 case CEE_MKREFANY: {
11493 MonoInst *loc, *addr;
11495 GSHAREDVT_FAILURE (*ip);
11498 MONO_INST_NEW (cfg, ins, *ip);
11501 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11502 CHECK_TYPELOAD (klass);
11504 context_used = mini_class_check_context_used (cfg, klass);
11506 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11507 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11509 if (context_used) {
11510 MonoInst *const_ins;
11511 int type_reg = alloc_preg (cfg);
11513 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11514 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11516 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11518 int const_reg = alloc_preg (cfg);
11519 int type_reg = alloc_preg (cfg);
11521 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11522 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11524 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11526 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11528 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11529 ins->type = STACK_VTYPE;
11530 ins->klass = mono_defaults.typed_reference_class;
11535 case CEE_LDTOKEN: {
11537 MonoClass *handle_class;
11539 CHECK_STACK_OVF (1);
11542 n = read32 (ip + 1);
11544 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11545 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11546 handle = mono_method_get_wrapper_data (method, n);
11547 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11548 if (handle_class == mono_defaults.typehandle_class)
11549 handle = &((MonoClass*)handle)->byval_arg;
11552 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11557 mono_class_init (handle_class);
11558 if (cfg->gshared) {
11559 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11560 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11561 /* This case handles ldtoken
11562 of an open type, like for
11565 } else if (handle_class == mono_defaults.typehandle_class) {
11566 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11567 } else if (handle_class == mono_defaults.fieldhandle_class)
11568 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11569 else if (handle_class == mono_defaults.methodhandle_class)
11570 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11572 g_assert_not_reached ();
11575 if ((cfg->opt & MONO_OPT_SHARED) &&
11576 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11577 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11578 MonoInst *addr, *vtvar, *iargs [3];
11579 int method_context_used;
11581 method_context_used = mini_method_check_context_used (cfg, method);
11583 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11585 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11586 EMIT_NEW_ICONST (cfg, iargs [1], n);
11587 if (method_context_used) {
11588 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11589 method, MONO_RGCTX_INFO_METHOD);
11590 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11592 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11593 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11595 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11597 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11599 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11601 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11602 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11603 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11604 (cmethod->klass == mono_defaults.systemtype_class) &&
11605 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11606 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11608 mono_class_init (tclass);
11609 if (context_used) {
11610 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11611 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11612 } else if (cfg->compile_aot) {
11613 if (method->wrapper_type) {
11614 error_init (&error); //got to do it since there are multiple conditionals below
11615 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11616 /* Special case for static synchronized wrappers */
11617 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11619 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11620 /* FIXME: n is not a normal token */
11622 EMIT_NEW_PCONST (cfg, ins, NULL);
11625 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11628 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11630 EMIT_NEW_PCONST (cfg, ins, rt);
11632 ins->type = STACK_OBJ;
11633 ins->klass = cmethod->klass;
11636 MonoInst *addr, *vtvar;
11638 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11640 if (context_used) {
11641 if (handle_class == mono_defaults.typehandle_class) {
11642 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11643 mono_class_from_mono_type ((MonoType *)handle),
11644 MONO_RGCTX_INFO_TYPE);
11645 } else if (handle_class == mono_defaults.methodhandle_class) {
11646 ins = emit_get_rgctx_method (cfg, context_used,
11647 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11648 } else if (handle_class == mono_defaults.fieldhandle_class) {
11649 ins = emit_get_rgctx_field (cfg, context_used,
11650 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11652 g_assert_not_reached ();
11654 } else if (cfg->compile_aot) {
11655 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11657 EMIT_NEW_PCONST (cfg, ins, handle);
11659 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11660 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11661 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11671 if (sp [-1]->type != STACK_OBJ)
11674 MONO_INST_NEW (cfg, ins, OP_THROW);
11676 ins->sreg1 = sp [0]->dreg;
11678 cfg->cbb->out_of_line = TRUE;
11679 MONO_ADD_INS (cfg->cbb, ins);
11680 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11681 MONO_ADD_INS (cfg->cbb, ins);
11684 link_bblock (cfg, cfg->cbb, end_bblock);
11685 start_new_bblock = 1;
11686 /* This can complicate code generation for llvm since the return value might not be defined */
11687 if (COMPILE_LLVM (cfg))
11688 INLINE_FAILURE ("throw");
11690 case CEE_ENDFINALLY:
11691 if (!ip_in_finally_clause (cfg, ip - header->code))
11693 /* mono_save_seq_point_info () depends on this */
11694 if (sp != stack_start)
11695 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11696 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11697 MONO_ADD_INS (cfg->cbb, ins);
11699 start_new_bblock = 1;
11702 * Control will leave the method so empty the stack, otherwise
11703 * the next basic block will start with a nonempty stack.
11705 while (sp != stack_start) {
11710 case CEE_LEAVE_S: {
11713 if (*ip == CEE_LEAVE) {
11715 target = ip + 5 + (gint32)read32(ip + 1);
11718 target = ip + 2 + (signed char)(ip [1]);
11721 /* empty the stack */
11722 while (sp != stack_start) {
11727 * If this leave statement is in a catch block, check for a
11728 * pending exception, and rethrow it if necessary.
11729 * We avoid doing this in runtime invoke wrappers, since those are called
11730 * by native code which expects the wrapper to catch all exceptions.
11732 for (i = 0; i < header->num_clauses; ++i) {
11733 MonoExceptionClause *clause = &header->clauses [i];
11736 * Use <= in the final comparison to handle clauses with multiple
11737 * leave statements, like in bug #78024.
11738 * The ordering of the exception clauses guarantees that we find the
11739 * innermost clause.
11741 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11743 MonoBasicBlock *dont_throw;
11748 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11751 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11753 NEW_BBLOCK (cfg, dont_throw);
11756 * Currently, we always rethrow the abort exception, despite the
11757 * fact that this is not correct. See thread6.cs for an example.
11758 * But propagating the abort exception is more important than
11759 * getting the semantics right.
11761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11763 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11765 MONO_START_BB (cfg, dont_throw);
11770 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11773 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11775 MonoExceptionClause *clause;
11777 for (tmp = handlers; tmp; tmp = tmp->next) {
11778 clause = (MonoExceptionClause *)tmp->data;
11779 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11781 link_bblock (cfg, cfg->cbb, tblock);
11782 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11783 ins->inst_target_bb = tblock;
11784 ins->inst_eh_block = clause;
11785 MONO_ADD_INS (cfg->cbb, ins);
11786 cfg->cbb->has_call_handler = 1;
11787 if (COMPILE_LLVM (cfg)) {
11788 MonoBasicBlock *target_bb;
11791 * Link the finally bblock with the target, since it will
11792 * conceptually branch there.
11794 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11795 GET_BBLOCK (cfg, target_bb, target);
11796 link_bblock (cfg, tblock, target_bb);
11799 g_list_free (handlers);
11802 MONO_INST_NEW (cfg, ins, OP_BR);
11803 MONO_ADD_INS (cfg->cbb, ins);
11804 GET_BBLOCK (cfg, tblock, target);
11805 link_bblock (cfg, cfg->cbb, tblock);
11806 ins->inst_target_bb = tblock;
11808 start_new_bblock = 1;
11810 if (*ip == CEE_LEAVE)
11819 * Mono specific opcodes
11821 case MONO_CUSTOM_PREFIX: {
11823 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11827 case CEE_MONO_ICALL: {
11829 MonoJitICallInfo *info;
11831 token = read32 (ip + 2);
11832 func = mono_method_get_wrapper_data (method, token);
11833 info = mono_find_jit_icall_by_addr (func);
11835 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11838 CHECK_STACK (info->sig->param_count);
11839 sp -= info->sig->param_count;
11841 ins = mono_emit_jit_icall (cfg, info->func, sp);
11842 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11846 inline_costs += 10 * num_calls++;
11850 case CEE_MONO_LDPTR_CARD_TABLE:
11851 case CEE_MONO_LDPTR_NURSERY_START:
11852 case CEE_MONO_LDPTR_NURSERY_BITS:
11853 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11854 CHECK_STACK_OVF (1);
11857 case CEE_MONO_LDPTR_CARD_TABLE:
11858 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11860 case CEE_MONO_LDPTR_NURSERY_START:
11861 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11863 case CEE_MONO_LDPTR_NURSERY_BITS:
11864 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11866 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11867 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11873 inline_costs += 10 * num_calls++;
11876 case CEE_MONO_LDPTR: {
11879 CHECK_STACK_OVF (1);
11881 token = read32 (ip + 2);
11883 ptr = mono_method_get_wrapper_data (method, token);
11884 EMIT_NEW_PCONST (cfg, ins, ptr);
11887 inline_costs += 10 * num_calls++;
11888 /* Can't embed random pointers into AOT code */
11892 case CEE_MONO_JIT_ICALL_ADDR: {
11893 MonoJitICallInfo *callinfo;
11896 CHECK_STACK_OVF (1);
11898 token = read32 (ip + 2);
11900 ptr = mono_method_get_wrapper_data (method, token);
11901 callinfo = mono_find_jit_icall_by_addr (ptr);
11902 g_assert (callinfo);
11903 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11906 inline_costs += 10 * num_calls++;
11909 case CEE_MONO_ICALL_ADDR: {
11910 MonoMethod *cmethod;
11913 CHECK_STACK_OVF (1);
11915 token = read32 (ip + 2);
11917 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11919 if (cfg->compile_aot) {
11920 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11922 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11923 * before the call, it's not needed when using direct pinvoke.
11924 * This is not an optimization, but it's used to avoid looking up pinvokes
11925 * on platforms which don't support dlopen ().
11927 EMIT_NEW_PCONST (cfg, ins, NULL);
11929 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11932 ptr = mono_lookup_internal_call (cmethod);
11934 EMIT_NEW_PCONST (cfg, ins, ptr);
11940 case CEE_MONO_VTADDR: {
11941 MonoInst *src_var, *src;
11947 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11948 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11953 case CEE_MONO_NEWOBJ: {
11954 MonoInst *iargs [2];
11956 CHECK_STACK_OVF (1);
11958 token = read32 (ip + 2);
11959 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11960 mono_class_init (klass);
11961 NEW_DOMAINCONST (cfg, iargs [0]);
11962 MONO_ADD_INS (cfg->cbb, iargs [0]);
11963 NEW_CLASSCONST (cfg, iargs [1], klass);
11964 MONO_ADD_INS (cfg->cbb, iargs [1]);
11965 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11967 inline_costs += 10 * num_calls++;
11970 case CEE_MONO_OBJADDR:
11973 MONO_INST_NEW (cfg, ins, OP_MOVE);
11974 ins->dreg = alloc_ireg_mp (cfg);
11975 ins->sreg1 = sp [0]->dreg;
11976 ins->type = STACK_MP;
11977 MONO_ADD_INS (cfg->cbb, ins);
11981 case CEE_MONO_LDNATIVEOBJ:
11983 * Similar to LDOBJ, but instead load the unmanaged
11984 * representation of the vtype to the stack.
11989 token = read32 (ip + 2);
11990 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11991 g_assert (klass->valuetype);
11992 mono_class_init (klass);
11995 MonoInst *src, *dest, *temp;
11998 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11999 temp->backend.is_pinvoke = 1;
12000 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12001 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12003 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12004 dest->type = STACK_VTYPE;
12005 dest->klass = klass;
12011 case CEE_MONO_RETOBJ: {
12013 * Same as RET, but return the native representation of a vtype
12016 g_assert (cfg->ret);
12017 g_assert (mono_method_signature (method)->pinvoke);
12022 token = read32 (ip + 2);
12023 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12025 if (!cfg->vret_addr) {
12026 g_assert (cfg->ret_var_is_local);
12028 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12030 EMIT_NEW_RETLOADA (cfg, ins);
12032 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12034 if (sp != stack_start)
12037 MONO_INST_NEW (cfg, ins, OP_BR);
12038 ins->inst_target_bb = end_bblock;
12039 MONO_ADD_INS (cfg->cbb, ins);
12040 link_bblock (cfg, cfg->cbb, end_bblock);
12041 start_new_bblock = 1;
12045 case CEE_MONO_SAVE_LMF:
12046 case CEE_MONO_RESTORE_LMF:
12049 case CEE_MONO_CLASSCONST:
12050 CHECK_STACK_OVF (1);
12052 token = read32 (ip + 2);
12053 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12056 inline_costs += 10 * num_calls++;
12058 case CEE_MONO_NOT_TAKEN:
12059 cfg->cbb->out_of_line = TRUE;
12062 case CEE_MONO_TLS: {
12065 CHECK_STACK_OVF (1);
12067 key = (MonoTlsKey)read32 (ip + 2);
12068 g_assert (key < TLS_KEY_NUM);
12070 ins = mono_create_tls_get (cfg, key);
12072 ins->type = STACK_PTR;
12077 case CEE_MONO_DYN_CALL: {
12078 MonoCallInst *call;
12080 /* It would be easier to call a trampoline, but that would put an
12081 * extra frame on the stack, confusing exception handling. So
12082 * implement it inline using an opcode for now.
12085 if (!cfg->dyn_call_var) {
12086 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12087 /* prevent it from being register allocated */
12088 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12092 /* Has to use a call inst since the local regalloc expects it */
12092 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12093 ins = (MonoInst*)call;
12095 ins->sreg1 = sp [0]->dreg;
12096 ins->sreg2 = sp [1]->dreg;
12097 MONO_ADD_INS (cfg->cbb, ins);
12099 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12102 inline_costs += 10 * num_calls++;
12106 case CEE_MONO_MEMORY_BARRIER: {
12108 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12112 case CEE_MONO_ATOMIC_STORE_I4: {
12113 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12119 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12120 ins->dreg = sp [0]->dreg;
12121 ins->sreg1 = sp [1]->dreg;
12122 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12123 MONO_ADD_INS (cfg->cbb, ins);
12128 case CEE_MONO_JIT_ATTACH: {
12129 MonoInst *args [16], *domain_ins;
12130 MonoInst *ad_ins, *jit_tls_ins;
12131 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12133 g_assert (!mono_threads_is_coop_enabled ());
12135 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12137 EMIT_NEW_PCONST (cfg, ins, NULL);
12138 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12140 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12141 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12143 if (ad_ins && jit_tls_ins) {
12144 NEW_BBLOCK (cfg, next_bb);
12145 NEW_BBLOCK (cfg, call_bb);
12147 if (cfg->compile_aot) {
12148 /* AOT code is only used in the root domain */
12149 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12151 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12153 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12156 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12160 MONO_START_BB (cfg, call_bb);
12163 /* AOT code is only used in the root domain */
12164 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12165 if (cfg->compile_aot) {
12169 * This is called on unattached threads, so it cannot go through the trampoline
12170 * infrastructure. Use an indirect call through a got slot initialized at load time
12173 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12174 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12176 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12178 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12181 MONO_START_BB (cfg, next_bb);
12186 case CEE_MONO_JIT_DETACH: {
12187 MonoInst *args [16];
12189 /* Restore the original domain */
12190 dreg = alloc_ireg (cfg);
12191 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12192 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12196 case CEE_MONO_CALLI_EXTRA_ARG: {
12198 MonoMethodSignature *fsig;
12202 * This is the same as CEE_CALLI, but passes an additional argument
12203 * to the called method in llvmonly mode.
12204 * This is only used by delegate invoke wrappers to call the
12205 * actual delegate method.
12207 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12210 token = read32 (ip + 2);
12218 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12221 if (cfg->llvm_only)
12222 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12224 n = fsig->param_count + fsig->hasthis + 1;
12231 if (cfg->llvm_only) {
12233 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12234 * cconv. This is set by mono_init_delegate ().
12236 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12237 MonoInst *callee = addr;
12238 MonoInst *call, *localloc_ins;
12239 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12240 int low_bit_reg = alloc_preg (cfg);
12242 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12243 NEW_BBLOCK (cfg, end_bb);
12245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12249 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12250 addr = emit_get_rgctx_sig (cfg, context_used,
12251 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12253 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12255 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12256 ins->dreg = alloc_preg (cfg);
12257 ins->inst_imm = 2 * SIZEOF_VOID_P;
12258 MONO_ADD_INS (cfg->cbb, ins);
12259 localloc_ins = ins;
12260 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12261 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12262 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12264 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12267 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12268 MONO_START_BB (cfg, is_gsharedvt_bb);
12269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12270 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12271 ins->dreg = call->dreg;
12273 MONO_START_BB (cfg, end_bb);
12275 /* Caller uses a normal calling conv */
12277 MonoInst *callee = addr;
12278 MonoInst *call, *localloc_ins;
12279 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12280 int low_bit_reg = alloc_preg (cfg);
12282 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12283 NEW_BBLOCK (cfg, end_bb);
12285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12287 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12289 /* Normal case: callee uses a normal cconv, no conversion is needed */
12290 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12291 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12292 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12293 MONO_START_BB (cfg, is_gsharedvt_bb);
12294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12295 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12296 MONO_ADD_INS (cfg->cbb, addr);
12298 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12300 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12301 ins->dreg = alloc_preg (cfg);
12302 ins->inst_imm = 2 * SIZEOF_VOID_P;
12303 MONO_ADD_INS (cfg->cbb, ins);
12304 localloc_ins = ins;
12305 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12306 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12309 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12310 ins->dreg = call->dreg;
12311 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12313 MONO_START_BB (cfg, end_bb);
12316 /* Same as CEE_CALLI */
12317 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12319 * We pass the address to the gsharedvt trampoline in the rgctx reg
12321 MonoInst *callee = addr;
12323 addr = emit_get_rgctx_sig (cfg, context_used,
12324 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12325 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12327 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12331 if (!MONO_TYPE_IS_VOID (fsig->ret))
12332 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12334 CHECK_CFG_EXCEPTION;
12338 constrained_class = NULL;
12341 case CEE_MONO_LDDOMAIN:
12342 CHECK_STACK_OVF (1);
12343 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12347 case CEE_MONO_GET_LAST_ERROR:
12349 CHECK_STACK_OVF (1);
12351 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12352 ins->dreg = alloc_dreg (cfg, STACK_I4);
12353 ins->type = STACK_I4;
12354 MONO_ADD_INS (cfg->cbb, ins);
12359 case CEE_MONO_GET_RGCTX_ARG:
12361 CHECK_STACK_OVF (1);
12363 mono_create_rgctx_var (cfg);
12365 MONO_INST_NEW (cfg, ins, OP_MOVE);
12366 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12367 ins->sreg1 = cfg->rgctx_var->dreg;
12368 ins->type = STACK_PTR;
12369 MONO_ADD_INS (cfg->cbb, ins);
12375 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12381 case CEE_PREFIX1: {
12384 case CEE_ARGLIST: {
12385 /* somewhat similar to LDTOKEN */
12386 MonoInst *addr, *vtvar;
12387 CHECK_STACK_OVF (1);
12388 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12390 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12391 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12393 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12394 ins->type = STACK_VTYPE;
12395 ins->klass = mono_defaults.argumenthandle_class;
12405 MonoInst *cmp, *arg1, *arg2;
12413 * The following transforms:
12414 * CEE_CEQ into OP_CEQ
12415 * CEE_CGT into OP_CGT
12416 * CEE_CGT_UN into OP_CGT_UN
12417 * CEE_CLT into OP_CLT
12418 * CEE_CLT_UN into OP_CLT_UN
12420 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12422 MONO_INST_NEW (cfg, ins, cmp->opcode);
12423 cmp->sreg1 = arg1->dreg;
12424 cmp->sreg2 = arg2->dreg;
12425 type_from_op (cfg, cmp, arg1, arg2);
12427 add_widen_op (cfg, cmp, &arg1, &arg2);
12428 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12429 cmp->opcode = OP_LCOMPARE;
12430 else if (arg1->type == STACK_R4)
12431 cmp->opcode = OP_RCOMPARE;
12432 else if (arg1->type == STACK_R8)
12433 cmp->opcode = OP_FCOMPARE;
12435 cmp->opcode = OP_ICOMPARE;
12436 MONO_ADD_INS (cfg->cbb, cmp);
12437 ins->type = STACK_I4;
12438 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12439 type_from_op (cfg, ins, arg1, arg2);
12441 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12443 * The backends expect the fceq opcodes to do the
12446 ins->sreg1 = cmp->sreg1;
12447 ins->sreg2 = cmp->sreg2;
12450 MONO_ADD_INS (cfg->cbb, ins);
12456 MonoInst *argconst;
12457 MonoMethod *cil_method;
12459 CHECK_STACK_OVF (1);
12461 n = read32 (ip + 2);
12462 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12465 mono_class_init (cmethod->klass);
12467 mono_save_token_info (cfg, image, n, cmethod);
12469 context_used = mini_method_check_context_used (cfg, cmethod);
12471 cil_method = cmethod;
12472 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12473 emit_method_access_failure (cfg, method, cil_method);
12475 if (mono_security_core_clr_enabled ())
12476 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12479 * Optimize the common case of ldftn+delegate creation
12481 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12482 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12483 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12484 MonoInst *target_ins, *handle_ins;
12485 MonoMethod *invoke;
12486 int invoke_context_used;
12488 invoke = mono_get_delegate_invoke (ctor_method->klass);
12489 if (!invoke || !mono_method_signature (invoke))
12492 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12494 target_ins = sp [-1];
12496 if (mono_security_core_clr_enabled ())
12497 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12499 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12500 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12501 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12503 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12507 /* FIXME: SGEN support */
12508 if (invoke_context_used == 0 || cfg->llvm_only) {
12510 if (cfg->verbose_level > 3)
12511 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12512 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12515 CHECK_CFG_EXCEPTION;
12525 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12526 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12530 inline_costs += 10 * num_calls++;
12533 case CEE_LDVIRTFTN: {
12534 MonoInst *args [2];
12538 n = read32 (ip + 2);
12539 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12542 mono_class_init (cmethod->klass);
12544 context_used = mini_method_check_context_used (cfg, cmethod);
12546 if (mono_security_core_clr_enabled ())
12547 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12550 * Optimize the common case of ldvirtftn+delegate creation
12552 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12553 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12554 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12555 MonoInst *target_ins, *handle_ins;
12556 MonoMethod *invoke;
12557 int invoke_context_used;
12558 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12560 invoke = mono_get_delegate_invoke (ctor_method->klass);
12561 if (!invoke || !mono_method_signature (invoke))
12564 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12566 target_ins = sp [-1];
12568 if (mono_security_core_clr_enabled ())
12569 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12571 /* FIXME: SGEN support */
12572 if (invoke_context_used == 0 || cfg->llvm_only) {
12574 if (cfg->verbose_level > 3)
12575 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12576 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12579 CHECK_CFG_EXCEPTION;
12592 args [1] = emit_get_rgctx_method (cfg, context_used,
12593 cmethod, MONO_RGCTX_INFO_METHOD);
12596 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12598 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12601 inline_costs += 10 * num_calls++;
12605 CHECK_STACK_OVF (1);
12607 n = read16 (ip + 2);
12609 EMIT_NEW_ARGLOAD (cfg, ins, n);
12614 CHECK_STACK_OVF (1);
12616 n = read16 (ip + 2);
12618 NEW_ARGLOADA (cfg, ins, n);
12619 MONO_ADD_INS (cfg->cbb, ins);
12627 n = read16 (ip + 2);
12629 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12631 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12635 CHECK_STACK_OVF (1);
12637 n = read16 (ip + 2);
12639 EMIT_NEW_LOCLOAD (cfg, ins, n);
12644 unsigned char *tmp_ip;
12645 CHECK_STACK_OVF (1);
12647 n = read16 (ip + 2);
12650 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12656 EMIT_NEW_LOCLOADA (cfg, ins, n);
12665 n = read16 (ip + 2);
12667 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12669 emit_stloc_ir (cfg, sp, header, n);
12673 case CEE_LOCALLOC: {
12675 MonoBasicBlock *non_zero_bb, *end_bb;
12676 int alloc_ptr = alloc_preg (cfg);
12678 if (sp != stack_start)
12680 if (cfg->method != method)
12682 * Inlining this into a loop in a parent could lead to
12683 * stack overflows which is different behavior than the
12684 * non-inlined case, thus disable inlining in this case.
12686 INLINE_FAILURE("localloc");
12688 NEW_BBLOCK (cfg, non_zero_bb);
12689 NEW_BBLOCK (cfg, end_bb);
12691 /* if size != zero */
12692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12695 //size is zero, so result is NULL
12696 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12699 MONO_START_BB (cfg, non_zero_bb);
12700 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12701 ins->dreg = alloc_ptr;
12702 ins->sreg1 = sp [0]->dreg;
12703 ins->type = STACK_PTR;
12704 MONO_ADD_INS (cfg->cbb, ins);
12706 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12708 ins->flags |= MONO_INST_INIT;
12710 MONO_START_BB (cfg, end_bb);
12711 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12712 ins->type = STACK_PTR;
12718 case CEE_ENDFILTER: {
12719 MonoExceptionClause *clause, *nearest;
12724 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12726 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12727 ins->sreg1 = (*sp)->dreg;
12728 MONO_ADD_INS (cfg->cbb, ins);
12729 start_new_bblock = 1;
12733 for (cc = 0; cc < header->num_clauses; ++cc) {
12734 clause = &header->clauses [cc];
12735 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12736 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12737 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12740 g_assert (nearest);
12741 if ((ip - header->code) != nearest->handler_offset)
12746 case CEE_UNALIGNED_:
12747 ins_flag |= MONO_INST_UNALIGNED;
12748 /* FIXME: record alignment? we can assume 1 for now */
12752 case CEE_VOLATILE_:
12753 ins_flag |= MONO_INST_VOLATILE;
12757 ins_flag |= MONO_INST_TAILCALL;
12758 cfg->flags |= MONO_CFG_HAS_TAIL;
12759 /* Can't inline tail calls at this time */
12760 inline_costs += 100000;
12767 token = read32 (ip + 2);
12768 klass = mini_get_class (method, token, generic_context);
12769 CHECK_TYPELOAD (klass);
12770 if (generic_class_is_reference_type (cfg, klass))
12771 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12773 mini_emit_initobj (cfg, *sp, NULL, klass);
12777 case CEE_CONSTRAINED_:
12779 token = read32 (ip + 2);
12780 constrained_class = mini_get_class (method, token, generic_context);
12781 CHECK_TYPELOAD (constrained_class);
12785 case CEE_INITBLK: {
12786 MonoInst *iargs [3];
12790 /* Skip optimized paths for volatile operations. */
12791 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12792 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12793 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12794 /* emit_memset only works when val == 0 */
12795 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12798 iargs [0] = sp [0];
12799 iargs [1] = sp [1];
12800 iargs [2] = sp [2];
12801 if (ip [1] == CEE_CPBLK) {
12803 * FIXME: It's unclear whether we should be emitting both the acquire
12804 * and release barriers for cpblk. It is technically both a load and
12805 * store operation, so it seems like that's the sensible thing to do.
12807 * FIXME: We emit full barriers on both sides of the operation for
12808 * simplicity. We should have a separate atomic memcpy method instead.
12810 MonoMethod *memcpy_method = get_memcpy_method ();
12812 if (ins_flag & MONO_INST_VOLATILE)
12813 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12815 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12816 call->flags |= ins_flag;
12818 if (ins_flag & MONO_INST_VOLATILE)
12819 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12821 MonoMethod *memset_method = get_memset_method ();
12822 if (ins_flag & MONO_INST_VOLATILE) {
12823 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12824 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12826 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12827 call->flags |= ins_flag;
12838 ins_flag |= MONO_INST_NOTYPECHECK;
12840 ins_flag |= MONO_INST_NORANGECHECK;
12841 /* we ignore the no-nullcheck for now since we
12842 * really do it explicitly only when doing callvirt->call
12846 case CEE_RETHROW: {
12848 int handler_offset = -1;
12850 for (i = 0; i < header->num_clauses; ++i) {
12851 MonoExceptionClause *clause = &header->clauses [i];
12852 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12853 handler_offset = clause->handler_offset;
12858 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12860 if (handler_offset == -1)
12863 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12864 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12865 ins->sreg1 = load->dreg;
12866 MONO_ADD_INS (cfg->cbb, ins);
12868 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12869 MONO_ADD_INS (cfg->cbb, ins);
12872 link_bblock (cfg, cfg->cbb, end_bblock);
12873 start_new_bblock = 1;
12881 CHECK_STACK_OVF (1);
12883 token = read32 (ip + 2);
12884 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12885 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12888 val = mono_type_size (type, &ialign);
12890 MonoClass *klass = mini_get_class (method, token, generic_context);
12891 CHECK_TYPELOAD (klass);
12893 val = mono_type_size (&klass->byval_arg, &ialign);
12895 if (mini_is_gsharedvt_klass (klass))
12896 GSHAREDVT_FAILURE (*ip);
12898 EMIT_NEW_ICONST (cfg, ins, val);
12903 case CEE_REFANYTYPE: {
12904 MonoInst *src_var, *src;
12906 GSHAREDVT_FAILURE (*ip);
12912 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12914 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12915 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12916 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12921 case CEE_READONLY_:
12934 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12944 g_warning ("opcode 0x%02x not handled", *ip);
12948 if (start_new_bblock != 1)
12951 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12952 if (cfg->cbb->next_bb) {
12953 /* This could already be set because of inlining, #693905 */
12954 MonoBasicBlock *bb = cfg->cbb;
12956 while (bb->next_bb)
12958 bb->next_bb = end_bblock;
12960 cfg->cbb->next_bb = end_bblock;
12963 if (cfg->method == method && cfg->domainvar) {
12965 MonoInst *get_domain;
12967 cfg->cbb = init_localsbb;
12969 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12970 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12971 MONO_ADD_INS (cfg->cbb, store);
12974 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12975 if (cfg->compile_aot)
12976 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12977 mono_get_got_var (cfg);
12980 if (cfg->method == method && cfg->got_var)
12981 mono_emit_load_got_addr (cfg);
12983 if (init_localsbb) {
12984 cfg->cbb = init_localsbb;
12986 for (i = 0; i < header->num_locals; ++i) {
12987 emit_init_local (cfg, i, header->locals [i], init_locals);
12991 if (cfg->init_ref_vars && cfg->method == method) {
12992 /* Emit initialization for ref vars */
12993 // FIXME: Avoid duplication initialization for IL locals.
12994 for (i = 0; i < cfg->num_varinfo; ++i) {
12995 MonoInst *ins = cfg->varinfo [i];
12997 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12998 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13002 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13003 cfg->cbb = init_localsbb;
13004 emit_push_lmf (cfg);
13007 cfg->cbb = init_localsbb;
13008 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13011 MonoBasicBlock *bb;
13014 * Make seq points at backward branch targets interruptable.
13016 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13017 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13018 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13021 /* Add a sequence point for method entry/exit events */
13022 if (seq_points && cfg->gen_sdb_seq_points) {
13023 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13024 MONO_ADD_INS (init_localsbb, ins);
13025 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13026 MONO_ADD_INS (cfg->bb_exit, ins);
13030 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13031 * the code they refer to was dead (#11880).
13033 if (sym_seq_points) {
13034 for (i = 0; i < header->code_size; ++i) {
13035 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13038 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13039 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13046 if (cfg->method == method) {
13047 MonoBasicBlock *bb;
13048 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13049 if (bb == cfg->bb_init)
13052 bb->region = mono_find_block_region (cfg, bb->real_offset);
13054 mono_create_spvar_for_region (cfg, bb->region);
13055 if (cfg->verbose_level > 2)
13056 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13059 MonoBasicBlock *bb;
13060 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13061 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13062 bb->real_offset = inline_offset;
13066 if (inline_costs < 0) {
13069 /* Method is too large */
13070 mname = mono_method_full_name (method, TRUE);
13071 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13075 if ((cfg->verbose_level > 2) && (cfg->method == method))
13076 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13081 g_assert (!mono_error_ok (&cfg->error));
13085 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13089 set_exception_type_from_invalid_il (cfg, method, ip);
13093 g_slist_free (class_inits);
13094 mono_basic_block_free (original_bb);
13095 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13096 if (cfg->exception_type)
13099 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode to its store-immediate
 * counterpart (e.g. OP_STORE_MEMBASE_REG -> OP_STORE_MEMBASE_IMM), used
 * when the stored value is known to be a constant.  Aborts for opcodes
 * without an _IMM variant.
 */
13103 store_membase_reg_to_store_membase_imm (int opcode)
13106 	case OP_STORE_MEMBASE_REG:
13107 	return OP_STORE_MEMBASE_IMM;
13108 	case OP_STOREI1_MEMBASE_REG:
13109 	return OP_STOREI1_MEMBASE_IMM;
13110 	case OP_STOREI2_MEMBASE_REG:
13111 	return OP_STOREI2_MEMBASE_IMM;
13112 	case OP_STOREI4_MEMBASE_REG:
13113 	return OP_STOREI4_MEMBASE_IMM;
13114 	case OP_STOREI8_MEMBASE_REG:
13115 	return OP_STOREI8_MEMBASE_IMM;
	/* Any other store opcode has no immediate form */
13117 	g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g. OP_IADD ->
 * OP_IADD_IMM) so a constant source can be folded directly into the
 * instruction by the local optimization passes.  Covers 32-bit and
 * 64-bit ALU ops, shifts, compares, membase stores, and a few
 * x86/amd64-specific opcodes.
 */
13124 mono_op_to_op_imm (int opcode)
13128 	return OP_IADD_IMM;
13130 	return OP_ISUB_IMM;
13132 	return OP_IDIV_IMM;
13134 	return OP_IDIV_UN_IMM;
13136 	return OP_IREM_IMM;
13138 	return OP_IREM_UN_IMM;
13140 	return OP_IMUL_IMM;
13142 	return OP_IAND_IMM;
13146 	return OP_IXOR_IMM;
13148 	return OP_ISHL_IMM;
13150 	return OP_ISHR_IMM;
13152 	return OP_ISHR_UN_IMM;
13155 	return OP_LADD_IMM;
13157 	return OP_LSUB_IMM;
13159 	return OP_LAND_IMM;
13163 	return OP_LXOR_IMM;
13165 	return OP_LSHL_IMM;
13167 	return OP_LSHR_IMM;
13169 	return OP_LSHR_UN_IMM;
	/* The long remainder immediate form only exists on 64-bit registers */
13170 #if SIZEOF_REGISTER == 8
13172 	return OP_LREM_IMM;
13176 	return OP_COMPARE_IMM;
13178 	return OP_ICOMPARE_IMM;
13180 	return OP_LCOMPARE_IMM;
13182 	case OP_STORE_MEMBASE_REG:
13183 	return OP_STORE_MEMBASE_IMM;
13184 	case OP_STOREI1_MEMBASE_REG:
13185 	return OP_STOREI1_MEMBASE_IMM;
13186 	case OP_STOREI2_MEMBASE_REG:
13187 	return OP_STOREI2_MEMBASE_IMM;
13188 	case OP_STOREI4_MEMBASE_REG:
13189 	return OP_STOREI4_MEMBASE_IMM;
13191 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13193 	return OP_X86_PUSH_IMM;
13194 	case OP_X86_COMPARE_MEMBASE_REG:
13195 	return OP_X86_COMPARE_MEMBASE_IMM;
13197 #if defined(TARGET_AMD64)
13198 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
13199 	return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* A call through a register whose value is a constant becomes a direct call */
13201 	case OP_VOIDCALL_REG:
13202 	return OP_VOIDCALL;
13210 	return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CEE_LDIND_* IL opcode into the corresponding typed
 * OP_LOAD*_MEMBASE IR opcode.  CEE_LDIND_REF maps to the pointer-sized
 * OP_LOAD_MEMBASE.  Aborts on opcodes outside the LDIND family.
 */
13217 ldind_to_load_membase (int opcode)
13221 	return OP_LOADI1_MEMBASE;
13223 	return OP_LOADU1_MEMBASE;
13225 	return OP_LOADI2_MEMBASE;
13227 	return OP_LOADU2_MEMBASE;
13229 	return OP_LOADI4_MEMBASE;
13231 	return OP_LOADU4_MEMBASE;
13233 	return OP_LOAD_MEMBASE;
13234 	case CEE_LDIND_REF:
13235 	return OP_LOAD_MEMBASE;
13237 	return OP_LOADI8_MEMBASE;
13239 	return OP_LOADR4_MEMBASE;
13241 	return OP_LOADR8_MEMBASE;
13243 	g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CEE_STIND_* IL opcode into the corresponding typed
 * OP_STORE*_MEMBASE_REG IR opcode.  CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Aborts on opcodes outside the
 * STIND family.
 */
13250 stind_to_store_membase (int opcode)
13254 	return OP_STOREI1_MEMBASE_REG;
13256 	return OP_STOREI2_MEMBASE_REG;
13258 	return OP_STOREI4_MEMBASE_REG;
13260 	case CEE_STIND_REF:
13261 	return OP_STORE_MEMBASE_REG;
13263 	return OP_STOREI8_MEMBASE_REG;
13265 	return OP_STORER4_MEMBASE_REG;
13267 	return OP_STORER8_MEMBASE_REG;
13269 	g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, map a base+offset load (OP_LOAD*_MEMBASE) to the
 * absolute-address form (OP_LOAD*_MEM), used when the base register
 * holds a known constant address.  The 8-byte variant is only mapped
 * when registers are 64 bit.  On other targets nothing is mapped
 * (no LOAD_MEM support — see the FIXME below).
 */
13276 mono_load_membase_to_load_mem (int opcode)
13278 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13279 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13281 	case OP_LOAD_MEMBASE:
13282 	return OP_LOAD_MEM;
13283 	case OP_LOADU1_MEMBASE:
13284 	return OP_LOADU1_MEM;
13285 	case OP_LOADU2_MEMBASE:
13286 	return OP_LOADU2_MEM;
13287 	case OP_LOADI4_MEMBASE:
13288 	return OP_LOADI4_MEM;
13289 	case OP_LOADU4_MEMBASE:
13290 	return OP_LOADU4_MEM;
13291 #if SIZEOF_REGISTER == 8
13292 	case OP_LOADI8_MEMBASE:
13293 	return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When the result of an ALU OPCODE is immediately consumed by a store
 * described by STORE_OPCODE, return the x86/amd64 read-modify-write
 * form (..._MEMBASE_REG / ..._MEMBASE_IMM) which operates on memory
 * directly, avoiding the intermediate register.  Only pointer-sized
 * stores qualify on x86; amd64 additionally accepts 8-byte stores and
 * distinguishes 32-bit (OP_X86_*) from 64-bit (OP_AMD64_*) variants.
 */
13302 op_to_op_dest_membase (int store_opcode, int opcode)
13304 #if defined(TARGET_X86)
13305 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13310 	return OP_X86_ADD_MEMBASE_REG;
13312 	return OP_X86_SUB_MEMBASE_REG;
13314 	return OP_X86_AND_MEMBASE_REG;
13316 	return OP_X86_OR_MEMBASE_REG;
13318 	return OP_X86_XOR_MEMBASE_REG;
13321 	return OP_X86_ADD_MEMBASE_IMM;
13324 	return OP_X86_SUB_MEMBASE_IMM;
13327 	return OP_X86_AND_MEMBASE_IMM;
13330 	return OP_X86_OR_MEMBASE_IMM;
13333 	return OP_X86_XOR_MEMBASE_IMM;
13339 #if defined(TARGET_AMD64)
13340 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13345 	return OP_X86_ADD_MEMBASE_REG;
13347 	return OP_X86_SUB_MEMBASE_REG;
13349 	return OP_X86_AND_MEMBASE_REG;
13351 	return OP_X86_OR_MEMBASE_REG;
13353 	return OP_X86_XOR_MEMBASE_REG;
13355 	return OP_X86_ADD_MEMBASE_IMM;
13357 	return OP_X86_SUB_MEMBASE_IMM;
13359 	return OP_X86_AND_MEMBASE_IMM;
13361 	return OP_X86_OR_MEMBASE_IMM;
13363 	return OP_X86_XOR_MEMBASE_IMM;
13365 	return OP_AMD64_ADD_MEMBASE_REG;
13367 	return OP_AMD64_SUB_MEMBASE_REG;
13369 	return OP_AMD64_AND_MEMBASE_REG;
13371 	return OP_AMD64_OR_MEMBASE_REG;
13373 	return OP_AMD64_XOR_MEMBASE_REG;
13376 	return OP_AMD64_ADD_MEMBASE_IMM;
13379 	return OP_AMD64_SUB_MEMBASE_IMM;
13382 	return OP_AMD64_AND_MEMBASE_IMM;
13385 	return OP_AMD64_OR_MEMBASE_IMM;
13388 	return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-set result with the byte store that consumes it:
 * on x86/amd64, when STORE_OPCODE is a one-byte membase store, return
 * the setcc-to-memory form (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE)
 * so the flag is written straight to memory.
 */
13398 op_to_op_store_membase (int store_opcode, int opcode)
13400 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13403 	if (store_opcode == OP_STOREI1_MEMBASE_REG)
13404 	return OP_X86_SETEQ_MEMBASE;
13406 	if (store_opcode == OP_STOREI1_MEMBASE_REG)
13407 	return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a preceding membase load (LOAD_OPCODE) into OPCODE's first
 * source operand, returning an x86/amd64 opcode that reads the operand
 * from memory directly (compare/push *_MEMBASE forms).  On amd64,
 * ilp32 (x32) treats OP_LOAD_MEMBASE as a 4-byte load, hence the
 * ICOMPARE variants there.
 */
13415 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13418 	/* FIXME: This has sign extension issues */
13420 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13421 	return OP_X86_COMPARE_MEMBASE8_IMM;
	/* Only pointer/4-byte loads can be folded on x86 */
13424 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13429 	return OP_X86_PUSH_MEMBASE;
13430 	case OP_COMPARE_IMM:
13431 	case OP_ICOMPARE_IMM:
13432 	return OP_X86_COMPARE_MEMBASE_IMM;
13435 	return OP_X86_COMPARE_MEMBASE_REG;
13439 #ifdef TARGET_AMD64
13440 	/* FIXME: This has sign extension issues */
13442 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13443 	return OP_X86_COMPARE_MEMBASE8_IMM;
13448 	if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13449 	return OP_X86_PUSH_MEMBASE;
13451 	/* FIXME: This only works for 32 bit immediates
13452 	case OP_COMPARE_IMM:
13453 	case OP_LCOMPARE_IMM:
13454 	if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13455 	return OP_AMD64_COMPARE_MEMBASE_IMM;
13457 	case OP_ICOMPARE_IMM:
13458 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13459 	return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13463 	if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13464 	return OP_AMD64_ICOMPARE_MEMBASE_REG;
13465 	if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13466 	return OP_AMD64_COMPARE_MEMBASE_REG;
13469 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13470 	return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a preceding membase load (LOAD_OPCODE) into OPCODE's second
 * source operand, returning an x86/amd64 *_REG_MEMBASE opcode that
 * reads the operand from memory directly.  On amd64, 4-byte loads
 * (including OP_LOAD_MEMBASE under ilp32) select the 32-bit OP_X86_*
 * forms, while 8-byte loads select the OP_AMD64_* forms.
 */
13479 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13482 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13488 	return OP_X86_COMPARE_REG_MEMBASE;
13490 	return OP_X86_ADD_REG_MEMBASE;
13492 	return OP_X86_SUB_REG_MEMBASE;
13494 	return OP_X86_AND_REG_MEMBASE;
13496 	return OP_X86_OR_REG_MEMBASE;
13498 	return OP_X86_XOR_REG_MEMBASE;
13502 #ifdef TARGET_AMD64
13503 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13506 	return OP_AMD64_ICOMPARE_REG_MEMBASE;
13508 	return OP_X86_ADD_REG_MEMBASE;
13510 	return OP_X86_SUB_REG_MEMBASE;
13512 	return OP_X86_AND_REG_MEMBASE;
13514 	return OP_X86_OR_REG_MEMBASE;
13516 	return OP_X86_XOR_REG_MEMBASE;
13518 	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13522 	return OP_AMD64_COMPARE_REG_MEMBASE;
13524 	return OP_AMD64_ADD_REG_MEMBASE;
13526 	return OP_AMD64_SUB_REG_MEMBASE;
13528 	return OP_AMD64_AND_REG_MEMBASE;
13530 	return OP_AMD64_OR_REG_MEMBASE;
13532 	return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but bail out for opcodes which this
 * architecture emulates in software (long shifts on 32-bit registers,
 * mul/div/rem under MONO_ARCH_EMULATE_*), since the emulation helpers
 * cannot take an immediate operand; all remaining opcodes fall through
 * to mono_op_to_op_imm ().
 */
13541 mono_op_to_op_imm_noemul (int opcode)
13544 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13550 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13557 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13562 	return mono_op_to_op_imm (opcode);
13567  * mono_handle_global_vregs:
13569  * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13573 mono_handle_global_vregs (MonoCompile *cfg)
13575 	gint32 *vreg_to_bb;
13576 	MonoBasicBlock *bb;
	/*
	 * vreg -> "block number + 1" of the single bblock using the vreg,
	 * or -1 once the vreg has been seen in more than one bblock.
	 *
	 * NOTE(review): sizeof (gint32*) looks like it should be
	 * sizeof (gint32) (over-allocates on LP64), and the trailing "+ 1"
	 * adds one byte, not one element — presumably meant
	 * sizeof (gint32) * (cfg->next_vreg + 1).  Confirm and fix upstream.
	 */
13579 	vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13581 #ifdef MONO_ARCH_SIMD_INTRINSICS
13582 	if (cfg->uses_simd_intrinsics)
13583 	mono_simd_simplify_indirection (cfg);
13586 	/* Find local vregs used in more than one bb */
13587 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13588 	MonoInst *ins = bb->code;
13589 	int block_num = bb->block_num;
13591 	if (cfg->verbose_level > 2)
13592 	printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13595 	for (; ins; ins = ins->next) {
13596 	const char *spec = INS_INFO (ins->opcode);
13597 	int regtype = 0, regindex;
13600 	if (G_UNLIKELY (cfg->verbose_level > 2))
13601 	mono_print_ins (ins);
	/* This pass runs after IL opcodes have been lowered to machine IR */
13603 	g_assert (ins->opcode >= MONO_CEE_LAST);
	/* Examine, in order: dreg, sreg1, sreg2, sreg3 of the instruction */
13605 	for (regindex = 0; regindex < 4; regindex ++) {
13608 	if (regindex == 0) {
13609 	regtype = spec [MONO_INST_DEST];
13610 	if (regtype == ' ')
13613 	} else if (regindex == 1) {
13614 	regtype = spec [MONO_INST_SRC1];
13615 	if (regtype == ' ')
13618 	} else if (regindex == 2) {
13619 	regtype = spec [MONO_INST_SRC2];
13620 	if (regtype == ' ')
13623 	} else if (regindex == 3) {
13624 	regtype = spec [MONO_INST_SRC3];
13625 	if (regtype == ' ')
13630 #if SIZEOF_REGISTER == 4
13631 	/* In the LLVM case, the long opcodes are not decomposed */
13632 	if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13634 	* Since some instructions reference the original long vreg,
13635 	* and some reference the two component vregs, it is quite hard
13636 	* to determine when it needs to be global. So be conservative.
13638 	if (!get_vreg_to_inst (cfg, vreg)) {
13639 	mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13641 	if (cfg->verbose_level > 2)
13642 	printf ("LONG VREG R%d made global.\n", vreg);
13646 	* Make the component vregs volatile since the optimizations can
13647 	* get confused otherwise.
13649 	get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13650 	get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13654 	g_assert (vreg != -1);
13656 	prev_bb = vreg_to_bb [vreg];
13657 	if (prev_bb == 0) {
13658 	/* 0 is a valid block num */
13659 	vreg_to_bb [vreg] = block_num + 1;
13660 	} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
	/* Hardware registers are never made into variables */
13661 	if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13664 	if (!get_vreg_to_inst (cfg, vreg)) {
13665 	if (G_UNLIKELY (cfg->verbose_level > 2))
13666 	printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
	/* Pick the variable type from the regtype / GC-ref status */
13670 	if (vreg_is_ref (cfg, vreg))
13671 	mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13673 	mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13676 	mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13679 	mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13683 	mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13686 	g_assert_not_reached ();
13690 	/* Flag as having been used in more than one bb */
13691 	vreg_to_bb [vreg] = -1;
13697 	/* If a variable is used in only one bblock, convert it into a local vreg */
13698 	for (i = 0; i < cfg->num_varinfo; i++) {
13699 	MonoInst *var = cfg->varinfo [i];
13700 	MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13702 	switch (var->type) {
13708 #if SIZEOF_REGISTER == 8
13711 #if !defined(TARGET_X86)
13712 	/* Enabling this screws up the fp stack on x86 */
13715 	if (mono_arch_is_soft_float ())
13719 	if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13723 	/* Arguments are implicitly global */
13724 	/* Putting R4 vars into registers doesn't work currently */
13725 	/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13726 	if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13728 	* Make that the variable's liveness interval doesn't contain a call, since
13729 	* that would cause the lvreg to be spilled, making the whole optimization
13732 	/* This is too slow for JIT compilation */
13734 	if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13736 	int def_index, call_index, ins_index;
13737 	gboolean spilled = FALSE;
13742 	for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13743 	const char *spec = INS_INFO (ins->opcode);
13745 	if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13746 	def_index = ins_index;
	/*
	 * NOTE(review): the second clause below repeats the SRC1/sreg1 test;
	 * presumably it was meant to check spec [MONO_INST_SRC2] and
	 * ins->sreg2, so uses through the second source operand are
	 * currently missed — confirm against upstream history.
	 */
13748 	if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13749 	((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13750 	if (call_index > def_index) {
13756 	if (MONO_IS_CALL (ins))
13757 	call_index = ins_index;
13767 	if (G_UNLIKELY (cfg->verbose_level > 2))
13768 	printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
	/* Demote: the variable becomes a plain local vreg again */
13769 	var->flags |= MONO_INST_IS_DEAD;
13770 	cfg->vreg_to_inst [var->dreg] = NULL;
13777 	* Compress the varinfo and vars tables so the liveness computation is faster and
13778 	* takes up less space.
13781 	for (i = 0; i < cfg->num_varinfo; ++i) {
13782 	MonoInst *var = cfg->varinfo [i];
13783 	if (pos < i && cfg->locals_start == i)
13784 	cfg->locals_start = pos;
13785 	if (!(var->flags & MONO_INST_IS_DEAD)) {
13787 	cfg->varinfo [pos] = cfg->varinfo [i];
13788 	cfg->varinfo [pos]->inst_c0 = pos;
13789 	memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13790 	cfg->vars [pos].idx = pos;
13791 #if SIZEOF_REGISTER == 4
13792 	if (cfg->varinfo [pos]->type == STACK_I8) {
13793 	/* Modify the two component vars too */
13796 	var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13797 	var1->inst_c0 = pos;
13798 	var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13799 	var1->inst_c0 = pos;
13806 	cfg->num_varinfo = pos;
13807 	if (cfg->locals_start > cfg->num_varinfo)
13808 	cfg->locals_start = cfg->num_varinfo;
13812  * mono_allocate_gsharedvt_vars:
13814  * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13815  * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13818 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
	/* Zero-filled, so 0 in the map means "no gsharedvt entry" */
13822 	cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13824 	for (i = 0; i < cfg->num_varinfo; ++i) {
13825 	MonoInst *ins = cfg->varinfo [i];
13828 	if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
	/* Locals get a runtime-info slot; earlier entries (arguments) are marked -1 */
13829 	if (i >= cfg->locals_start) {
13831 	idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
	/* Stored as idx + 1 so 0 can keep meaning "unmapped" */
13832 	cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13833 	ins->opcode = OP_GSHAREDVT_LOCAL;
13834 	ins->inst_imm = idx;
13837 	cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13838 	ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13845 * mono_spill_global_vars:
13847 * Generate spill code for variables which are not allocated to registers,
13848 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13849 * code is generated which could be optimized by the local optimization passes.
13852 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/* NOTE(review): this chunk is an elided extraction (some declarations, braces
 * and else-branches are not visible); comments below describe only what the
 * visible code establishes. */
13854 MonoBasicBlock *bb;
13856 int orig_next_vreg;
13857 guint32 *vreg_to_lvreg;
13859 guint32 i, lvregs_len, lvregs_size;
13860 gboolean dest_has_lvreg = FALSE;
13861 MonoStackType stacktypes [128];
13862 MonoInst **live_range_start, **live_range_end;
13863 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13865 *need_local_opts = FALSE;
13867 memset (spec2, 0, sizeof (spec2));
13869 /* FIXME: Move this function to mini.c */
/* Map the per-register spec characters ('i'/'l'/'f'/'x') to stack types,
 * used below when allocating fresh dregs/sregs via alloc_dreg (). */
13870 stacktypes ['i'] = STACK_PTR;
13871 stacktypes ['l'] = STACK_I8;
13872 stacktypes ['f'] = STACK_R8;
13873 #ifdef MONO_ARCH_SIMD_INTRINSICS
13874 stacktypes ['x'] = STACK_VTYPE;
13877 #if SIZEOF_REGISTER == 4
/* Create MonoInsts for longs */
/* On 32-bit targets a 64-bit variable is split into two word-sized halves
 * (MONO_LVREG_LS/MONO_LVREG_MS) at adjacent stack offsets. */
13879 for (i = 0; i < cfg->num_varinfo; i++) {
13880 MonoInst *ins = cfg->varinfo [i];
13882 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13883 switch (ins->type) {
13888 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13891 g_assert (ins->opcode == OP_REGOFFSET);
13893 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13895 tree->opcode = OP_REGOFFSET;
13896 tree->inst_basereg = ins->inst_basereg;
13897 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13899 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13901 tree->opcode = OP_REGOFFSET;
13902 tree->inst_basereg = ins->inst_basereg;
13903 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13913 if (cfg->compute_gc_maps) {
13914 /* registers need liveness info even for non-ref variables */
13915 for (i = 0; i < cfg->num_varinfo; i++) {
13916 MonoInst *ins = cfg->varinfo [i];
13918 if (ins->opcode == OP_REGVAR)
13919 ins->flags |= MONO_INST_GC_TRACK;
13923 /* FIXME: widening and truncation */
/*
13926 * As an optimization, when a variable allocated to the stack is first loaded into
13927 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13928 * the variable again.
 */
13930 orig_next_vreg = cfg->next_vreg;
13931 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13932 lvregs_size = 1024;
13933 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
/*
13937 * These arrays contain the first and last instructions accessing a given
13939 * Since we emit bblocks in the same order we process them here, and we
13940 * don't split live ranges, these will precisely describe the live range of
13941 * the variable, i.e. the instruction range where a valid value can be found
13942 * in the variable's location.
13943 * The live range is computed using the liveness info computed by the liveness pass.
13944 * We can't use vmv->range, since that is an abstract live range, and we need
13945 * one which is instruction precise.
13946 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
 */
13948 /* FIXME: Only do this if debugging info is requested */
13949 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
13950 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13951 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13952 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13954 /* Add spill loads/stores */
13955 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13958 if (cfg->verbose_level > 2)
13959 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13961 /* Clear vreg_to_lvreg array */
/* lvregs lists only the entries actually set, so clearing is O(entries). */
13962 for (i = 0; i < lvregs_len; i++)
13963 vreg_to_lvreg [lvregs [i]] = 0;
13967 MONO_BB_FOR_EACH_INS (bb, ins) {
13968 const char *spec = INS_INFO (ins->opcode);
13969 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13970 gboolean store, no_lvreg;
13971 int sregs [MONO_MAX_SRC_REGS];
13973 if (G_UNLIKELY (cfg->verbose_level > 2))
13974 mono_print_ins (ins);
13976 if (ins->opcode == OP_NOP)
/*
13980 * We handle LDADDR here as well, since it can only be decomposed
13981 * when variable addresses are known.
 */
13983 if (ins->opcode == OP_LDADDR) {
13984 MonoInst *var = (MonoInst *)ins->inst_p0;
13986 if (var->opcode == OP_VTARG_ADDR) {
13987 /* Happens on SPARC/S390 where vtypes are passed by reference */
13988 MonoInst *vtaddr = var->inst_left;
13989 if (vtaddr->opcode == OP_REGVAR) {
13990 ins->opcode = OP_MOVE;
13991 ins->sreg1 = vtaddr->dreg;
13993 else if (var->inst_left->opcode == OP_REGOFFSET) {
13994 ins->opcode = OP_LOAD_MEMBASE;
13995 ins->inst_basereg = vtaddr->inst_basereg;
13996 ins->inst_offset = vtaddr->inst_offset;
/* gsharedvt_vreg_to_idx: -1 = arg by ref, >0 = locals-area slot + 1
 * (see mono_allocate_gsharedvt_vars). */
13999 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14000 /* gsharedvt arg passed by ref */
14001 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14003 ins->opcode = OP_LOAD_MEMBASE;
14004 ins->inst_basereg = var->inst_basereg;
14005 ins->inst_offset = var->inst_offset;
14006 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14007 MonoInst *load, *load2, *load3;
14008 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14009 int reg1, reg2, reg3;
14010 MonoInst *info_var = cfg->gsharedvt_info_var;
14011 MonoInst *locals_var = cfg->gsharedvt_locals_var;
/*
14015 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
 */
14018 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14020 g_assert (info_var);
14021 g_assert (locals_var);
14023 /* Mark the instruction used to compute the locals var as used */
14024 cfg->gsharedvt_locals_var_ins = NULL;
14026 /* Load the offset */
14027 if (info_var->opcode == OP_REGOFFSET) {
14028 reg1 = alloc_ireg (cfg);
14029 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14030 } else if (info_var->opcode == OP_REGVAR) {
14032 reg1 = info_var->dreg;
14034 g_assert_not_reached ();
14036 reg2 = alloc_ireg (cfg);
/* reg2 <- runtime_info->entries [idx] (the offset of the local). */
14037 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14038 /* Load the locals area address */
14039 reg3 = alloc_ireg (cfg);
14040 if (locals_var->opcode == OP_REGOFFSET) {
14041 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14042 } else if (locals_var->opcode == OP_REGVAR) {
14043 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14045 g_assert_not_reached ();
14047 /* Compute the address */
/* ins becomes reg3 + reg2; the three loads are inserted before it. */
14048 ins->opcode = OP_PADD;
14052 mono_bblock_insert_before_ins (bb, ins, load3);
14053 mono_bblock_insert_before_ins (bb, load3, load2);
14055 mono_bblock_insert_before_ins (bb, load2, load);
14057 g_assert (var->opcode == OP_REGOFFSET);
/* Ordinary stack variable: its address is basereg + offset. */
14059 ins->opcode = OP_ADD_IMM;
14060 ins->sreg1 = var->inst_basereg;
14061 ins->inst_imm = var->inst_offset;
14064 *need_local_opts = TRUE;
14065 spec = INS_INFO (ins->opcode);
/* High-level (CEE-range) opcodes must all have been lowered by now. */
14068 if (ins->opcode < MONO_CEE_LAST) {
14069 mono_print_ins (ins);
14070 g_assert_not_reached ();
/*
14074 * Store opcodes have destbasereg in the dreg, but in reality, it is an
 */
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below
 * sees the stored value; swapped back further down. */
14078 if (MONO_IS_STORE_MEMBASE (ins)) {
14079 tmp_reg = ins->dreg;
14080 ins->dreg = ins->sreg2;
14081 ins->sreg2 = tmp_reg;
/* Build a matching swapped spec in spec2. */
14084 spec2 [MONO_INST_DEST] = ' ';
14085 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14086 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14087 spec2 [MONO_INST_SRC3] = ' ';
14089 } else if (MONO_IS_STORE_MEMINDEX (ins))
14090 g_assert_not_reached ();
14095 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14096 printf ("\t %.3s %d", spec, ins->dreg);
14097 num_sregs = mono_inst_get_src_registers (ins, sregs);
14098 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14099 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/* If the destination is a global variable, either rename it to its hreg
 * (OP_REGVAR), fuse the store into the instruction, or emit an explicit
 * spill store after it. */
/***************/
14106 regtype = spec [MONO_INST_DEST];
14107 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14110 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14111 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14112 MonoInst *store_ins;
14114 MonoInst *def_ins = ins;
14115 int dreg = ins->dreg; /* The original vreg */
14117 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14119 if (var->opcode == OP_REGVAR) {
14120 ins->dreg = var->dreg;
14121 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
14123 * Instead of emitting a load+store, use a _membase opcode.
 */
14125 g_assert (var->opcode == OP_REGOFFSET);
14126 if (ins->opcode == OP_MOVE) {
14130 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14131 ins->inst_basereg = var->inst_basereg;
14132 ins->inst_offset = var->inst_offset;
14135 spec = INS_INFO (ins->opcode);
14139 g_assert (var->opcode == OP_REGOFFSET);
14141 prev_dreg = ins->dreg;
14143 /* Invalidate any previous lvreg for this vreg */
14144 vreg_to_lvreg [ins->dreg] = 0;
/* Soft-float: an R8 value lives in an integer register pair. */
14148 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14150 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the result into a fresh lvreg; a spill store follows. */
14153 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14155 #if SIZEOF_REGISTER != 8
14156 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: store the two word halves separately. */
14157 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14158 mono_bblock_insert_after_ins (bb, ins, store_ins);
14159 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14160 mono_bblock_insert_after_ins (bb, ins, store_ins);
14161 def_ins = store_ins;
14166 g_assert (store_opcode != OP_STOREV_MEMBASE);
14168 /* Try to fuse the store into the instruction itself */
14169 /* FIXME: Add more instructions */
14170 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant result: turn the whole thing into a store-immediate. */
14171 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14172 ins->inst_imm = ins->inst_c0;
14173 ins->inst_destbasereg = var->inst_basereg;
14174 ins->inst_offset = var->inst_offset;
14175 spec = INS_INFO (ins->opcode);
14176 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* Plain move: rewrite it as a direct store of the source. */
14177 ins->opcode = store_opcode;
14178 ins->inst_destbasereg = var->inst_basereg;
14179 ins->inst_offset = var->inst_offset;
/* ins is now a store; apply the same dreg/sreg2 swap as above. */
14183 tmp_reg = ins->dreg;
14184 ins->dreg = ins->sreg2;
14185 ins->sreg2 = tmp_reg;
14188 spec2 [MONO_INST_DEST] = ' ';
14189 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14190 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14191 spec2 [MONO_INST_SRC3] = ' ';
14193 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14194 // FIXME: The backends expect the base reg to be in inst_basereg
14195 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14197 ins->inst_basereg = var->inst_basereg;
14198 ins->inst_offset = var->inst_offset;
14199 spec = INS_INFO (ins->opcode);
14201 /* printf ("INS: "); mono_print_ins (ins); */
14202 /* Create a store instruction */
14203 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14205 /* Insert it after the instruction */
14206 mono_bblock_insert_after_ins (bb, ins, store_ins);
14208 def_ins = store_ins;
/*
14211 * We can't assign ins->dreg to var->dreg here, since the
14212 * sregs could use it. So set a flag, and do it after
 */
/* Volatile/indirect vars (and fp-stack fp values) must not be cached. */
14215 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14216 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for the live range. */
14221 if (def_ins && !live_range_start [dreg]) {
14222 live_range_start [dreg] = def_ins;
14223 live_range_start_bb [dreg] = bb;
14226 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14229 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14230 tmp->inst_c1 = dreg;
14231 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/*    SREGS    */
/* For each source register that is a global variable: rename to its hreg,
 * reuse a cached lvreg, fuse the load into the instruction, or emit an
 * explicit load before it. */
/***************/
14238 num_sregs = mono_inst_get_src_registers (ins, sregs);
14239 for (srcindex = 0; srcindex < 3; ++srcindex) {
14240 regtype = spec [MONO_INST_SRC1 + srcindex];
14241 sreg = sregs [srcindex];
14243 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14244 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14245 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14246 MonoInst *use_ins = ins;
14247 MonoInst *load_ins;
14248 guint32 load_opcode;
14250 if (var->opcode == OP_REGVAR) {
14251 sregs [srcindex] = var->dreg;
14252 //mono_inst_set_src_registers (ins, sregs);
14253 live_range_end [sreg] = use_ins;
14254 live_range_end_bb [sreg] = bb;
14256 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14259 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14260 /* var->dreg is a hreg */
14261 tmp->inst_c1 = sreg;
14262 mono_bblock_insert_after_ins (bb, ins, tmp);
14268 g_assert (var->opcode == OP_REGOFFSET);
14270 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14272 g_assert (load_opcode != OP_LOADV_MEMBASE);
14274 if (vreg_to_lvreg [sreg]) {
14275 g_assert (vreg_to_lvreg [sreg] != -1);
14277 /* The variable is already loaded to an lvreg */
14278 if (G_UNLIKELY (cfg->verbose_level > 2))
14279 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14280 sregs [srcindex] = vreg_to_lvreg [sreg];
14281 //mono_inst_set_src_registers (ins, sregs);
14285 /* Try to fuse the load into the instruction */
14286 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14287 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14288 sregs [0] = var->inst_basereg;
14289 //mono_inst_set_src_registers (ins, sregs);
14290 ins->inst_offset = var->inst_offset;
14291 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14292 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14293 sregs [1] = var->inst_basereg;
14294 //mono_inst_set_src_registers (ins, sregs);
14295 ins->inst_offset = var->inst_offset;
14297 if (MONO_IS_REAL_MOVE (ins)) {
14298 ins->opcode = OP_NOP;
14301 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg before the instruction. */
14303 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14305 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14306 if (var->dreg == prev_dreg) {
/*
14308 * sreg refers to the value loaded by the load
14309 * emitted below, but we need to use ins->dreg
14310 * since it refers to the store emitted earlier.
 */
14314 g_assert (sreg != -1);
/* Cache the lvreg for subsequent uses of this variable. */
14315 vreg_to_lvreg [var->dreg] = sreg;
14316 if (lvregs_len >= lvregs_size) {
/* Grow the lvregs list (mempool memory, the old array is abandoned). */
14317 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14318 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14319 lvregs = new_lvregs;
14322 lvregs [lvregs_len ++] = var->dreg;
14326 sregs [srcindex] = sreg;
14327 //mono_inst_set_src_registers (ins, sregs);
14329 #if SIZEOF_REGISTER != 8
14330 if (regtype == 'l') {
/* 64-bit load on a 32-bit target: load the two word halves separately. */
14331 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14332 mono_bblock_insert_before_ins (bb, ins, load_ins);
14333 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14334 mono_bblock_insert_before_ins (bb, ins, load_ins);
14335 use_ins = load_ins;
14340 #if SIZEOF_REGISTER == 4
14341 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14343 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14344 mono_bblock_insert_before_ins (bb, ins, load_ins);
14345 use_ins = load_ins;
/* Extend the live range to this use (only original vregs are tracked). */
14349 if (var->dreg < orig_next_vreg) {
14350 live_range_end [var->dreg] = use_ins;
14351 live_range_end_bb [var->dreg] = bb;
14354 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14357 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14358 tmp->inst_c1 = var->dreg;
14359 mono_bblock_insert_after_ins (bb, ins, tmp);
14363 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section above: now that the sregs have been
 * processed it is safe to record ins->dreg as the cached lvreg. */
14365 if (dest_has_lvreg) {
14366 g_assert (ins->dreg != -1);
14367 vreg_to_lvreg [prev_dreg] = ins->dreg;
14368 if (lvregs_len >= lvregs_size) {
14369 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14370 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14371 lvregs = new_lvregs;
14374 lvregs [lvregs_len ++] = prev_dreg;
14375 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
14379 tmp_reg = ins->dreg;
14380 ins->dreg = ins->sreg2;
14381 ins->sreg2 = tmp_reg;
/* Calls invalidate all cached lvregs (presumably because they clobber
 * the registers they live in — NOTE(review): rationale not visible here). */
14384 if (MONO_IS_CALL (ins)) {
14385 /* Clear vreg_to_lvreg array */
14386 for (i = 0; i < lvregs_len; i++)
14387 vreg_to_lvreg [lvregs [i]] = 0;
14389 } else if (ins->opcode == OP_NOP) {
14391 MONO_INST_NULLIFY_SREGS (ins);
14394 if (cfg->verbose_level > 2)
14395 mono_print_ins_index (1, ins);
14398 /* Extend the live range based on the liveness info */
14399 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14400 for (i = 0; i < cfg->num_varinfo; i ++) {
14401 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14403 if (vreg_is_volatile (cfg, vi->vreg))
14404 /* The liveness info is incomplete */
14407 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14408 /* Live from at least the first ins of this bb */
14409 live_range_start [vi->vreg] = bb->code;
14410 live_range_start_bb [vi->vreg] = bb;
14413 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14414 /* Live at least until the last ins of this bb */
14415 live_range_end [vi->vreg] = bb->last_ins;
14416 live_range_end_bb [vi->vreg] = bb;
/*
14423 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14424 * by storing the current native offset into MonoMethodVar->live_range_start/end.
 */
14426 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14427 for (i = 0; i < cfg->num_varinfo; ++i) {
14428 int vreg = MONO_VARINFO (cfg, i)->vreg;
14431 if (live_range_start [vreg]) {
14432 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14434 ins->inst_c1 = vreg;
14435 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14437 if (live_range_end [vreg]) {
14438 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14440 ins->inst_c1 = vreg;
14441 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14442 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14444 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14449 if (cfg->gsharedvt_locals_var_ins) {
14450 /* Nullify if unused */
14451 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14452 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* The live-range arrays were g_new'd (not mempool-allocated), so free them. */
14455 g_free (live_range_start);
14456 g_free (live_range_end);
14457 g_free (live_range_start_bb);
14458 g_free (live_range_end_bb);
14464 * - use 'iadd' instead of 'int_add'
14465 * - handling ovf opcodes: decompose in method_to_ir.
14466 * - unify iregs/fregs
14467 * -> partly done, the missing parts are:
14468 * - a more complete unification would involve unifying the hregs as well, so
14469 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14470 * would no longer map to the machine hregs, so the code generators would need to
14471 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14472 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14473 * fp/non-fp branches speeds it up by about 15%.
14474 * - use sext/zext opcodes instead of shifts
14476 * - get rid of TEMPLOADs if possible and use vregs instead
14477 * - clean up usage of OP_P/OP_ opcodes
14478 * - cleanup usage of DUMMY_USE
14479 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14481 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14482 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14483 * - make sure handle_stack_args () is called before the branch is emitted
14484 * - when the new IR is done, get rid of all unused stuff
14485 * - COMPARE/BEQ as separate instructions or unify them ?
14486 * - keeping them separate allows specialized compare instructions like
14487 * compare_imm, compare_membase
14488 * - most back ends unify fp compare+branch, fp compare+ceq
14489 * - integrate mono_save_args into inline_method
14490 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14491 * - handle long shift opts on 32 bit platforms somehow: they require
14492 * 3 sregs (2 for arg1 and 1 for arg2)
14493 * - make byref a 'normal' type.
14494 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14495 * variable if needed.
14496 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14497 * like inline_method.
14498 * - remove inlining restrictions
14499 * - fix LNEG and enable cfold of INEG
14500 * - generalize x86 optimizations like ldelema as a peephole optimization
14501 * - add store_mem_imm for amd64
14502 * - optimize the loading of the interruption flag in the managed->native wrappers
14503 * - avoid special handling of OP_NOP in passes
14504 * - move code inserting instructions into one function/macro.
14505 * - try a coalescing phase after liveness analysis
14506 * - add float -> vreg conversion + local optimizations on !x86
14507 * - figure out how to handle decomposed branches during optimizations, ie.
14508 * compare+branch, op_jump_table+op_br etc.
14509 * - promote RuntimeXHandles to vregs
14510 * - vtype cleanups:
14511 * - add a NEW_VARLOADA_VREG macro
14512 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14513 * accessing vtype fields.
14514 * - get rid of I8CONST on 64 bit platforms
14515 * - dealing with the increase in code size due to branches created during opcode
14517 * - use extended basic blocks
14518 * - all parts of the JIT
14519 * - handle_global_vregs () && local regalloc
14520 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14521 * - sources of increase in code size:
14524 * - isinst and castclass
14525 * - lvregs not allocated to global registers even if used multiple times
14526 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14528 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14529 * - add all micro optimizations from the old JIT
14530 * - put tree optimizations into the deadce pass
14531 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14532 * specific function.
14533 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14534 * fcompare + branchCC.
14535 * - create a helper function for allocating a stack slot, taking into account
14536 * MONO_CFG_HAS_SPILLUP.
14538 * - merge the ia64 switch changes.
14539 * - optimize mono_regstate2_alloc_int/float.
14540 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14541 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14542 * parts of the tree could be separated by other instructions, killing the tree
14543 * arguments, or stores killing loads etc. Also, should we fold loads into other
14544 * instructions if the result of the load is used multiple times ?
14545 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14546 * - LAST MERGE: 108395.
14547 * - when returning vtypes in registers, generate IR and append it to the end of the
14548 * last bb instead of doing it in the epilog.
14549 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14557 - When to decompose opcodes:
14558 - earlier: this makes some optimizations hard to implement, since the low level IR
14559      no longer contains the necessary information. But it is easier to do.
14560 - later: harder to implement, enables more optimizations.
14561 - Branches inside bblocks:
14562 - created when decomposing complex opcodes.
14563 - branches to another bblock: harmless, but not tracked by the branch
14564 optimizations, so need to branch to a label at the start of the bblock.
14565 - branches to inside the same bblock: very problematic, trips up the local
14566      reg allocator. Can be fixed by splitting the current bblock, but that is a
14567 complex operation, since some local vregs can become global vregs etc.
14568 - Local/global vregs:
14569 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14570 local register allocator.
14571 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14572 structure, created by mono_create_var (). Assigned to hregs or the stack by
14573 the global register allocator.
14574 - When to do optimizations like alu->alu_imm:
14575 - earlier -> saves work later on since the IR will be smaller/simpler
14576 - later -> can work on more instructions
14577 - Handling of valuetypes:
14578 - When a vtype is pushed on the stack, a new temporary is created, an
14579 instruction computing its address (LDADDR) is emitted and pushed on
14580 the stack. Need to optimize cases when the vtype is used immediately as in
14581 argument passing, stloc etc.
14582 - Instead of the to_end stuff in the old JIT, simply call the function handling
14583 the values on the stack before emitting the last instruction of the bb.
14586 #else /* !DISABLE_JIT */
14589 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14593 #endif /* !DISABLE_JIT */