3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/* Inlining heuristics: BRANCH_COST is the relative cost charged per branch
 * when estimating a callee's size; INLINE_LENGTH_LIMIT is the maximum
 * (cost-adjusted) IL length a callee may have to be considered for inlining. */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 inline static MonoInst*
152 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
158 static MonoMethodSignature *helper_sig_jit_thread_attach;
159 static MonoMethodSignature *helper_sig_get_tls_tramp;
160 static MonoMethodSignature *helper_sig_set_tls_tramp;
162 /* type loading helpers */
163 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
164 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
167 * Instruction metadata
175 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
176 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
182 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
187 /* keep in sync with the enum in mini.h */
190 #include "mini-ops.h"
195 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
196 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
198 * This should contain the index of the last sreg + 1. This is not the same
199 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
201 const gint8 ins_sreg_counts[] = {
202 #include "mini-ops.h"
208 mono_alloc_ireg (MonoCompile *cfg)
210 return alloc_ireg (cfg);
214 mono_alloc_lreg (MonoCompile *cfg)
216 return alloc_lreg (cfg);
220 mono_alloc_freg (MonoCompile *cfg)
222 return alloc_freg (cfg);
226 mono_alloc_preg (MonoCompile *cfg)
228 return alloc_preg (cfg);
232 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
234 return alloc_dreg (cfg, stack_type);
238 * mono_alloc_ireg_ref:
240 * Allocate an IREG, and mark it as holding a GC ref.
243 mono_alloc_ireg_ref (MonoCompile *cfg)
245 return alloc_ireg_ref (cfg);
249 * mono_alloc_ireg_mp:
251 * Allocate an IREG, and mark it as holding a managed pointer.
254 mono_alloc_ireg_mp (MonoCompile *cfg)
256 return alloc_ireg_mp (cfg);
260 * mono_alloc_ireg_copy:
262 * Allocate an IREG with the same GC type as VREG.
265 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
267 if (vreg_is_ref (cfg, vreg))
268 return alloc_ireg_ref (cfg);
269 else if (vreg_is_mp (cfg, vreg))
270 return alloc_ireg_mp (cfg);
272 return alloc_ireg (cfg);
276 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 type = mini_get_underlying_type (type);
283 switch (type->type) {
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 type = &type->data.generic_class->container_class->byval_arg;
332 g_assert (cfg->gshared);
333 if (mini_type_var_is_vt (type))
336 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
338 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print basic block BB prefixed with MSG — its block number,
 * its IN/OUT edge lists as "BB<num>(<dfn>)" pairs, then every instruction
 * in the block via mono_print_ins_index (). */
344 mono_print_bb (MonoBasicBlock *bb, const char *msg)
348 GString *str = g_string_new ("");
350 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
353 g_string_append_printf (str, ", OUT: ");
354 for (i = 0; i < bb->out_count; ++i)
355 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
356 g_string_append_printf (str, " ]\n");
/* Emit the assembled header in one go, then free the scratch buffer. */
358 g_print ("%s", str->str);
359 g_string_free (str, TRUE);
361 for (tree = bb->code; tree; tree = tree->next)
362 mono_print_ins_index (-1, tree);
/* Build the cached icall signatures used by JIT helper calls. Each string
 * is "<ret> <args...>" in icall-signature notation. Presumably called once
 * during JIT initialization; the helper_sig_* globals are read-only after
 * that — TODO(review): confirm against the caller. */
366 mono_create_helper_signatures (void)
368 helper_sig_domain_get = mono_create_icall_signature ("ptr");
369 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
370 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
371 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
372 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
373 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debug hook invoked when IL fails verification (see the LOAD_ERROR and
 * UNVERIFIED macros). Gives a stable, never-inlined place to set a native
 * breakpoint; only acts when the 'break_on_unverified' debug option is set. */
376 static MONO_NEVER_INLINE void
377 break_on_unverified (void)
379 if (mini_get_debug_options ()->break_on_unverified)
/* Record a System.FieldAccessException on CFG: METHOD made an illegal access
 * to FIELD. Formats the full method and field names into the error message;
 * both name strings are heap-allocated and freed before returning. */
383 static MONO_NEVER_INLINE void
384 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
386 char *method_fname = mono_method_full_name (method, TRUE);
387 char *field_fname = mono_field_full_name (field);
388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
389 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
390 g_free (method_fname);
391 g_free (field_fname);
/* Mark CFG as having failed to inline, with MSG naming the reason.
 * Used by the INLINE_FAILURE macro, which then jumps to exception_exit
 * in the calling function. */
394 static MONO_NEVER_INLINE void
395 inline_failure (MonoCompile *cfg, const char *msg)
397 if (cfg->verbose_level >= 2)
398 printf ("inline failed: %s\n", msg)
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
402 static MONO_NEVER_INLINE void
403 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 if (cfg->verbose_level > 2) \
406 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
407 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt (valuetype generic sharing) failure for the current
 * method at OPCODE. Stores a heap-allocated diagnostic message — including
 * the FILE:LINE of the call site — in cfg->exception_message and flags CFG
 * with MONO_EXCEPTION_GENERIC_SHARING_FAILED so the method is retried
 * without gsharedvt. Used by the GSHAREDVT_FAILURE macro. */
410 static MONO_NEVER_INLINE void
411 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
413 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
414 if (cfg->verbose_level >= 2)
415 printf ("%s\n", cfg->exception_message);
416 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
420 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
421 * foo<T> (int i) { ldarg.0; box T; }
423 #define UNVERIFIED do { \
424 if (cfg->gsharedvt) { \
425 if (cfg->verbose_level > 2) \
426 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
427 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
428 goto exception_exit; \
430 break_on_unverified (); \
434 #define GET_BBLOCK(cfg,tblock,ip) do { \
435 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
437 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
438 NEW_BBLOCK (cfg, (tblock)); \
439 (tblock)->cil_code = (ip); \
440 ADD_BBLOCK (cfg, (tblock)); \
444 #if defined(TARGET_X86) || defined(TARGET_AMD64)
445 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
446 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
447 (dest)->dreg = alloc_ireg_mp ((cfg)); \
448 (dest)->sreg1 = (sr1); \
449 (dest)->sreg2 = (sr2); \
450 (dest)->inst_imm = (imm); \
451 (dest)->backend.shift_amount = (shift); \
452 MONO_ADD_INS ((cfg)->cbb, (dest)); \
456 /* Emit conversions so both operands of a binary opcode are of the same type */
458 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
460 MonoInst *arg1 = *arg1_ref;
461 MonoInst *arg2 = *arg2_ref;
464 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
465 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
468 /* Mixing r4/r8 is allowed by the spec */
469 if (arg1->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
473 conv->type = STACK_R8;
477 if (arg2->type == STACK_R4) {
478 int dreg = alloc_freg (cfg);
480 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
481 conv->type = STACK_R8;
487 #if SIZEOF_REGISTER == 8
488 /* FIXME: Need to add many more cases */
489 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
492 int dr = alloc_preg (cfg);
493 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
494 (ins)->sreg2 = widen->dreg;
499 #define ADD_BINOP(op) do { \
500 MONO_INST_NEW (cfg, ins, (op)); \
502 ins->sreg1 = sp [0]->dreg; \
503 ins->sreg2 = sp [1]->dreg; \
504 type_from_op (cfg, ins, sp [0], sp [1]); \
506 /* Have to insert a widening op */ \
507 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
508 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
509 MONO_ADD_INS ((cfg)->cbb, (ins)); \
510 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
513 #define ADD_UNOP(op) do { \
514 MONO_INST_NEW (cfg, ins, (op)); \
516 ins->sreg1 = sp [0]->dreg; \
517 type_from_op (cfg, ins, sp [0], NULL); \
519 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
520 MONO_ADD_INS ((cfg)->cbb, (ins)); \
521 *sp++ = mono_decompose_opcode (cfg, ins); \
524 #define ADD_BINCOND(next_block) do { \
527 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
528 cmp->sreg1 = sp [0]->dreg; \
529 cmp->sreg2 = sp [1]->dreg; \
530 type_from_op (cfg, cmp, sp [0], sp [1]); \
532 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
533 type_from_op (cfg, ins, sp [0], sp [1]); \
534 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
535 GET_BBLOCK (cfg, tblock, target); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_true_bb = tblock; \
538 if ((next_block)) { \
539 link_bblock (cfg, cfg->cbb, (next_block)); \
540 ins->inst_false_bb = (next_block); \
541 start_new_bblock = 1; \
543 GET_BBLOCK (cfg, tblock, ip); \
544 link_bblock (cfg, cfg->cbb, tblock); \
545 ins->inst_false_bb = tblock; \
546 start_new_bblock = 2; \
548 if (sp != stack_start) { \
549 handle_stack_args (cfg, stack_start, sp - stack_start); \
550 CHECK_UNVERIFIABLE (cfg); \
552 MONO_ADD_INS (cfg->cbb, cmp); \
553 MONO_ADD_INS (cfg->cbb, ins); \
557 * link_bblock: Links two basic blocks
559 * links two basic blocks in the control flow graph, the 'from'
560 * argument is the starting block and the 'to' argument is the block
561 * the control flow ends to after 'from'.
564 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
566 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added; a block with no cil_code is the
 * synthetic entry (or exit) block. */
570 if (from->cil_code) {
572 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
574 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
577 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
579 printf ("edge from entry to exit\n");
/* If TO is already in FROM's out list, the edge exists — nothing to add. */
584 for (i = 0; i < from->out_count; ++i) {
585 if (to == from->out_bb [i]) {
/* Grow FROM's out-edge array by one (mempool arrays are not resizable,
 * so copy the old entries into a fresh, one-larger allocation). */
591 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
592 for (i = 0; i < from->out_count; ++i) {
593 newa [i] = from->out_bb [i];
/* Symmetrically maintain TO's in-edge list. */
601 for (i = 0; i < to->in_count; ++i) {
602 if (from == to->in_bb [i]) {
608 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
609 for (i = 0; i < to->in_count; ++i) {
610 newa [i] = to->in_bb [i];
619 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
621 link_bblock (cfg, from, to);
625 * mono_find_block_region:
627 * We mark each basic block with a region ID. We use that to avoid BB
628 * optimizations when blocks are in different regions.
631 * A region token that encodes where this region is, and information
632 * about the clause owner for this block.
634 * The region encodes the try/catch/filter clause that owns this block
635 * as well as the type. -1 is a special value that represents a block
636 * that is in none of try/catch/filter.
639 mono_find_block_region (MonoCompile *cfg, int offset)
641 MonoMethodHeader *header = cfg->header;
642 MonoExceptionClause *clause;
/* First pass: handler/filter ranges take priority over try ranges.
 * The token is ((clause index + 1) << 8) | region kind | clause flags. */
645 for (i = 0; i < header->num_clauses; ++i) {
646 clause = &header->clauses [i];
647 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
648 (offset < (clause->handler_offset)))
649 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
651 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
652 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
653 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
654 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
655 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
657 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: otherwise, check whether OFFSET lies inside a try range. */
660 for (i = 0; i < header->num_clauses; ++i) {
661 clause = &header->clauses [i];
663 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
664 return ((i + 1) << 8) | clause->flags;
/* Return whether IL OFFSET lies inside the handler block of a finally or
 * fault clause of the current method; non-finally/fault clauses are skipped. */
671 ip_in_finally_clause (MonoCompile *cfg, int offset)
673 MonoMethodHeader *header = cfg->header;
674 MonoExceptionClause *clause;
677 for (i = 0; i < header->num_clauses; ++i) {
678 clause = &header->clauses [i];
/* Only finally and fault handlers are of interest here. */
679 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
682 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the exception clauses of kind TYPE whose protected (try) range
 * contains IP but not TARGET — i.e. the clauses a branch from IP to TARGET
 * leaves, whose handlers must therefore be run. Matches are appended to a
 * GList (declared on an elided line) that is returned to the caller. */
689 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
691 MonoMethodHeader *header = cfg->header;
692 MonoExceptionClause *clause;
696 for (i = 0; i < header->num_clauses; ++i) {
697 clause = &header->clauses [i];
/* The branch exits this clause iff IP is inside its try range and TARGET is not. */
698 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
699 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
700 if (clause->flags == type)
701 res = g_list_append (res, clause);
/* Return the stack-pointer spill variable for exception REGION, creating and
 * caching it in cfg->spvars on first use. */
708 mono_create_spvar_for_region (MonoCompile *cfg, int region)
712 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/* Not cached yet: create a pointer-sized local for it. */
716 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
717 /* prevent it from being register allocated */
718 var->flags |= MONO_INST_VOLATILE;
720 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
724 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
726 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return the exception-object variable for IL offset OFFSET, creating an
 * object-typed local and caching it in cfg->exvars on first use. */
730 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
734 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Not cached yet: create an object-typed local for the exception value. */
738 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
739 /* prevent it from being register allocated */
740 var->flags |= MONO_INST_VOLATILE;
742 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
748 * Returns the type used in the eval stack when @type is loaded.
749 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
752 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
756 type = mini_get_underlying_type (type);
757 inst->klass = klass = mono_class_from_mono_type (type);
759 inst->type = STACK_MP;
764 switch (type->type) {
766 inst->type = STACK_INV;
774 inst->type = STACK_I4;
779 case MONO_TYPE_FNPTR:
780 inst->type = STACK_PTR;
782 case MONO_TYPE_CLASS:
783 case MONO_TYPE_STRING:
784 case MONO_TYPE_OBJECT:
785 case MONO_TYPE_SZARRAY:
786 case MONO_TYPE_ARRAY:
787 inst->type = STACK_OBJ;
791 inst->type = STACK_I8;
794 inst->type = cfg->r4_stack_type;
797 inst->type = STACK_R8;
799 case MONO_TYPE_VALUETYPE:
800 if (type->data.klass->enumtype) {
801 type = mono_class_enum_basetype (type->data.klass);
805 inst->type = STACK_VTYPE;
808 case MONO_TYPE_TYPEDBYREF:
809 inst->klass = mono_defaults.typed_reference_class;
810 inst->type = STACK_VTYPE;
812 case MONO_TYPE_GENERICINST:
813 type = &type->data.generic_class->container_class->byval_arg;
817 g_assert (cfg->gshared);
818 if (mini_is_gsharedvt_type (type)) {
819 g_assert (cfg->gsharedvt);
820 inst->type = STACK_VTYPE;
822 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
826 g_error ("unknown type 0x%02x in eval stack type", type->type);
831 * The following tables are used to quickly validate the IL code in type_from_op ().
834 bin_num_table [STACK_MAX] [STACK_MAX] = {
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
840 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
848 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
851 /* reduce the size of this table */
853 bin_int_table [STACK_MAX] [STACK_MAX] = {
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
865 bin_comp_table [STACK_MAX] [STACK_MAX] = {
866 /* Inv i L p F & O vt r4 */
868 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
869 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
870 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
871 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
872 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
873 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
874 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
875 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
878 /* reduce the size of this table */
880 shift_table [STACK_MAX] [STACK_MAX] = {
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
892 * Tables to map from the non-specific opcode to the matching
893 * type-specific opcode.
895 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
897 binops_op_map [STACK_MAX] = {
898 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
901 /* handles from CEE_NEG to CEE_CONV_U8 */
903 unops_op_map [STACK_MAX] = {
904 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
907 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
909 ovfops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
913 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
915 ovf2ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
919 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
921 ovf3ops_op_map [STACK_MAX] = {
922 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
925 /* handles from CEE_BEQ to CEE_BLT_UN */
927 beqops_op_map [STACK_MAX] = {
928 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
931 /* handles from CEE_CEQ to CEE_CLT_UN */
933 ceqops_op_map [STACK_MAX] = {
934 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
938 * Sets ins->type (the type on the eval stack) according to the
939 * type of the opcode and the arguments to it.
940 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
942 * FIXME: this function sets ins->type unconditionally in some cases, but
943 * it should set it to invalid for some types (a conv.x on an object)
946 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
948 switch (ins->opcode) {
955 /* FIXME: check unverifiable args for STACK_MP */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
964 ins->type = bin_int_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
970 ins->type = shift_table [src1->type] [src2->type];
971 ins->opcode += binops_op_map [ins->type];
976 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
977 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
978 ins->opcode = OP_LCOMPARE;
979 else if (src1->type == STACK_R4)
980 ins->opcode = OP_RCOMPARE;
981 else if (src1->type == STACK_R8)
982 ins->opcode = OP_FCOMPARE;
984 ins->opcode = OP_ICOMPARE;
986 case OP_ICOMPARE_IMM:
987 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
988 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
989 ins->opcode = OP_LCOMPARE_IMM;
1001 ins->opcode += beqops_op_map [src1->type];
1004 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
1011 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1012 ins->opcode += ceqops_op_map [src1->type];
1016 ins->type = neg_table [src1->type];
1017 ins->opcode += unops_op_map [ins->type];
1020 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1021 ins->type = src1->type;
1023 ins->type = STACK_INV;
1024 ins->opcode += unops_op_map [ins->type];
1030 ins->type = STACK_I4;
1031 ins->opcode += unops_op_map [src1->type];
1034 ins->type = STACK_R8;
1035 switch (src1->type) {
1038 ins->opcode = OP_ICONV_TO_R_UN;
1041 ins->opcode = OP_LCONV_TO_R_UN;
1045 case CEE_CONV_OVF_I1:
1046 case CEE_CONV_OVF_U1:
1047 case CEE_CONV_OVF_I2:
1048 case CEE_CONV_OVF_U2:
1049 case CEE_CONV_OVF_I4:
1050 case CEE_CONV_OVF_U4:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf3ops_op_map [src1->type];
1054 case CEE_CONV_OVF_I_UN:
1055 case CEE_CONV_OVF_U_UN:
1056 ins->type = STACK_PTR;
1057 ins->opcode += ovf2ops_op_map [src1->type];
1059 case CEE_CONV_OVF_I1_UN:
1060 case CEE_CONV_OVF_I2_UN:
1061 case CEE_CONV_OVF_I4_UN:
1062 case CEE_CONV_OVF_U1_UN:
1063 case CEE_CONV_OVF_U2_UN:
1064 case CEE_CONV_OVF_U4_UN:
1065 ins->type = STACK_I4;
1066 ins->opcode += ovf2ops_op_map [src1->type];
1069 ins->type = STACK_PTR;
1070 switch (src1->type) {
1072 ins->opcode = OP_ICONV_TO_U;
1076 #if SIZEOF_VOID_P == 8
1077 ins->opcode = OP_LCONV_TO_U;
1079 ins->opcode = OP_MOVE;
1083 ins->opcode = OP_LCONV_TO_U;
1086 ins->opcode = OP_FCONV_TO_U;
1092 ins->type = STACK_I8;
1093 ins->opcode += unops_op_map [src1->type];
1095 case CEE_CONV_OVF_I8:
1096 case CEE_CONV_OVF_U8:
1097 ins->type = STACK_I8;
1098 ins->opcode += ovf3ops_op_map [src1->type];
1100 case CEE_CONV_OVF_U8_UN:
1101 case CEE_CONV_OVF_I8_UN:
1102 ins->type = STACK_I8;
1103 ins->opcode += ovf2ops_op_map [src1->type];
1106 ins->type = cfg->r4_stack_type;
1107 ins->opcode += unops_op_map [src1->type];
1110 ins->type = STACK_R8;
1111 ins->opcode += unops_op_map [src1->type];
1114 ins->type = STACK_R8;
1118 ins->type = STACK_I4;
1119 ins->opcode += ovfops_op_map [src1->type];
1122 case CEE_CONV_OVF_I:
1123 case CEE_CONV_OVF_U:
1124 ins->type = STACK_PTR;
1125 ins->opcode += ovfops_op_map [src1->type];
1128 case CEE_ADD_OVF_UN:
1130 case CEE_MUL_OVF_UN:
1132 case CEE_SUB_OVF_UN:
1133 ins->type = bin_num_table [src1->type] [src2->type];
1134 ins->opcode += ovfops_op_map [src1->type];
1135 if (ins->type == STACK_R8)
1136 ins->type = STACK_INV;
1138 case OP_LOAD_MEMBASE:
1139 ins->type = STACK_PTR;
1141 case OP_LOADI1_MEMBASE:
1142 case OP_LOADU1_MEMBASE:
1143 case OP_LOADI2_MEMBASE:
1144 case OP_LOADU2_MEMBASE:
1145 case OP_LOADI4_MEMBASE:
1146 case OP_LOADU4_MEMBASE:
1147 ins->type = STACK_PTR;
1149 case OP_LOADI8_MEMBASE:
1150 ins->type = STACK_I8;
1152 case OP_LOADR4_MEMBASE:
1153 ins->type = cfg->r4_stack_type;
1155 case OP_LOADR8_MEMBASE:
1156 ins->type = STACK_R8;
1159 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1163 if (ins->type == STACK_MP)
1164 ins->klass = mono_defaults.object_class;
1169 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1175 param_table [STACK_MAX] [STACK_MAX] = {
1180 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1185 switch (args->type) {
1195 for (i = 0; i < sig->param_count; ++i) {
1196 switch (args [i].type) {
1200 if (!sig->params [i]->byref)
1204 if (sig->params [i]->byref)
1206 switch (sig->params [i]->type) {
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1218 if (sig->params [i]->byref)
1220 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1229 /*if (!param_table [args [i].type] [sig->params [i]->type])
1237 * When we need a pointer to the current domain many times in a method, we
1238 * call mono_domain_get() once and we store the result in a local variable.
1239 * This function returns the variable that represents the MonoDomain*.
1241 inline static MonoInst *
1242 mono_get_domainvar (MonoCompile *cfg)
1244 if (!cfg->domainvar)
1245 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 return cfg->domainvar;
1250 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create/return cfg->got_var; NULL when not compiling AOT or when the
 * backend does not need an explicit GOT variable. */
1254 mono_get_got_var (MonoCompile *cfg)
1256 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1258 if (!cfg->got_var) {
1259 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 return cfg->got_var;
/* Ensure cfg->rgctx_var exists: the pointer-sized local holding the runtime
 * generic context. Created once; subsequent calls are no-ops. */
1265 mono_create_rgctx_var (MonoCompile *cfg)
1267 if (!cfg->rgctx_var) {
1268 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1269 /* force the var to be stack allocated */
1270 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/* Return the rgctx variable (creating it if needed); only valid for shared
 * (gshared) methods, which the assert enforces. */
1275 mono_get_vtable_var (MonoCompile *cfg)
1277 g_assert (cfg->gshared);
1279 mono_create_rgctx_var (cfg);
1281 return cfg->rgctx_var;
/*
 * Maps an instruction's eval-stack type (STACK_*) back to a MonoType*.
 * NOTE(review): the STACK_MP and default cases are elided in this dump.
 */
1285 type_from_stack_type (MonoInst *ins) {
1286 switch (ins->type) {
1287 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1288 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1289 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1290 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1291 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1293 return &ins->klass->this_arg;
1294 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1295 case STACK_VTYPE: return &ins->klass->byval_arg;
1297 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Maps a MonoType* to its eval-stack type (STACK_*), after stripping
 * enums/typedefs via mono_type_get_underlying_type ().
 */
1302 static G_GNUC_UNUSED int
1303 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1305 t = mono_type_get_underlying_type (t);
1317 case MONO_TYPE_FNPTR:
/* reference types all map to the same stack type */
1319 case MONO_TYPE_CLASS:
1320 case MONO_TYPE_STRING:
1321 case MONO_TYPE_OBJECT:
1322 case MONO_TYPE_SZARRAY:
1323 case MONO_TYPE_ARRAY:
/* r4 handling depends on the backend (soft float vs. native r4) */
1329 return cfg->r4_stack_type;
1332 case MONO_TYPE_VALUETYPE:
1333 case MONO_TYPE_TYPEDBYREF:
1335 case MONO_TYPE_GENERICINST:
1336 if (mono_type_generic_inst_is_valuetype (t))
1342 g_assert_not_reached ();
/*
 * Maps a CEE_LDELEM_*/CEE_STELEM_* opcode to the element class it accesses.
 */
1349 array_access_to_klass (int opcode)
1353 return mono_defaults.byte_class;
1355 return mono_defaults.uint16_class;
1358 return mono_defaults.int_class;
1361 return mono_defaults.sbyte_class;
1364 return mono_defaults.int16_class;
1367 return mono_defaults.int32_class;
1369 return mono_defaults.uint32_class;
1372 return mono_defaults.int64_class;
1375 return mono_defaults.single_class;
1378 return mono_defaults.double_class;
1379 case CEE_LDELEM_REF:
1380 case CEE_STELEM_REF:
1381 return mono_defaults.object_class;
1383 g_assert_not_reached ();
1389 * We try to share variables when possible
/*
 * Returns (possibly shared) a local variable used to carry a stack slot's
 * value across basic-block boundaries. Sharing is keyed on (stack slot,
 * stack type) via cfg->intvars; slots beyond max_stack (from inlining)
 * always get a fresh variable.
 */
1392 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1397 /* inlining can result in deeper stacks */
1398 if (slot >= cfg->header->max_stack)
1399 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1401 pos = ins->type - 1 + slot * STACK_MAX;
1403 switch (ins->type) {
/* reuse a previously created var for this (slot, type) if one exists */
1410 if ((vnum = cfg->intvars [pos]))
1411 return cfg->varinfo [vnum];
1412 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1413 cfg->intvars [pos] = res->inst_c0;
1416 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * Records an image+token pair for KEY in cfg->token_info_hash so the AOT
 * compiler can later resolve the item from metadata alone.
 */
1422 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1425 * Don't use this if a generic_context is set, since that means AOT can't
1426 * look up the method using just the image+token.
1427 * table == 0 means this is a reference made from a wrapper.
1429 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1430 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1431 jump_info_token->image = image;
1432 jump_info_token->token = token;
1433 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1438 * This function is called to handle items that are left on the evaluation stack
1439 * at basic block boundaries. What happens is that we save the values to local variables
1440 * and we reload them later when first entering the target basic block (with the
1441 * handle_loaded_temps () function).
1442 * A single joint point will use the same variables (stored in the array bb->out_stack or
1443 * bb->in_stack, if the basic block is before or after the joint point).
1445 * This function needs to be called _before_ emitting the last instruction of
1446 * the bb (i.e. before emitting a branch).
1447 * If the stack merge fails at a join point, cfg->unverifiable is set.
1450 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1453 MonoBasicBlock *bb = cfg->cbb;
1454 MonoBasicBlock *outb;
1455 MonoInst *inst, **locals;
1460 if (cfg->verbose_level > 3)
1461 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first time out of this bblock: pick or allocate the out_stack vars */
1462 if (!bb->out_scount) {
1463 bb->out_scount = count;
1464 //printf ("bblock %d has out:", bb->block_num);
/* prefer an existing in_stack of a successor so both sides share vars */
1466 for (i = 0; i < bb->out_count; ++i) {
1467 outb = bb->out_bb [i];
1468 /* exception handlers are linked, but they should not be considered for stack args */
1469 if (outb->flags & BB_EXCEPTION_HANDLER)
1471 //printf (" %d", outb->block_num);
1472 if (outb->in_stack) {
1474 bb->out_stack = outb->in_stack;
1480 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1481 for (i = 0; i < count; ++i) {
1483 * try to reuse temps already allocated for this purpouse, if they occupy the same
1484 * stack slot and if they are of the same type.
1485 * This won't cause conflicts since if 'local' is used to
1486 * store one of the values in the in_stack of a bblock, then
1487 * the same variable will be used for the same outgoing stack
1489 * This doesn't work when inlining methods, since the bblocks
1490 * in the inlined methods do not inherit their in_stack from
1491 * the bblock they are inlined to. See bug #58863 for an
1494 if (cfg->inlined_method)
1495 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1497 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate our out_stack to successors that don't have an in_stack yet */
1502 for (i = 0; i < bb->out_count; ++i) {
1503 outb = bb->out_bb [i];
1504 /* exception handlers are linked, but they should not be considered for stack args */
1505 if (outb->flags & BB_EXCEPTION_HANDLER)
1507 if (outb->in_scount) {
1508 if (outb->in_scount != bb->out_scount) {
/* stack depth mismatch at a join point: invalid IL */
1509 cfg->unverifiable = TRUE;
1512 continue; /* check they are the same locals */
1514 outb->in_scount = count;
1515 outb->in_stack = bb->out_stack;
1518 locals = bb->out_stack;
/* spill the current eval stack into the shared locals */
1520 for (i = 0; i < count; ++i) {
1521 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1522 inst->cil_code = sp [i]->cil_code;
1523 sp [i] = locals [i];
1524 if (cfg->verbose_level > 3)
1525 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1529 * It is possible that the out bblocks already have in_stack assigned, and
1530 * the in_stacks differ. In this case, we will store to all the different
1537 /* Find a bblock which has a different in_stack */
1539 while (bindex < bb->out_count) {
1540 outb = bb->out_bb [bindex];
1541 /* exception handlers are linked, but they should not be considered for stack args */
1542 if (outb->flags & BB_EXCEPTION_HANDLER) {
1546 if (outb->in_stack != locals) {
1547 for (i = 0; i < count; ++i) {
1548 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1549 inst->cil_code = sp [i]->cil_code;
1550 sp [i] = locals [i];
1551 if (cfg->verbose_level > 3)
1552 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1554 locals = outb->in_stack;
/*
 * Emits an instruction loading a runtime constant described by a patch
 * type + data pair. Under AOT this becomes a patchable AOTCONST; under JIT
 * the patch is resolved immediately and emitted as a plain PCONST.
 */
1564 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1568 if (cfg->compile_aot) {
1569 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1575 ji.type = patch_type;
1576 ji.data.target = data;
/* resolve eagerly at JIT time; failure here is a compiler bug */
1577 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1578 mono_error_assert_ok (&error);
1580 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper around emit_runtime_constant (). */
1586 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1588 return emit_runtime_constant (cfg, patch_type, data);
/*
 * Emits inline IR that zero-fills SIZE bytes at destreg+offset.
 * Small aligned sizes use a single store-immediate; larger sizes store a
 * zeroed register in descending power-of-two chunks. Only val == 0 is
 * supported (asserted below).
 */
1592 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1596 g_assert (val == 0);
/* fast path: the whole region fits in one naturally-aligned store */
1601 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1604 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1607 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1610 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1612 #if SIZEOF_REGISTER == 8
1614 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: materialize val in a register and store it repeatedly */
1620 val_reg = alloc_preg (cfg);
1622 if (SIZEOF_REGISTER == 8)
1623 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1625 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1628 /* This could be optimized further if neccesary */
/* unaligned prefix handled byte-by-byte */
1630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1637 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emits inline IR copying SIZE bytes from srcreg+soffset to destreg+doffset
 * using load/store pairs in descending chunk sizes (8/4/2/1), respecting
 * the backend's unaligned-access capability.
 */
1668 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1675 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1676 g_assert (size < 10000);
1679 /* This could be optimized further if neccesary */
/* unaligned prefix copied byte-by-byte */
1681 cur_reg = alloc_preg (cfg);
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1690 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1692 cur_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1702 cur_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Creates an OP_TLS_GET instruction reading KEY directly from its TLS
 * offset, when fast TLS is available. Returns NULL under AOT (the offset
 * is not known at compile time) or when fast TLS is unsupported.
 * NOTE(review): the NULL-return lines are elided in this dump.
 */
1728 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1730 int tls_offset = mono_tls_get_tls_offset (key);
1732 if (cfg->compile_aot)
1735 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1737 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1738 ins->dreg = mono_alloc_preg (cfg);
1739 ins->inst_offset = tls_offset;
/*
 * Creates an OP_TLS_SET instruction writing VALUE to KEY's TLS offset when
 * fast TLS is available; NULL under AOT or without fast TLS support.
 */
1746 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1748 int tls_offset = mono_tls_get_tls_offset (key);
1750 if (cfg->compile_aot)
1753 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1755 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1756 ins->sreg1 = value->dreg;
1757 ins->inst_offset = tls_offset;
/*
 * Emits IR that reads the TLS slot KEY: tries the fast inline path first,
 * then falls back to a trampoline call (AOT) or a JIT icall to the getter.
 */
1765 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1767 MonoInst *fast_tls = NULL;
1769 if (!mini_get_debug_options ()->use_fallback_tls)
1770 fast_tls = mono_create_fast_tls_getter (cfg, key);
1773 MONO_ADD_INS (cfg->cbb, fast_tls);
1777 if (cfg->compile_aot) {
1780 * tls getters are critical pieces of code and we don't want to resolve them
1781 * through the standard plt/tramp mechanism since we might expose ourselves
1782 * to crashes and infinite recursions.
1784 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1785 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1787 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1788 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * Emits IR that writes VALUE to the TLS slot KEY; mirrors
 * mono_create_tls_get () (fast path, then AOT trampoline or JIT icall).
 */
1793 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1795 MonoInst *fast_tls = NULL;
1797 if (!mini_get_debug_options ()->use_fallback_tls)
1798 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1801 MONO_ADD_INS (cfg->cbb, fast_tls);
1805 if (cfg->compile_aot) {
1807 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1808 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1810 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1811 return mono_emit_jit_icall (cfg, setter, &value);
1818 * Emit IR to push the current LMF onto the LMF stack.
1821 emit_push_lmf (MonoCompile *cfg)
1824 * Emit IR to push the LMF:
1825 * lmf_addr = <lmf_addr from tls>
1826 * lmf->lmf_addr = lmf_addr
1827 * lmf->prev_lmf = *lmf_addr
1830 MonoInst *ins, *lmf_ins;
1835 int lmf_reg, prev_lmf_reg;
1837 * Store lmf_addr in a variable, so it can be allocated to a global register.
1839 if (!cfg->lmf_addr_var)
1840 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* one path computes lmf_addr from jit_tls, the other reads it from TLS
 * directly; the selecting condition is elided in this dump */
1843 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1845 int jit_tls_dreg = ins->dreg;
1847 lmf_reg = alloc_preg (cfg);
1848 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1850 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1853 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1855 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1856 lmf_reg = ins->dreg;
1858 prev_lmf_reg = alloc_preg (cfg);
1859 /* Save previous_lmf */
1860 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1861 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* make this frame's LMF the top of the LMF stack */
1863 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1869 * Emit IR to pop the current LMF from the LMF stack.
1872 emit_pop_lmf (MonoCompile *cfg)
1874 int lmf_reg, lmf_addr_reg;
1880 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1881 lmf_reg = ins->dreg;
1885 * Emit IR to pop the LMF:
1886 * *(lmf->lmf_addr) = lmf->prev_lmf
1888 /* This could be called before emit_push_lmf () */
1889 if (!cfg->lmf_addr_var)
1890 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1891 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1893 prev_lmf_reg = alloc_preg (cfg);
/* restore the saved previous_lmf as the new top of the LMF stack */
1894 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1895 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * Emits a profiler enter/leave icall (FUNC) passing the current method,
 * but only for the outermost method and only when enter/leave profiling
 * is enabled.
 */
1899 emit_instrumentation_call (MonoCompile *cfg, void *func)
1901 MonoInst *iargs [1];
1904 * Avoid instrumenting inlined methods since it can
1905 * distort profiling results.
1907 if (cfg->method != cfg->current_method)
1910 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1911 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1912 mono_emit_jit_icall (cfg, func, iargs);
/*
 * Maps a return type to the matching call opcode variant, selected by
 * calli (indirect via register) / virt (vtable membase) / direct.
 */
1917 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1920 type = mini_get_underlying_type (type);
1921 switch (type->type) {
1922 case MONO_TYPE_VOID:
1923 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1930 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1934 case MONO_TYPE_FNPTR:
1935 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1936 case MONO_TYPE_CLASS:
1937 case MONO_TYPE_STRING:
1938 case MONO_TYPE_OBJECT:
1939 case MONO_TYPE_SZARRAY:
1940 case MONO_TYPE_ARRAY:
1941 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1944 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1947 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1949 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1951 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1952 case MONO_TYPE_VALUETYPE:
/* enums are handled as their underlying type */
1953 if (type->data.klass->enumtype) {
1954 type = mono_class_enum_basetype (type->data.klass);
1957 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1958 case MONO_TYPE_TYPEDBYREF:
1959 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1960 case MONO_TYPE_GENERICINST:
1961 type = &type->data.generic_class->container_class->byval_arg;
1964 case MONO_TYPE_MVAR:
/* gsharedvt: value returned by reference */
1966 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1968 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1973 //XXX this ignores if t is byref
1974 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1977 * target_type_is_incompatible:
1978 * @cfg: MonoCompile context
1980 * Check that the item @arg on the evaluation stack can be stored
1981 * in the target type (can be a local, or field, etc).
1982 * The cfg arg can be used to check if we need verification or just
1985 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 *   Returns non-zero if the eval-stack item ARG cannot be stored into a
 *   location of type TARGET (local, field, etc.). See the header comment
 *   elided above (original lines 1977-1985).
 */
1988 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1990 MonoType *simple_type;
1993 if (target->byref) {
1994 /* FIXME: check that the pointed to types match */
1995 if (arg->type == STACK_MP) {
1996 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1997 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1998 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2000 /* if the target is native int& or same type */
2001 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2004 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2005 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2006 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2010 if (arg->type == STACK_PTR)
2015 simple_type = mini_get_underlying_type (target);
2016 switch (simple_type->type) {
2017 case MONO_TYPE_VOID:
2025 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2029 /* STACK_MP is needed when setting pinned locals */
2030 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2035 case MONO_TYPE_FNPTR:
2037 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2038 * in native int. (#688008).
2040 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2043 case MONO_TYPE_CLASS:
2044 case MONO_TYPE_STRING:
2045 case MONO_TYPE_OBJECT:
2046 case MONO_TYPE_SZARRAY:
2047 case MONO_TYPE_ARRAY:
2048 if (arg->type != STACK_OBJ)
2050 /* FIXME: check type compatibility */
2054 if (arg->type != STACK_I8)
2058 if (arg->type != cfg->r4_stack_type)
2062 if (arg->type != STACK_R8)
2065 case MONO_TYPE_VALUETYPE:
2066 if (arg->type != STACK_VTYPE)
2068 klass = mono_class_from_mono_type (simple_type);
2069 if (klass != arg->klass)
2072 case MONO_TYPE_TYPEDBYREF:
2073 if (arg->type != STACK_VTYPE)
2075 klass = mono_class_from_mono_type (simple_type);
2076 if (klass != arg->klass)
2079 case MONO_TYPE_GENERICINST:
2080 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2081 MonoClass *target_class;
2082 if (arg->type != STACK_VTYPE)
2084 klass = mono_class_from_mono_type (simple_type);
2085 target_class = mono_class_from_mono_type (target);
2086 /* The second cases is needed when doing partial sharing */
2087 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2091 if (arg->type != STACK_OBJ)
2093 /* FIXME: check type compatibility */
2097 case MONO_TYPE_MVAR:
/* unlowered type variables only appear under generic sharing */
2098 g_assert (cfg->gshared);
2099 if (mini_type_var_is_vt (simple_type)) {
2100 if (arg->type != STACK_VTYPE)
2103 if (arg->type != STACK_OBJ)
2108 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2114 * Prepare arguments for passing to a function call.
2115 * Return a non-zero value if the arguments can't be passed to the given
2117 * The type checks are not yet complete and some conversions may need
2118 * casts on 32 or 64 bit architectures.
2120 * FIXME: implement this using target_type_is_incompatible ()
2123 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2125 MonoType *simple_type;
/* 'this' must be a reference, managed pointer or native pointer */
2129 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2133 for (i = 0; i < sig->param_count; ++i) {
2134 if (sig->params [i]->byref) {
2135 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2139 simple_type = mini_get_underlying_type (sig->params [i]);
2141 switch (simple_type->type) {
2142 case MONO_TYPE_VOID:
2151 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2157 case MONO_TYPE_FNPTR:
2158 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2161 case MONO_TYPE_CLASS:
2162 case MONO_TYPE_STRING:
2163 case MONO_TYPE_OBJECT:
2164 case MONO_TYPE_SZARRAY:
2165 case MONO_TYPE_ARRAY:
2166 if (args [i]->type != STACK_OBJ)
2171 if (args [i]->type != STACK_I8)
2175 if (args [i]->type != cfg->r4_stack_type)
2179 if (args [i]->type != STACK_R8)
2182 case MONO_TYPE_VALUETYPE:
/* enums are re-checked as their underlying type */
2183 if (simple_type->data.klass->enumtype) {
2184 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2187 if (args [i]->type != STACK_VTYPE)
2190 case MONO_TYPE_TYPEDBYREF:
2191 if (args [i]->type != STACK_VTYPE)
2194 case MONO_TYPE_GENERICINST:
2195 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2198 case MONO_TYPE_MVAR:
2200 if (args [i]->type != STACK_VTYPE)
2204 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Maps a *CALL_MEMBASE (virtual) opcode to its direct-call counterpart.
 * NOTE(review): the return-value lines are elided in this dump.
 */
2212 callvirt_to_call (int opcode)
2215 case OP_CALL_MEMBASE:
2217 case OP_VOIDCALL_MEMBASE:
2219 case OP_FCALL_MEMBASE:
2221 case OP_RCALL_MEMBASE:
2223 case OP_VCALL_MEMBASE:
2225 case OP_LCALL_MEMBASE:
2228 g_assert_not_reached ();
/*
 * Maps a *CALL_MEMBASE (virtual) opcode to its indirect (*CALL_REG)
 * counterpart, used when the target address is computed into a register.
 */
2235 callvirt_to_call_reg (int opcode)
2238 case OP_CALL_MEMBASE:
2240 case OP_VOIDCALL_MEMBASE:
2241 return OP_VOIDCALL_REG;
2242 case OP_FCALL_MEMBASE:
2243 return OP_FCALL_REG;
2244 case OP_RCALL_MEMBASE:
2245 return OP_RCALL_REG;
2246 case OP_VCALL_MEMBASE:
2247 return OP_VCALL_REG;
2248 case OP_LCALL_MEMBASE:
2249 return OP_LCALL_REG;
2251 g_assert_not_reached ();
2257 /* Either METHOD or IMT_ARG needs to be set */
/*
 * Loads the IMT argument (the target method, or an explicit imt_arg) into
 * a register and attaches it to CALL via MONO_ARCH_IMT_REG. The LLVM path
 * additionally records call->imt_arg_reg.
 */
2259 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2263 if (COMPILE_LLVM (cfg)) {
2265 method_reg = alloc_preg (cfg);
2266 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2268 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2269 method_reg = ins->dreg;
2273 call->imt_arg_reg = method_reg;
2275 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path below mirrors the LLVM one */
2280 method_reg = alloc_preg (cfg);
2281 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2283 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2284 method_reg = ins->dreg;
2287 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocates a MonoJumpInfo from MP and fills in its type and target. */
2290 static MonoJumpInfo *
2291 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2293 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2297 ji->data.target = target;
/* cfg-aware wrapper over mono_class_check_context_used (). */
2303 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2306 return mono_class_check_context_used (klass);
/* cfg-aware wrapper over mono_method_check_context_used (). */
2312 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2315 return mono_method_check_context_used (method);
2321 * check_method_sharing:
2323 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2326 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2328 gboolean pass_vtable = FALSE;
2329 gboolean pass_mrgctx = FALSE;
/* vtable case: static or valuetype methods of generic classes */
2331 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2332 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2333 gboolean sharable = FALSE;
2335 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2339 * Pass vtable iff target method might
2340 * be shared, which means that sharing
2341 * is enabled for its class and its
2342 * context is sharable (and it's not a
2345 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx case: methods with their own generic method arguments */
2349 if (mini_method_get_context (cmethod) &&
2350 mini_method_get_context (cmethod)->method_inst) {
2351 g_assert (!pass_vtable);
2353 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2356 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2361 if (out_pass_vtable)
2362 *out_pass_vtable = pass_vtable;
2363 if (out_pass_mrgctx)
2364 *out_pass_mrgctx = pass_mrgctx;
/*
 * Builds a MonoCallInst for SIG/ARGS: chooses the call opcode, sets up the
 * valuetype return (vret_var / OP_OUTARG_VTRETADDR), handles soft-float R4
 * argument conversion, and delegates arg marshalling to the backend
 * (or LLVM). Does NOT add the call instruction to a bblock.
 */
2367 inline static MonoCallInst *
2368 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2369 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2373 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls fire the leave-instrumentation before transferring control */
2381 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2383 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2385 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2388 call->signature = sig;
2389 call->rgctx_reg = rgctx;
2390 sig_ret = mini_get_underlying_type (sig->ret);
2392 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2395 if (mini_type_is_vtype (sig_ret)) {
2396 call->vret_var = cfg->vret_addr;
2397 //g_assert_not_reached ();
2399 } else if (mini_type_is_vtype (sig_ret)) {
2400 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2403 temp->backend.is_pinvoke = sig->pinvoke;
2406 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2407 * address of return value to increase optimization opportunities.
2408 * Before vtype decomposition, the dreg of the call ins itself represents the
2409 * fact the call modifies the return value. After decomposition, the call will
2410 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2411 * will be transformed into an LDADDR.
2413 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2414 loada->dreg = alloc_preg (cfg);
2415 loada->inst_p0 = temp;
2416 /* We reference the call too since call->dreg could change during optimization */
2417 loada->inst_p1 = call;
2418 MONO_ADD_INS (cfg->cbb, loada);
2420 call->inst.dreg = temp->dreg;
2422 call->vret_var = loada;
2423 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2424 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2426 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2427 if (COMPILE_SOFT_FLOAT (cfg)) {
2429 * If the call has a float argument, we would need to do an r8->r4 conversion using
2430 * an icall, but that cannot be done during the call sequence since it would clobber
2431 * the call registers + the stack. So we do it before emitting the call.
2433 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2435 MonoInst *in = call->args [i];
2437 if (i >= sig->hasthis)
2438 t = sig->params [i - sig->hasthis];
2440 t = &mono_defaults.int_class->byval_arg;
2441 t = mono_type_get_underlying_type (t);
2443 if (!t->byref && t->type == MONO_TYPE_R4) {
2444 MonoInst *iargs [1];
2448 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2450 /* The result will be in an int vreg */
2451 call->args [i] = conv;
2457 call->need_unbox_trampoline = unbox_trampoline;
2460 if (COMPILE_LLVM (cfg))
2461 mono_llvm_emit_call (cfg, call);
2463 mono_arch_emit_call (cfg, call);
2465 mono_arch_emit_call (cfg, call);
/* track the worst-case outgoing-arg area needed by any call */
2468 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2469 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attaches the rgctx argument register to CALL and flags its use on cfg. */
2475 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2477 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2478 cfg->uses_rgctx_reg = TRUE;
2479 call->rgctx_reg = TRUE;
2481 call->rgctx_arg_reg = rgctx_reg;
/*
 * Emits an indirect call through ADDR with optional imt and rgctx
 * arguments. For pinvoke wrappers with callconv checking enabled, brackets
 * the call with OP_GET_SP/OP_SET_SP and throws ExecutionEngineException if
 * the callee unbalanced the stack.
 */
2485 inline static MonoInst*
2486 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2491 gboolean check_sp = FALSE;
2493 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2494 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2496 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value into its own register before the call sequence */
2501 rgctx_reg = mono_alloc_preg (cfg);
2502 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2506 if (!cfg->stack_inbalance_var)
2507 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* record SP before the call */
2509 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2510 ins->dreg = cfg->stack_inbalance_var->dreg;
2511 MONO_ADD_INS (cfg->cbb, ins);
2514 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2516 call->inst.sreg1 = addr->dreg;
2519 emit_imt_argument (cfg, call, NULL, imt_arg);
2521 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* compare SP after the call with the recorded value */
2526 sp_reg = mono_alloc_preg (cfg);
2528 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2530 MONO_ADD_INS (cfg->cbb, ins);
2532 /* Restore the stack so we don't crash when throwing the exception */
2533 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2534 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2535 MONO_ADD_INS (cfg->cbb, ins);
2537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2538 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2542 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2544 return (MonoInst*)call;
2548 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2551 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2554 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2555 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2557 #ifndef DISABLE_REMOTING
/*
 * NOTE(review): this is the interior of the main managed-call emission routine
 * (the signature and opening lines are outside this view). It builds a
 * MonoCallInst for METHOD, then patches it into a direct call, a delegate
 * invoke, a sealed-virtual direct dispatch, an indirect call through
 * CALL_TARGET, or an IMT/vtable-slot virtual call. This extract is missing
 * interior lines; annotations only, no code changed.
 */
2558 gboolean might_be_remote = FALSE;
2560 gboolean virtual_ = this_ins != NULL;
2561 gboolean enable_for_aot = TRUE;
2564 MonoInst *call_target = NULL;
2566 gboolean need_unbox_trampoline;
2569 sig = mono_method_signature (method);
/* llvm-only mode never emits direct interface calls; those must go through
 * the gsharedvt/IMT paths handled elsewhere. */
2571 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2572 g_assert_not_reached ();
2575 rgctx_reg = mono_alloc_preg (cfg);
2576 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2579 if (method->string_ctor) {
2580 /* Create the real signature */
2581 /* FIXME: Cache these */
2582 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
/* String ctors are declared void but actually return the string. */
2583 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2588 context_used = mini_method_check_context_used (cfg, method);
2590 #ifndef DISABLE_REMOTING
/* A call might cross a remoting boundary when the receiver is (or derives
 * from) MarshalByRefObject, the call is non-virtual, and we can't prove
 * 'this' is the current object. */
2591 might_be_remote = this_ins && sig->hasthis &&
2592 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2593 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2595 if (might_be_remote && context_used) {
2598 g_assert (cfg->gshared);
/* Under generic sharing the remoting wrapper address comes from the rgctx. */
2600 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2602 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2606 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2607 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* Calls on Object/interface receivers may need to unbox a valuetype 'this'. */
2609 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2611 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2613 #ifndef DISABLE_REMOTING
2614 if (might_be_remote)
2615 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2618 call->method = method;
2619 call->inst.flags |= MONO_INST_HAS_METHOD;
2620 call->inst.inst_left = this_ins;
2621 call->tail_call = tail;
2624 int vtable_reg, slot_reg, this_reg;
2627 this_reg = this_ins->dreg;
/* Fast path for delegate Invoke: call through delegate->invoke_impl. */
2629 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2630 MonoInst *dummy_use;
2632 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2634 /* Make a call to delegate->invoke_impl */
2635 call->inst.inst_basereg = this_reg;
2636 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2637 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2639 /* We must emit a dummy use here because the delegate trampoline will
2640 replace the 'this' argument with the delegate target making this activation
2641 no longer a root for the delegate.
2642 This is an issue for delegates that target collectible code such as dynamic
2643 methods of GC'able assemblies.
2645 For a test case look into #667921.
2647 FIXME: a dummy use is not the best way to do it as the local register allocator
2648 will put it on a caller save register and spil it around the call.
2649 Ideally, we would either put it on a callee save register or only do the store part.
2651 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2653 return (MonoInst*)call;
/* Direct dispatch: non-virtual method, or final method outside the
 * remoting-check wrapper, and not a shared MarshalByRef call. */
2656 if ((!cfg->compile_aot || enable_for_aot) &&
2657 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2658 (MONO_METHOD_IS_FINAL (method) &&
2659 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2660 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2662 * the method is not virtual, we just need to ensure this is not null
2663 * and then we can call the method directly.
2665 #ifndef DISABLE_REMOTING
2666 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2668 * The check above ensures method is not gshared, this is needed since
2669 * gshared methods can't have wrappers.
2671 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take a NULL 'this', so skip the null check for them. */
2675 if (!method->string_ctor)
2676 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2678 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2679 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2681 * the method is virtual, but we can statically dispatch since either
2682 * it's class or the method itself are sealed.
2683 * But first we need to ensure it's not a null reference.
2685 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2687 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2688 } else if (call_target) {
2689 vtable_reg = alloc_preg (cfg);
/* FAULT load doubles as the null check on 'this'. */
2690 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2692 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2693 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!MONO_INST_HAS_METHOD' yields 0 or 1, so this clears ALL
 * flag bits (or only bit 0) rather than just MONO_INST_HAS_METHOD.
 * '~MONO_INST_HAS_METHOD' looks like the intended operator — confirm against
 * the flag's definition before changing. */
2694 call->inst.flags &= !MONO_INST_HAS_METHOD;
2696 vtable_reg = alloc_preg (cfg);
2697 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2698 if (mono_class_is_interface (method->klass)) {
2699 guint32 imt_slot = mono_method_get_imt_slot (method);
2700 emit_imt_argument (cfg, call, call->method, imt_arg);
2701 slot_reg = vtable_reg;
/* IMT slots live at negative offsets before the vtable proper. */
2702 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2704 slot_reg = vtable_reg;
2705 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2706 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2708 g_assert (mono_method_signature (method)->generic_param_count);
2709 emit_imt_argument (cfg, call, call->method, imt_arg);
2713 call->inst.sreg1 = slot_reg;
2714 call->inst.inst_offset = offset;
2715 call->is_virtual = TRUE;
2719 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a plain (non-tail, no explicit rgctx/imt arg)
 * call to METHOD using its own signature.
 */
2728 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2730 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * All the boolean arguments to mono_emit_call_args () are FALSE here:
 * not virtual, not a tail call, no rgctx arg, no unbox trampoline.
 */
2734 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2741 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2744 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2746 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for the address FUNC, going
 * through the icall's wrapper (mono_icall_get_wrapper ()) rather than
 * calling FUNC directly.
 */
2750 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2752 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2756 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2760 * mono_emit_abs_call:
2762 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2764 inline static MonoInst*
2765 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2766 MonoMethodSignature *sig, MonoInst **args)
2768 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2772 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2775 if (cfg->abs_patches == NULL)
/* Keyed by the MonoJumpInfo pointer itself (identity hash/equality). */
2776 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2777 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The patch-info pointer stands in for the call address until patching. */
2778 ins = mono_emit_native_call (cfg, ji, sig, args);
2779 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG with one extra trailing pointer-sized parameter,
 * used to pass an rgctx/extra argument on indirect calls.
 * NOTE(review): the existing FIXME stands — the g_malloc'd signature is
 * never freed by this function; ownership is the caller's problem.
 */
2783 static MonoMethodSignature*
2784 sig_to_rgctx_sig (MonoMethodSignature *sig)
2786 // FIXME: memory allocation
2787 MonoMethodSignature *res;
2790 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2791 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2792 res->param_count = sig->param_count + 1;
2793 for (i = 0; i < sig->param_count; ++i)
2794 res->params [i] = sig->params [i];
/* The appended argument is an opaque native-int pointer. */
2795 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2799 /* Make an indirect call to FSIG passing an additional argument */
2801 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2803 MonoMethodSignature *csig;
2804 MonoInst *args_buf [16];
2806 int i, pindex, tmp_reg;
2808 /* Make a call with an rgctx/extra arg */
/* Small arg lists use the stack buffer; larger ones go to the mempool. */
2809 if (fsig->param_count + 2 < 16)
2812 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2815 args [pindex ++] = orig_args [0];
2816 for (i = 0; i < fsig->param_count; ++i)
2817 args [pindex ++] = orig_args [fsig->hasthis + i];
/* The extra arg (in ARG_REG) is appended after the original arguments. */
2818 tmp_reg = alloc_preg (cfg);
2819 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* Call with a signature extended by one trailing pointer parameter. */
2820 csig = sig_to_rgctx_sig (fsig);
2821 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2824 /* Emit an indirect call to the function descriptor ADDR */
2826 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2828 int addr_reg, arg_reg;
2829 MonoInst *call_target;
2831 g_assert (cfg->llvm_only);
2834 * addr points to a <addr, arg> pair, load both of them, and
2835 * make a call to addr, passing arg as an extra arg.
2837 addr_reg = alloc_preg (cfg);
/* First word of the descriptor: the code address to call. */
2838 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2839 arg_reg = alloc_preg (cfg);
/* Second word: the extra argument passed alongside the call. */
2840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2842 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (without their managed wrapper).
 * Disabled under mixed LLVM mode, when emitting sdb sequence points, and
 * when explicitly turned off on the compile.
 */
2846 direct_icalls_enabled (MonoCompile *cfg)
2850 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2852 if (cfg->compile_llvm && !cfg->llvm_only)
2855 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 * (info->no_raise) and direct icalls are enabled, the wrapper is inlined at
 * IL_OFFSET instead of being called; otherwise fall back to calling the
 * wrapper like mono_emit_jit_icall ().
 */
2861 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2864 * Call the jit icall without a wrapper if possible.
2865 * The wrapper is needed for the following reasons:
2866 * - to handle exceptions thrown using mono_raise_exceptions () from the
2867 * icall function. The EH code needs the lmf frame pushed by the
2868 * wrapper to be able to unwind back to managed code.
2869 * - to be able to do stack walks for asynchronously suspended
2870 * threads when debugging.
2872 if (info->no_raise && direct_icalls_enabled (cfg)) {
2876 if (!info->wrapper_method) {
2877 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2878 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
2880 mono_memory_barrier ();
2884 * Inline the wrapper method, which is basically a call to the C icall, and
2885 * an exception check.
2887 costs = inline_method (cfg, info->wrapper_method, NULL,
2888 args, NULL, il_offset, TRUE);
2889 g_assert (costs > 0);
/* NOTE(review): inlining path asserts a non-void return — presumably the
 * call's result retrieval (not visible here) requires it; confirm. */
2890 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2894 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   After a pinvoke (or under LLVM), native code may return sub-register
 * integers without defined upper bits; emit an explicit sign/zero extension
 * matching the declared return type.
 */
2899 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2901 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2902 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2906 * Native code might return non register sized integers
2907 * without initializing the upper bits.
2909 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2910 case OP_LOADI1_MEMBASE:
2911 widen_op = OP_ICONV_TO_I1;
2913 case OP_LOADU1_MEMBASE:
2914 widen_op = OP_ICONV_TO_U1;
2916 case OP_LOADI2_MEMBASE:
2917 widen_op = OP_ICONV_TO_I2;
2919 case OP_LOADU2_MEMBASE:
2920 widen_op = OP_ICONV_TO_U2;
2926 if (widen_op != -1) {
2927 int dreg = alloc_preg (cfg);
2930 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original result. */
2931 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit code which throws a MethodAccessException for CALLER accessing
 * CALLEE; both methods are materialized via rgctx-aware method constants.
 */
2942 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2944 MonoInst *args [16];
2946 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2947 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2949 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the 3-arg corlib String.memcpy helper.
 * Aborts if the runtime is paired with a corlib that lacks it.
 */
2953 get_memcpy_method (void)
2955 static MonoMethod *memcpy_method = NULL;
2956 if (!memcpy_method) {
2957 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2959 g_error ("Old corlib found. Install a new one");
2961 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively record in *WB_BITMAP which pointer-sized slots of KLASS
 * (starting at byte OFFSET) hold object references, so stores to them can
 * get write barriers. Static fields are skipped; nested valuetypes with
 * references recurse with an adjusted offset.
 */
2965 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2967 MonoClassField *field;
2968 gpointer iter = NULL;
2970 while ((field = mono_class_get_fields (klass, &iter))) {
2973 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
2975 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2976 if (mini_type_is_reference (mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the slot bitmap to work. */
2977 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2978 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2980 MonoClass *field_class = mono_class_from_mono_type (field->type);
2981 if (field_class->has_references)
2982 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.
 * Three strategies, fastest first: a dedicated backend opcode
 * (OP_CARD_TABLE_WBARRIER), inline card-table marking, or a call to the
 * GC's write-barrier method. No-op if write barriers are disabled.
 */
2988 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2990 int card_table_shift_bits;
2991 gpointer card_table_mask;
2993 MonoInst *dummy_use;
2994 int nursery_shift_bits;
2995 size_t nursery_size;
2997 if (!cfg->gen_write_barriers)
3000 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3002 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3004 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3007 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3008 wbarrier->sreg1 = ptr->dreg;
3009 wbarrier->sreg2 = value->dreg;
3010 MONO_ADD_INS (cfg->cbb, wbarrier);
3011 } else if (card_table) {
3012 int offset_reg = alloc_preg (cfg);
3017 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
3018 * collector case, so, for the serial collector, it might slightly slow down nursery
3019 * collections. We also expect that the host system and the target system have the same card
3020 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked, then indexed into the table. */
3023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3024 if (card_table_mask)
3025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3027 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3028 * IMM's larger than 32bits.
3030 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3031 card_reg = ins->dreg;
3033 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* Mark the card dirty. */
3034 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3036 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3037 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3040 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled copy of a SIZE-byte valuetype of type KLASS
 * from iargs[1] to iargs[0], inserting write barriers for reference slots
 * (per create_write_barrier_bitmap ()). Bails out (for the generic copy
 * path) on under-aligned types and on copies larger than 5 pointer words.
 */
3044 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3046 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3047 unsigned need_wb = 0;
3052 /*types with references can't have alignment smaller than sizeof(void*) */
3053 if (align < SIZEOF_VOID_P)
/* Unroll limit: at most 5 pointer-sized stores (see mini_emit_stobj). */
3056 if (size > 5 * SIZEOF_VOID_P)
3059 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3061 destreg = iargs [0]->dreg;
3062 srcreg = iargs [1]->dreg;
3065 dest_ptr_reg = alloc_preg (cfg);
3066 tmp_reg = alloc_preg (cfg);
3069 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy whole pointer words first, barriering the reference slots. */
3071 while (size >= SIZEOF_VOID_P) {
3072 MonoInst *load_inst;
3073 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3074 load_inst->dreg = tmp_reg;
3075 load_inst->inst_basereg = srcreg;
3076 load_inst->inst_offset = offset;
3077 MONO_ADD_INS (cfg->cbb, load_inst);
3079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3082 emit_write_barrier (cfg, iargs [0], load_inst);
3084 offset += SIZEOF_VOID_P;
3085 size -= SIZEOF_VOID_P;
3088 /*tmp += sizeof (void*)*/
3089 if (size >= SIZEOF_VOID_P) {
3090 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3091 MONO_ADD_INS (cfg->cbb, iargs [0]);
3095 /* Those cannot be references since size < sizeof (void*) */
3097 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3098 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3104 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3105 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3111 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3112 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3121 * Emit code to copy a valuetype of type @klass whose address is stored in
3122 * @src->dreg to memory whose address is stored at @dest->dreg.
3125 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3127 MonoInst *iargs [4];
3130 MonoMethod *memcpy_method;
3131 MonoInst *size_ins = NULL;
3132 MonoInst *memcpy_ins = NULL;
3136 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3139 * This check breaks with spilled vars... need to handle it during verification anyway.
3140 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime via rgctx. */
3143 if (mini_is_gsharedvt_klass (klass)) {
3145 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3146 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3150 n = mono_class_native_size (klass, &align);
3152 n = mono_class_value_size (klass, &align);
3155 align = SIZEOF_VOID_P;
3156 /* if native is true there should be no references in the struct */
3157 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3158 /* Avoid barriers when storing to the stack */
3159 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3160 (dest->opcode == OP_LDADDR))) {
3166 context_used = mini_class_check_context_used (cfg, klass);
3168 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3169 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* Fallback: call the value-copy icall, which handles barriers itself. */
3171 } else if (size_ins || align < SIZEOF_VOID_P) {
3173 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3175 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3176 if (!cfg->compile_aot)
3177 mono_class_compute_gc_descriptor (klass);
3180 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3182 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3184 /* We don't unroll more than 5 stores to avoid code bloat. */
3185 /*This is harmless and simplify mono_gc_get_range_copy_func */
/* Round the byte count up to a pointer-size multiple. */
3186 n += (SIZEOF_VOID_P - 1);
3187 n &= ~(SIZEOF_VOID_P - 1);
3189 EMIT_NEW_ICONST (cfg, iargs [2], n);
3190 mono_emit_jit_icall (cfg, mono_gc_get_range_copy_func (), iargs);
/* No-barrier path: small copies are inlined, the rest call memcpy. */
3195 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3196 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3197 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3202 iargs [2] = size_ins;
3204 EMIT_NEW_ICONST (cfg, iargs [2], n);
3206 memcpy_method = get_memcpy_method ();
3208 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3210 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the 3-arg corlib String.memset helper.
 * Aborts if the runtime is paired with a corlib that lacks it.
 */
3215 get_memset_method (void)
3217 static MonoMethod *memset_method = NULL;
3218 if (!memset_method) {
3219 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3221 g_error ("Old corlib found. Install a new one");
3223 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype of type KLASS at DEST->dreg.
 * gsharedvt types use a runtime-size bzero call; otherwise small types are
 * zeroed inline and larger ones go through corlib memset.
 */
3227 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3229 MonoInst *iargs [3];
3232 MonoMethod *memset_method;
3233 MonoInst *size_ins = NULL;
3234 MonoInst *bzero_ins = NULL;
3235 static MonoMethod *bzero_method;
3237 /* FIXME: Optimize this for the case when dest is an LDADDR */
3238 mono_class_init (klass);
3239 if (mini_is_gsharedvt_klass (klass)) {
/* Size and bzero routine are fetched from the gsharedvt info at runtime. */
3240 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3241 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3243 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3244 g_assert (bzero_method);
3246 iargs [1] = size_ins;
3247 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3251 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3253 n = mono_class_value_size (klass, &align);
/* Small types (<= 8 pointer words) are memset inline. */
3255 if (n <= sizeof (gpointer) * 8) {
3256 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3259 memset_method = get_memset_method ();
3261 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3262 EMIT_NEW_ICONST (cfg, iargs [2], n);
3263 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3270 * Emit IR to return either the this pointer for instance method,
3271 * or the mrgctx for static methods.
3274 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3276 MonoInst *this_ins = NULL;
3278 g_assert (cfg->gshared);
/* Reference-type instance methods (not using a method context) can derive
 * the rgctx from 'this'. */
3280 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3281 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3282 !method->klass->valuetype)
3283 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-level generic context: load the mrgctx from its variable. */
3285 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3286 MonoInst *mrgctx_loc, *mrgctx_var;
3288 g_assert (!this_ins);
3289 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3291 mrgctx_loc = mono_get_vtable_var (cfg);
3292 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3295 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
3296 MonoInst *mrgctx_loc, *mrgctx_var;
3298 /* Default interface methods need an mrgctx since the vtabke at runtime points at an implementing class */
3299 mrgctx_loc = mono_get_vtable_var (cfg);
3300 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3302 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static methods / valuetype methods receive the vtable as rgctx. */
3305 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3306 MonoInst *vtable_loc, *vtable_var;
3308 g_assert (!this_ins);
3310 vtable_loc = mono_get_vtable_var (cfg);
3311 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3313 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3314 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an mrgctx; load its class_vtable field. */
3317 vtable_reg = alloc_preg (cfg);
3318 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3319 vtable_var->type = STACK_PTR;
/* Otherwise: load the vtable out of 'this'. */
3327 vtable_reg = alloc_preg (cfg);
3328 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry descriptor for METHOD wrapping a
 * patch of PATCH_TYPE/PATCH_DATA with the given INFO_TYPE. IN_MRGCTX says
 * whether the entry lives in the method rgctx rather than the class rgctx.
 */
3333 static MonoJumpInfoRgctxEntry *
3334 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3336 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3337 res->method = method;
3338 res->in_mrgctx = in_mrgctx;
3339 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3340 res->data->type = patch_type;
3341 res->data->data.target = patch_data;
3342 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fetch of rgctx ENTRY from RGCTX: walk the rgctx array
 * chain to the slot and, when the slot (or any link) is still NULL, fall
 * back to the mono_fill_{method,class}_rgctx icalls to lazily populate it.
 * One path here is AOT-style (compile-time-unknown slot, icall only), the
 * other emits the full inline fastpath; the branch between them is outside
 * this extract.
 */
3347 static inline MonoInst*
3348 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3350 MonoInst *args [16];
3353 // FIXME: No fastpath since the slot is not a compile time constant
3355 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3356 if (entry->in_mrgctx)
3357 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3359 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3363 * FIXME: This can be called during decompose, which is a problem since it creates
3365 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3367 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3369 MonoBasicBlock *is_null_bb, *end_bb;
3370 MonoInst *res, *ins, *call;
3373 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the encoded slot into mrgctx/class-rgctx flag and raw index. */
3375 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3376 index = MONO_RGCTX_SLOT_INDEX (slot);
3378 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many chained arrays must be traversed to reach the index. */
3379 for (depth = 0; ; ++depth) {
3380 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3382 if (index < size - 1)
3387 NEW_BBLOCK (cfg, end_bb);
3388 NEW_BBLOCK (cfg, is_null_bb);
3391 rgctx_reg = rgctx->dreg;
3393 rgctx_reg = alloc_preg (cfg);
3395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3396 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3397 NEW_BBLOCK (cfg, is_null_bb);
3399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3400 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3403 for (i = 0; i < depth; ++i) {
3404 int array_reg = alloc_preg (cfg);
3406 /* load ptr to next array */
3407 if (mrgctx && i == 0)
/* First mrgctx link sits after the fixed-size mrgctx header. */
3408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3410 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3411 rgctx_reg = array_reg;
3412 /* is the ptr null? */
3413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3414 /* if yes, jump to actual trampoline */
3415 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3419 val_reg = alloc_preg (cfg);
/* Slot 0 of each array is the link to the next one, hence index + 1. */
3420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3421 /* is the slot null? */
3422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3423 /* if yes, jump to actual trampoline */
3424 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3427 res_reg = alloc_preg (cfg);
3428 MONO_INST_NEW (cfg, ins, OP_MOVE);
3429 ins->dreg = res_reg;
3430 ins->sreg1 = val_reg;
3431 MONO_ADD_INS (cfg->cbb, ins);
3433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath: populate the slot via the appropriate fill icall. */
3436 MONO_START_BB (cfg, is_null_bb);
3438 EMIT_NEW_ICONST (cfg, args [1], index);
3440 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3442 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3443 MONO_INST_NEW (cfg, ins, OP_MOVE);
3444 ins->dreg = res_reg;
3445 ins->sreg1 = call->dreg;
3446 MONO_ADD_INS (cfg->cbb, ins);
3447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3449 MONO_START_BB (cfg, end_bb);
3458 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3461 static inline MonoInst*
3462 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Either inline the fetch or go through the lazy-fetch trampoline. */
3465 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3467 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR loading the RGCTX_TYPE info for KLASS from the current
 * method's rgctx.
 */
3471 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3472 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3474 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3475 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3477 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR loading the RGCTX_TYPE info for signature SIG from the current
 * method's rgctx.
 */
3481 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3482 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3484 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3485 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3487 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR loading the RGCTX_TYPE info for a gsharedvt call described by
 * SIG + CMETHOD (packed into a MonoJumpInfoGSharedVtCall) from the current
 * method's rgctx.
 */
3491 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3492 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3494 MonoJumpInfoGSharedVtCall *call_info;
3495 MonoJumpInfoRgctxEntry *entry;
3498 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3499 call_info->sig = sig;
3500 call_info->method = cmethod;
3502 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3503 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3505 return emit_rgctx_fetch (cfg, rgctx, entry);
3509 * emit_get_rgctx_virt_method:
3511 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3514 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3515 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3517 MonoJumpInfoVirtMethod *info;
3518 MonoJumpInfoRgctxEntry *entry;
/* Pack receiver class + virtual method into a single patch payload. */
3521 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3522 info->klass = klass;
3523 info->method = virt_method;
3525 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3526 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3528 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR loading the gsharedvt info descriptor for CMETHOD from the
 * current method's rgctx.
 */
3532 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3533 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3535 MonoJumpInfoRgctxEntry *entry;
3538 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3539 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3541 return emit_rgctx_fetch (cfg, rgctx, entry);
3545 * emit_get_rgctx_method:
3547 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3548 * normal constants, else emit a load from the rgctx.
3551 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3552 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3554 if (!context_used) {
3557 switch (rgctx_type) {
3558 case MONO_RGCTX_INFO_METHOD:
3559 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3561 case MONO_RGCTX_INFO_METHOD_RGCTX:
3562 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types have no compile-time constant form. */
3565 g_assert_not_reached ();
3568 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3569 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3571 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR loading the RGCTX_TYPE info for FIELD from the current
 * method's rgctx.
 */
3576 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3577 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3579 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3580 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3582 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the slot index in cfg->gsharedvt_info for (DATA, RGCTX_TYPE),
 * reusing an existing entry when one matches (except LOCAL_OFFSET entries,
 * which are never shared), growing the entry table as needed.
 */
3586 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3588 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3589 MonoRuntimeGenericContextInfoTemplate *template_;
/* Linear search for an existing matching entry. */
3594 for (i = 0; i < info->num_entries; ++i) {
3595 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3597 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the mempool-backed entry array (doubling, min 16). */
3601 if (info->num_entries == info->count_entries) {
3602 MonoRuntimeGenericContextInfoTemplate *new_entries;
3603 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3605 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3607 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3608 info->entries = new_entries;
3609 info->count_entries = new_count_entries;
3612 idx = info->num_entries;
3613 template_ = &info->entries [idx];
3614 template_->info_type = rgctx_type;
3615 template_->data = data;
3617 info->num_entries ++;
3623 * emit_get_gsharedvt_info:
3625 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3628 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3633 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3634 /* Load info->entries [idx] */
3635 dreg = alloc_preg (cfg);
3636 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)))
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type.
 */
3642 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3644 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3648 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR running the class initializer (cctor) of KLASS if it has not
 * run yet. Uses a dedicated backend opcode when available, otherwise an
 * inline vtable->initialized check guarding a mono_generic_class_init call.
 */
3651 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3653 MonoInst *vtable_arg;
3656 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code fetches the vtable from the rgctx; otherwise embed it. */
3659 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3660 klass, MONO_RGCTX_INFO_VTABLE);
3662 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3666 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3669 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3673 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3674 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3676 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3677 ins->sreg1 = vtable_arg->dreg;
3678 MONO_ADD_INS (cfg->cbb, ins);
3681 MonoBasicBlock *inited_bb;
3682 MonoInst *args [16];
3684 inited_reg = alloc_ireg (cfg);
/* Fastpath: skip the icall when vtable->initialized is already set. */
3686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3688 NEW_BBLOCK (cfg, inited_bb);
3690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3693 args [0] = vtable_arg;
3694 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3696 MONO_START_BB (cfg, inited_bb);
/* Emit a sequence point (for the debugger) at IL offset ip, but only when
 * sequence points are enabled and @method is the method actually being compiled
 * (i.e. not an inlined callee). */
3701 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3705 if (cfg->gen_seq_points && cfg->method == method) {
3706 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Flag seq points emitted while the IL evaluation stack is non-empty. */
3708 ins->flags |= MONO_INST_NONEMPTY_STACK;
3709 MONO_ADD_INS (cfg->cbb, ins);
/* Record the source and target class of a cast into JIT TLS so a failed cast
 * can produce a detailed error message. Only active under --debug=casts
 * (debug_options.better_cast_details). If @null_check, a NULL obj skips the
 * bookkeeping entirely. */
3714 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3716 if (mini_get_debug_options ()->better_cast_details) {
3717 int vtable_reg = alloc_preg (cfg);
3718 int klass_reg = alloc_preg (cfg);
3719 MonoBasicBlock *is_null_bb = NULL;
3721 int to_klass_reg, context_used;
3724 NEW_BBLOCK (cfg, is_null_bb);
/* Branch over the stores when obj is NULL. */
3726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3730 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): stray '.' after the '\n' in this message — looks unintended, verify upstream. */
3732 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* obj->vtable->klass is the runtime source class of the cast. */
3736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3737 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3739 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3741 context_used = mini_class_check_context_used (cfg, klass);
3743 MonoInst *class_ins;
/* Shared generic code: resolve the target class through the RGCTX. */
3745 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3746 to_klass_reg = class_ins->dreg;
3748 to_klass_reg = alloc_preg (cfg);
3749 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3754 MONO_START_BB (cfg, is_null_bb);
/* Clear the cast-details state recorded by mini_save_cast_details () once the
 * cast has succeeded (or the check is complete). */
3759 mini_reset_cast_details (MonoCompile *cfg)
3761 /* Reset the variables holding the cast details */
3762 if (mini_get_debug_options ()->better_cast_details) {
3763 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3764 /* It is enough to reset the from field */
3765 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3770 * On return the caller must check @array_class for load errors
/* Emit a runtime check that @obj is exactly an instance of @array_class; on
 * mismatch an ArrayTypeMismatchException is thrown. Used for stelem-style
 * array covariance checks. */
3773 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3775 int vtable_reg = alloc_preg (cfg);
3778 context_used = mini_class_check_context_used (cfg, array_class);
3780 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3782 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare klass pointers, resolved via a runtime constant. */
3784 if (cfg->opt & MONO_OPT_SHARED) {
3785 int class_reg = alloc_preg (cfg);
3788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3789 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3790 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3791 } else if (context_used) {
3792 MonoInst *vtable_ins;
/* Shared generic code: compare against the vtable fetched from the RGCTX. */
3794 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3795 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3797 if (cfg->compile_aot) {
/* AOT: the vtable address must go through a patchable constant. */
3801 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3803 vt_reg = alloc_preg (cfg);
3804 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3805 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: the vtable pointer can be embedded as an immediate. */
3808 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3814 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3816 mini_reset_cast_details (cfg);
3820 * Handles unbox of a Nullable&lt;T&gt;. If context_used is non zero, then shared
3821 * generic code is generated.
/* Implemented by calling the managed Nullable&lt;T&gt;.Unbox (object) method on @val. */
3824 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3826 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3829 MonoInst *rgctx, *addr;
3831 /* FIXME: What if the class is shared? We might not
3832 have to get the address of the method from the
/* Shared generic code: resolve the Unbox method's code address via the RGCTX. */
3834 addr = emit_get_rgctx_method (cfg, context_used, method,
3835 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3836 if (cfg->llvm_only) {
/* llvm-only mode uses a gsharedvt-style indirect call; remember the signature. */
3837 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3838 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3840 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3842 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable as an extra argument. */
3845 gboolean pass_vtable, pass_mrgctx;
3846 MonoInst *rgctx_arg = NULL;
3848 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3849 g_assert (!pass_mrgctx);
3852 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3855 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3858 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Emit IR for CEE_UNBOX: verify the boxed object's element class matches
 * @klass (throwing InvalidCastException otherwise) and return the address of
 * the value payload, i.e. obj + sizeof (MonoObject). */
3863 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3867 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3868 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3869 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3870 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3872 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3873 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3874 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3876 /* FIXME: generics */
3877 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array type. */
3880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3881 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3884 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class fetched from the RGCTX. */
3887 MonoInst *element_class;
3889 /* This assertion is from the unboxcast insn */
3890 g_assert (klass->rank == 0);
3892 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3893 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3895 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3896 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3898 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3899 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3900 mini_reset_cast_details (cfg);
/* The unboxed value lives immediately after the object header. */
3903 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3904 MONO_ADD_INS (cfg->cbb, add);
3905 add->type = STACK_MP;
/* Unbox @obj when @klass is a gsharedvt type, i.e. whether it is instantiated
 * as a reference type, a vtype or a Nullable is only known at runtime.
 * Branches on the runtime CLASS_BOX_TYPE info and produces the address of the
 * unboxed value in all three cases, then loads the value from it. */
3912 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3914 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3915 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3919 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3925 args [1] = klass_inst;
/* The cast check is done by the icall; it returns obj on success. */
3928 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3930 NEW_BBLOCK (cfg, is_ref_bb);
3931 NEW_BBLOCK (cfg, is_nullable_bb);
3932 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: REF / NULLABLE / (fallthrough) VTYPE. */
3933 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3934 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3935 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3940 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3941 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the payload follows the object header. */
3945 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3946 MONO_ADD_INS (cfg->cbb, addr);
3948 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3951 MONO_START_BB (cfg, is_ref_bb);
3953 /* Save the ref to a temporary */
3954 dreg = alloc_ireg (cfg);
3955 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3956 addr->dreg = addr_reg;
3957 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3958 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3961 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable&lt;T&gt;.Unbox through a hand-built gsharedvt signature,
 * since the concrete method cannot be constructed at JIT time. */
3964 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3965 MonoInst *unbox_call;
3966 MonoMethodSignature *unbox_sig;
3968 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3969 unbox_sig->ret = &klass->byval_arg;
3970 unbox_sig->param_count = 1;
3971 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3974 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3976 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3978 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3979 addr->dreg = addr_reg;
3982 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3985 MONO_START_BB (cfg, end_bb);
/* All paths left the value's address in addr_reg; load the value itself. */
3988 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3994 * Returns NULL and set the cfg exception on error.
/* Emit IR allocating an instance of @klass. Chooses between the managed GC
 * allocator, a specialized corlib helper, and the generic icall depending on
 * sharing mode, AOT, and what the GC provides. @for_box selects the boxing
 * variant of the managed allocator. */
3997 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3999 MonoInst *iargs [2];
/* --- Shared generic code path (context_used != 0) --- */
4004 MonoRgctxInfoType rgctx_info;
4005 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-variable size, so the managed allocator
 * can only be used when the instance size is statically known. */
4006 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4008 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4010 if (cfg->opt & MONO_OPT_SHARED)
4011 rgctx_info = MONO_RGCTX_INFO_KLASS;
4013 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4014 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4016 if (cfg->opt & MONO_OPT_SHARED) {
4017 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4019 alloc_ftn = ves_icall_object_new;
4022 alloc_ftn = ves_icall_object_new_specific;
4025 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4026 if (known_instance_size) {
4027 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header. */
4028 if (size < sizeof (MonoObject))
4029 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4031 EMIT_NEW_ICONST (cfg, iargs [1], size);
4033 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4036 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- Non-shared path --- */
4039 if (cfg->opt & MONO_OPT_SHARED) {
4040 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4041 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4043 alloc_ftn = ves_icall_object_new;
4044 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4045 /* This happens often in argument checking code, eg. throw new FooException... */
4046 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4047 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4048 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4050 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4051 MonoMethod *managed_alloc = NULL;
/* Vtable creation can fail; report a type-load error via the cfg. */
4055 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4056 cfg->exception_ptr = klass;
4060 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4062 if (managed_alloc) {
4063 int size = mono_class_instance_size (klass);
4064 if (size < sizeof (MonoObject))
4065 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4067 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4068 EMIT_NEW_ICONST (cfg, iargs [1], size);
4069 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4071 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocation function takes the instance size in words. */
4073 guint32 lw = vtable->klass->instance_size;
4074 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4075 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4076 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4079 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4083 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4087 * Returns NULL and set the cfg exception on error.
/* Emit IR boxing @val of type @klass. Special-cases Nullable&lt;T&gt; (delegates to
 * the managed Nullable&lt;T&gt;.Box method) and gsharedvt types (runtime dispatch on
 * whether the instantiation is a ref type, vtype or nullable). */
4090 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4092 MonoInst *alloc, *ins;
4094 if (mono_class_is_nullable (klass)) {
4095 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4098 if (cfg->llvm_only && cfg->gsharedvt) {
4099 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4100 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4101 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4103 /* FIXME: What if the class is shared? We might not
4104 have to get the method address from the RGCTX. */
4105 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4106 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4107 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4109 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, optionally passing the vtable. */
4112 gboolean pass_vtable, pass_mrgctx;
4113 MonoInst *rgctx_arg = NULL;
4115 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4116 g_assert (!pass_mrgctx);
4119 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4122 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4125 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4129 if (mini_is_gsharedvt_klass (klass)) {
4130 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4131 MonoInst *res, *is_ref, *src_var, *addr;
4134 dreg = alloc_ireg (cfg);
4136 NEW_BBLOCK (cfg, is_ref_bb);
4137 NEW_BBLOCK (cfg, is_nullable_bb);
4138 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type, mirroring handle_unbox_gsharedvt (). */
4139 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the object header. */
4147 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4150 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4151 ins->opcode = OP_STOREV_MEMBASE;
4153 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4154 res->type = STACK_OBJ;
4156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4159 MONO_START_BB (cfg, is_ref_bb);
4161 /* val is a vtype, so has to load the value manually */
4162 src_var = get_vreg_to_inst (cfg, val->dreg);
4164 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4165 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4166 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4167 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4170 MONO_START_BB (cfg, is_nullable_bb);
4173 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4174 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4176 MonoMethodSignature *box_sig;
4179 * klass is Nullable&lt;T&gt;, need to call Nullable&lt;T&gt;.Box () using a gsharedvt signature, but we cannot
4180 * construct that method at JIT time, so have to do things by hand.
4182 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4183 box_sig->ret = &mono_defaults.object_class->byval_arg;
4184 box_sig->param_count = 1;
4185 box_sig->params [0] = &klass->byval_arg;
4188 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4190 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4191 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4192 res->type = STACK_OBJ;
4196 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4198 MONO_START_BB (cfg, end_bb);
/* Plain (non-gsharedvt, non-nullable) case: allocate and store the value. */
4202 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4206 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls are safe to call directly. */
4211 static GHashTable* direct_icall_type_hash;
/* Return whether @cmethod (an icall) may be called directly, bypassing the
 * wrapper. Only whitelisted corlib classes qualify, because a direct call must
 * not (even indirectly) reach mono_raise_exception (). */
4214 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4216 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4217 if (!direct_icalls_enabled (cfg))
4221 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4222 * Whitelist a few icalls for now.
/* Lock-free lazy init: publish the fully-built table after a memory barrier. */
4224 if (!direct_icall_type_hash) {
4225 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4227 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4228 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4229 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4230 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4231 mono_memory_barrier ();
4232 direct_icall_type_hash = h;
4235 if (cmethod->klass == mono_defaults.math_class)
4237 /* No locking needed */
4238 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* Return whether a call to @cmethod needs the caller to be visible on the
 * stack (e.g. System.Type.GetType inspects its caller), which inhibits
 * optimizations such as tail calls or inlining across it. */
4244 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4246 if (cmethod->klass == mono_defaults.systemtype_class) {
4247 if (!strcmp (cmethod->name, "GetType"))
/* Emit an intrinsic expansion of Enum.HasFlag: (this &amp; flag) == flag, computed
 * with 32- or 64-bit ops depending on the enum's underlying type. @enum_this
 * is an address; the enum value is loaded from it. */
4253 static G_GNUC_UNUSED MonoInst*
4254 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4256 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4257 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4260 switch (enum_type->type) {
4263 #if SIZEOF_REGISTER == 8
4275 MonoInst *load, *and_, *cmp, *ceq;
4276 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4277 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4278 int dest_reg = alloc_ireg (cfg);
4280 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4281 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4282 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4283 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4285 ceq->type = STACK_I4;
/* Decompose immediately so later passes see only basic opcodes. */
4288 load = mono_decompose_opcode (cfg, load);
4289 and_ = mono_decompose_opcode (cfg, and_);
4290 cmp = mono_decompose_opcode (cfg, cmp);
4291 ceq = mono_decompose_opcode (cfg, ceq);
4299 * Returns NULL and set the cfg exception on error.
/* Emit IR equivalent to a delegate constructor: allocate the delegate object
 * and fill in its target, method, cached code slot and invoke_impl trampoline,
 * inlining the work normally done by mono_delegate_ctor (). @virtual_ selects
 * the virtual-delegate trampoline variant. */
4301 static G_GNUC_UNUSED MonoInst*
4302 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4306 gpointer trampoline;
4307 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke impl for the Invoke signature. */
4311 if (virtual_ && !cfg->llvm_only) {
4312 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4315 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4319 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4323 /* Inline the contents of mono_delegate_ctor */
4325 /* Set target field */
4326 /* Optimize away setting of NULL target */
4327 if (!MONO_INS_IS_PCONST_NULL (target)) {
4328 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* SGen: a reference store into a heap object needs a write barrier. */
4329 if (cfg->gen_write_barriers) {
4330 dreg = alloc_preg (cfg);
4331 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4332 emit_write_barrier (cfg, ptr, target);
4336 /* Set method field */
4337 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4338 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4341 * To avoid looking up the compiled code belonging to the target method
4342 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4343 * store it, and we fill it after the method has been compiled.
4345 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4346 MonoInst *code_slot_ins;
4349 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared: allocate (once per method, under the domain lock) a slot in
 * the per-domain method_code_hash. */
4351 domain = mono_domain_get ();
4352 mono_domain_lock (domain);
4353 if (!domain_jit_info (domain)->method_code_hash)
4354 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4355 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4357 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4358 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4360 mono_domain_unlock (domain);
4362 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines, initialize the delegate via an icall instead. */
4367 if (cfg->llvm_only) {
4368 MonoInst *args [16];
4373 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4374 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4377 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4383 if (cfg->compile_aot) {
4384 MonoDelegateClassMethodPair *del_tramp;
/* AOT: the trampoline is resolved at load time via a patch-info constant. */
4386 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4387 del_tramp->klass = klass;
4388 del_tramp->method = context_used ? NULL : method;
4389 del_tramp->is_virtual = virtual_;
4390 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4393 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4395 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4396 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4399 /* Set invoke_impl field */
4401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual: tramp_ins points at a MonoDelegateTrampInfo, dereference its fields. */
4403 dreg = alloc_preg (cfg);
4404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4405 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4407 dreg = alloc_preg (cfg);
4408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4409 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* method_is_virtual is a boolean byte field. */
4412 dreg = alloc_preg (cfg);
4413 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4414 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4416 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/* Emit IR for newarr with rank &gt; 1 (newobj on a multi-dim array ctor):
 * call the vararg mono_array_new_va icall wrapper with the rank's dimensions. */
4422 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4424 MonoJitICallInfo *info;
4426 /* Need to register the icall so it gets an icall wrapper */
4427 info = mono_get_array_new_va_icall (rank);
4429 cfg->flags |= MONO_CFG_HAS_VARARGS;
4431 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile vararg calls; force the method onto the non-LLVM path. */
4432 cfg->exception_message = g_strdup ("array-new");
4433 cfg->disable_llvm = TRUE;
4435 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4436 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4440 * handle_constrained_gsharedvt_call:
4442 * Handle constrained calls where the receiver is a gsharedvt type.
4443 * Return the instruction representing the call. Set the cfg exception on failure.
4446 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4447 gboolean *ref_emit_widen)
4449 MonoInst *ins = NULL;
4450 gboolean emit_widen = *ref_emit_widen;
4453 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4454 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4455 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a narrow set of target methods / signatures is supported; everything
 * else falls through to GSHAREDVT_FAILURE below. */
4457 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype &amp;&amp; cmethod->klass->image != mono_defaults.corlib)) &amp;&amp;
4458 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &amp;&amp;
4459 (fsig->param_count == 0 || (!fsig->hasthis &amp;&amp; fsig->param_count == 1) || (fsig->param_count == 1 &amp;&amp; (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4460 MonoInst *args [16];
4463 * This case handles calls to
4464 * - object:ToString()/Equals()/GetHashCode(),
4465 * - System.IComparable&lt;T&gt;:CompareTo()
4466 * - System.IEquatable&lt;T&gt;:Equals ()
4467 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1]: the target method, via RGCTX if it has a generic context. */
4471 if (mono_method_check_context_used (cmethod))
4472 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4474 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4475 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4477 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4478 if (fsig->hasthis &amp;&amp; fsig->param_count) {
4479 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4480 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4481 ins->dreg = alloc_preg (cfg);
4482 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4483 MONO_ADD_INS (cfg->cbb, ins);
4486 if (mini_is_gsharedvt_type (fsig->params [0])) {
4487 int addr_reg, deref_arg_reg;
/* args [3] tells the icall whether the stored argument is an address
 * that must be dereferenced (non-vtype instantiations). */
4489 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4490 deref_arg_reg = alloc_preg (cfg);
4491 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4492 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4494 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4495 addr_reg = ins->dreg;
4496 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4498 EMIT_NEW_ICONST (cfg, args [3], 0);
4499 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4502 EMIT_NEW_ICONST (cfg, args [3], 0);
4503 EMIT_NEW_ICONST (cfg, args [4], 0);
4505 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to the declared return type. */
4508 if (mini_is_gsharedvt_type (fsig->ret)) {
4509 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4510 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
/* Load the value stored after the object header of the boxed result. */
4514 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4515 MONO_ADD_INS (cfg->cbb, add);
4517 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4518 MONO_ADD_INS (cfg->cbb, ins);
4519 /* ins represents the call result */
4522 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4525 *ref_emit_widen = emit_widen;
/* Materialize the GOT address into cfg->got_var at the very start of the
 * method (entry bblock), once per compile. A dummy use in the exit bblock
 * keeps the variable alive for the whole method, since backends may generate
 * the real uses only later. */
4534 mono_emit_load_got_addr (MonoCompile *cfg)
4536 MonoInst *getaddr, *dummy_use;
4538 if (!cfg->got_var || cfg->got_var_allocated)
4541 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4542 getaddr->cil_code = cfg->header->code;
4543 getaddr->dreg = cfg->got_var->dreg;
4545 /* Add it to the start of the first bblock */
4546 if (cfg->bb_entry->code) {
4547 getaddr->next = cfg->bb_entry->code;
4548 cfg->bb_entry->code = getaddr;
4551 MONO_ADD_INS (cfg->bb_entry, getaddr);
4553 cfg->got_var_allocated = TRUE;
4556 * Add a dummy use to keep the got_var alive, since real uses might
4557 * only be generated by the back ends.
4558 * Add it to end_bblock, so the variable's lifetime covers the whole
4560 * It would be better to make the usage of the got var explicit in all
4561 * cases when the backend needs it (i.e. calls, throw etc.), so this
4562 * wouldn't be needed.
4564 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4565 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit; initialized lazily from MONO_INLINELIMIT, defaulting to
 * INLINE_LENGTH_LIMIT. */
4568 static int inline_limit;
4569 static gboolean inline_limit_inited;
/* Decide whether @method may be inlined into the method currently being
 * compiled. Checks flags, size, inline depth, cctor constraints, and
 * (on soft-float targets) R4 usage. Returns TRUE when inlining is allowed. */
4572 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4574 MonoMethodHeaderSummary header;
4576 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4577 MonoMethodSignature *sig = mono_method_signature (method);
4581 if (cfg->disable_inline)
/* Cap recursion of inlined bodies. */
4586 if (cfg->inline_depth > 10)
4589 if (!mono_method_get_header_summary (method, &amp;header))
4592 /*runtime, icall and pinvoke are checked by summary call*/
4593 if ((method->iflags &amp; METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4594 (method->iflags &amp; METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4595 (mono_class_is_marshalbyref (method->klass)) ||
4599 /* also consider num_locals? */
4600 /* Do the size check early to avoid creating vtables */
4601 if (!inline_limit_inited) {
4603 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4604 inline_limit = atoi (inlinelimit);
4605 g_free (inlinelimit);
4607 inline_limit = INLINE_LENGTH_LIMIT;
4608 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
4610 if (header.code_size >= inline_limit &amp;&amp; !(method->iflags &amp; METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4614 * if we can initialize the class of the method right away, we do,
4615 * otherwise we don't allow inlining if the class needs initialization,
4616 * since it would mean inserting a call to mono_runtime_class_init()
4617 * inside the inlined code
4619 if (cfg->gshared &amp;&amp; method->klass->has_cctor &amp;&amp; mini_class_check_context_used (cfg, method->klass))
4622 if (!(cfg->opt &amp; MONO_OPT_SHARED)) {
4623 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4624 if (method->iflags &amp; METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4625 if (method->klass->has_cctor) {
4626 vtable = mono_class_vtable (cfg->domain, method->klass);
4629 if (!cfg->compile_aot) {
4631 if (!mono_runtime_class_init_full (vtable, &amp;error)) {
4632 mono_error_cleanup (&amp;error);
4637 } else if (mono_class_is_before_field_init (method->klass)) {
4638 if (cfg->run_cctors &amp;&amp; method->klass->has_cctor) {
4639 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4640 if (!method->klass->runtime_info)
4641 /* No vtable created yet */
4643 vtable = mono_class_vtable (cfg->domain, method->klass);
4646 /* This makes so that inline cannot trigger */
4647 /* .cctors: too many apps depend on them */
4648 /* running with a specific order... */
4649 if (! vtable->initialized)
4652 if (!mono_runtime_class_init_full (vtable, &amp;error)) {
4653 mono_error_cleanup (&amp;error);
4657 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4658 if (!method->klass->runtime_info)
4659 /* No vtable created yet */
4661 vtable = mono_class_vtable (cfg->domain, method->klass);
4664 if (!vtable->initialized)
4669 * If we're compiling for shared code
4670 * the cctor will need to be run at aot method load time, for example,
4671 * or at the end of the compilation of the inlining method.
4673 if (mono_class_needs_cctor_run (method->klass, NULL) &amp;&amp; !mono_class_is_before_field_init (method->klass))
4677 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods touching R4 values. */
4678 if (mono_arch_is_soft_float ()) {
4680 if (sig->ret &amp;&amp; sig->ret->type == MONO_TYPE_R4)
4682 for (i = 0; i &lt; sig->param_count; ++i)
4683 if (!sig->params [i]->byref &amp;&amp; sig->params [i]->type == MONO_TYPE_R4)
/* Per-compile explicit blacklist. */
4688 if (g_list_find (cfg->dont_inline, method))
/* Return whether a static field access on @klass from @method must be guarded
 * by a class-init check (i.e. the .cctor may not yet have run at that point). */
4695 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4697 if (!cfg->compile_aot) {
/* JIT: if the vtable says initialized, the cctor already ran. */
4699 if (vtable->initialized)
4703 if (mono_class_is_before_field_init (klass)) {
4704 if (cfg->method == method)
4708 if (!mono_class_needs_cctor_run (klass, method))
4711 if (! (method->flags &amp; METHOD_ATTRIBUTE_STATIC) &amp;&amp; (klass == method->klass))
4712 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one dimensional
 * array ARR with element class KLASS.  BCHECK requests an array bounds check.
 * The result instruction has stack type STACK_MP and its ->klass set to the
 * element class.
 */
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	/* gsharedvt: the element size is only known at runtime (rgctx path below). */
	if (mini_is_gsharedvt_variable_klass (klass)) {
	mono_class_init (klass);
	size = mono_class_array_element_size (klass);
	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;
#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/*
		 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
		 * during OP_BOUNDS_CHECK decomposition, and in the implementation
		 * of OP_X86_LEA for llvm.
		 */
		index2_reg = index_reg;
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	/* 32 bit registers: truncate 64 bit indexes to the native width. */
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
		index2_reg = index_reg;

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes: fold scale and offset into a single LEA. */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* fast_log2 [size] yields the LEA shift amount (0..3); other slots unused. */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

	add_reg = alloc_ireg_mp (cfg);

		MonoInst *rgctx_ins;

		/* gsharedvt: fetch the element size from the runtime generic context. */
		g_assert (cfg->gshared);
		context_used = mini_class_check_context_used (cfg, klass);
		g_assert (context_used);
		rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
		/* Compile-time-known element size. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	/* addr = arr + index*size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two dimensional array ARR, adjusting each index by the dimension's lower
 * bound and bounds-checking both dimensions (IndexOutOfRangeException).
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Sign-extend both 32 bit indexes to pointer width. */
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));

	/* First dimension: realidx1 = index1 - lower_bound; check against length. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	/* Unsigned compare also rejects negative adjusted indexes. */
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Second dimension: bounds [1] lives one MonoArrayBounds further in. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + ((realidx1 * dim2_length + realidx2) * size) + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;

	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array accessor method
 * CMETHOD.  Rank 1 uses the inline single-dimension path, rank 2 may use the
 * inline two-dimension path, and everything else calls the generated
 * Address() marshal wrapper.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
	MonoMethod *addr_method;
	MonoClass *eclass = cmethod->klass->element_class;

	/* For setters the final parameter is the value, not an index. */
	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

	return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);

	/* emit_ldelema_2 depends on OP_LMUL */
	if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);

	/* gsharedvt element classes cannot use the generated wrapper path here. */
	if (mini_is_gsharedvt_variable_klass (eclass))

	/* General case: call the runtime-generated Address (rank, size) wrapper. */
	element_size = mono_class_array_element_size (eclass);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
	return MONO_BREAK_POLICY_ALWAYS;

/* Currently installed policy callback; replaced via mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4909 * mono_set_break_policy:
4910 * \param policy_callback the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
4913 * (both break IL instructions and \c Debugger.Break method calls), for example
4914 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4915 * untrusted or semi-trusted code.
4917 * \p policy_callback will be called every time a break point instruction needs to
4918 * be inserted with the method argument being the method that calls \c Debugger.Break
4919 * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4920 * if it wants the breakpoint to not be effective in the given method.
4921 * \c MONO_BREAK_POLICY_ALWAYS is the default.
4924 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4926 if (policy_callback)
4927 break_policy_func = policy_callback;
4929 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the registered break policy whether a breakpoint (IL `break' or a
 * Debugger.Break call) inside METHOD should actually be emitted.
 * NOTE(review): the name is misspelled ("brekpoint"); kept as-is since
 * callers elsewhere use this spelling.
 */
should_insert_brekpoint (MonoMethod *method) {
	switch (break_policy_func (method)) {
	case MONO_BREAK_POLICY_ALWAYS:
	case MONO_BREAK_POLICY_NEVER:
	case MONO_BREAK_POLICY_ON_DBG:
		/* The managed debugger (mdb) backend is gone. */
		g_warning ("mdb no longer supported");
	/* Any other value is a bug in the embedder's callback. */
	g_warning ("Incorrect value returned from break policy callback");
4948 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address and copy the value between the array slot and the by-ref value
 * argument (args [2]).  IS_SET selects the direction of the copy.
 */
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);

	/* set: *addr = *args [2]; storing a reference needs a write barrier. */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	if (mini_type_is_reference (&eklass->byval_arg))
		emit_write_barrier (cfg, addr, load);
	/* get: *args [2] = *addr; */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is treated as a reference type; CFG is unused here. */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
	return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing into an array of KLASS: sp [0] = array, sp [1] = index,
 * sp [2] = value.  With SAFETY_CHECKS and a non-null reference value, go
 * through the virtual stelemref helper (which performs the covariance check);
 * otherwise store directly, with a bounds check and a write barrier when the
 * element is a reference type.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		/* Both the array and the value must be object references on the stack. */
		if (sp [0]->type != STACK_OBJ)
		if (sp [2]->type != STACK_OBJ)

		/* Virtual call: the helper dispatches on the array's runtime type. */
		return mono_emit_method_call (cfg, helper, iargs, sp [0]);

	/* gsharedvt: element size unknown at compile time, use a value store. */
	if (mini_is_gsharedvt_variable_klass (klass)) {

		// FIXME-VT: OP_ICONST optimization
		addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		ins->opcode = OP_STOREV_MEMBASE;
	} else if (sp [1]->opcode == OP_ICONST) {
		/* Constant index: fold the element offset at compile time. */
		int array_reg = sp [0]->dreg;
		int index_reg = sp [1]->dreg;
		int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

		if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
			MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);

		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		/* General case: compute the address, then store through it. */
		MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		if (generic_class_is_reference_type (cfg, klass))
			emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Implement the Array.UnsafeLoad/UnsafeStore intrinsics: array element
 * access without covariance safety checks.  IS_SET selects store vs. load.
 */
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
		/* store: the element type is the third parameter */
		eklass = mono_class_from_mono_type (fsig->params [2]);
		/* load: the element type is the return type */
		eklass = mono_class_from_mono_type (fsig->ret);

		/* FALSE: skip the covariance safety checks for the unsafe variant. */
		return emit_array_store (cfg, eklass, args, FALSE);
		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5050 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5053 int param_size, return_size;
5055 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5056 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5058 if (cfg->verbose_level > 3)
5059 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5061 //Don't allow mixing reference types with value types
5062 if (param_klass->valuetype != return_klass->valuetype) {
5063 if (cfg->verbose_level > 3)
5064 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5068 if (!param_klass->valuetype) {
5069 if (cfg->verbose_level > 3)
5070 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5075 if (param_klass->has_references || return_klass->has_references)
5078 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5079 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5080 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5081 if (cfg->verbose_level > 3)
5082 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5086 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5087 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5088 if (cfg->verbose_level > 3)
5089 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5093 param_size = mono_class_value_size (param_klass, &align);
5094 return_size = mono_class_value_size (return_klass, &align);
5096 //We can do it if sizes match
5097 if (param_size == return_size) {
5098 if (cfg->verbose_level > 3)
5099 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5103 //No simple way to handle struct if sizes don't match
5104 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5105 if (cfg->verbose_level > 3)
5106 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5111 * Same reg size category.
5112 * A quick note on why we don't require widening here.
5113 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5115 * Since the source value comes from a function argument, the JIT will already have
5116 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5118 if (param_size <= 4 && return_size <= 4) {
5119 if (cfg->verbose_level > 3)
5120 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Implement the Array.UnsafeMov<S,R> intrinsic: reinterpret the argument as
 * the return type when is_unsafe_mov_compatible () allows it.
 */
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
	MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
	MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);

	/* gsharedvt return types cannot be handled here. */
	if (mini_is_gsharedvt_variable_type (fsig->ret))

	//Valuetypes that are semantically equivalent or numbers that can be widened to
	if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))

	//Arrays of valuetypes that are semantically equivalent
	if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Give the intrinsics machinery a chance to replace a constructor call with
 * inline IR: SIMD intrinsics first (when enabled), then the native-types
 * intrinsics.
 */
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
#ifdef MONO_ARCH_SIMD_INTRINSICS
	MonoInst *ins = NULL;

	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);

	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5164 emit_memory_barrier (MonoCompile *cfg, int kind)
5166 MonoInst *ins = NULL;
5167 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5168 MONO_ADD_INS (cfg->cbb, ins);
5169 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansions used with the LLVM backend: Math.Sin/Cos/Sqrt/Abs
 * become dedicated unary opcodes, and Math.Min/Max become conditional-move
 * opcodes when MONO_OPT_CMOV is enabled.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* The LLVM backend supports these intrinsics */
	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
		} else if (strcmp (cmethod->name, "Cos") == 0) {
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {

		/* All of the above are unary double -> double operations. */
		if (opcode && fsig->param_count == 1) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_dreg (cfg, ins->type);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

		/* Min/Max become cmov-based opcodes, selected by operand type. */
		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;

		/* Min/Max are binary; the result width follows the operand type. */
		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_dreg (cfg, ins->type);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
5237 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5239 if (cmethod->klass == mono_defaults.array_class) {
5240 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5241 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5242 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5243 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5244 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5245 return emit_array_unsafe_mov (cfg, fsig, args);
5252 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5254 MonoInst *ins = NULL;
5255 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5257 if (cmethod->klass == mono_defaults.string_class) {
5258 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5259 int dreg = alloc_ireg (cfg);
5260 int index_reg = alloc_preg (cfg);
5261 int add_reg = alloc_preg (cfg);
5263 #if SIZEOF_REGISTER == 8
5264 if (COMPILE_LLVM (cfg)) {
5265 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5267 /* The array reg is 64 bits but the index reg is only 32 */
5268 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5271 index_reg = args [1]->dreg;
5273 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5275 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5276 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5277 add_reg = ins->dreg;
5278 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5281 int mult_reg = alloc_preg (cfg);
5282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5283 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5284 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5285 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5287 type_from_op (cfg, ins, NULL, NULL);
5289 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5290 int dreg = alloc_ireg (cfg);
5291 /* Decompose later to allow more optimizations */
5292 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5293 ins->type = STACK_I4;
5294 ins->flags |= MONO_INST_FAULT;
5295 cfg->cbb->has_array_access = TRUE;
5296 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5301 } else if (cmethod->klass == mono_defaults.object_class) {
5302 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5303 int dreg = alloc_ireg_ref (cfg);
5304 int vt_reg = alloc_preg (cfg);
5305 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5306 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5307 type_from_op (cfg, ins, NULL, NULL);
5310 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5311 int dreg = alloc_ireg (cfg);
5312 int t1 = alloc_ireg (cfg);
5314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5315 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5316 ins->type = STACK_I4;
5319 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5320 MONO_INST_NEW (cfg, ins, OP_NOP);
5321 MONO_ADD_INS (cfg->cbb, ins);
5325 } else if (cmethod->klass == mono_defaults.array_class) {
5326 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5327 return emit_array_generic_access (cfg, fsig, args, FALSE);
5328 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5329 return emit_array_generic_access (cfg, fsig, args, TRUE);
5331 #ifndef MONO_BIG_ARRAYS
5333 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5336 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5337 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5338 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5339 int dreg = alloc_ireg (cfg);
5340 int bounds_reg = alloc_ireg_mp (cfg);
5341 MonoBasicBlock *end_bb, *szarray_bb;
5342 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5344 NEW_BBLOCK (cfg, end_bb);
5345 NEW_BBLOCK (cfg, szarray_bb);
5347 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5348 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5351 /* Non-szarray case */
5353 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5354 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5357 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5358 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5359 MONO_START_BB (cfg, szarray_bb);
5362 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5363 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5365 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5366 MONO_START_BB (cfg, end_bb);
5368 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5369 ins->type = STACK_I4;
5375 if (cmethod->name [0] != 'g')
5378 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5379 int dreg = alloc_ireg (cfg);
5380 int vtable_reg = alloc_preg (cfg);
5381 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5382 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5383 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5384 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5385 type_from_op (cfg, ins, NULL, NULL);
5388 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5389 int dreg = alloc_ireg (cfg);
5391 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5392 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5393 type_from_op (cfg, ins, NULL, NULL);
5398 } else if (cmethod->klass == runtime_helpers_class) {
5399 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5400 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5402 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5403 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5405 g_assert (ctx->method_inst);
5406 g_assert (ctx->method_inst->type_argc == 1);
5407 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5408 MonoClass *klass = mono_class_from_mono_type (t);
5412 mono_class_init (klass);
5413 if (MONO_TYPE_IS_REFERENCE (t))
5414 EMIT_NEW_ICONST (cfg, ins, 1);
5415 else if (MONO_TYPE_IS_PRIMITIVE (t))
5416 EMIT_NEW_ICONST (cfg, ins, 0);
5417 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5418 EMIT_NEW_ICONST (cfg, ins, 1);
5419 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5420 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5422 g_assert (cfg->gshared);
5424 int context_used = mini_class_check_context_used (cfg, klass);
5426 /* This returns 1 or 2 */
5427 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5428 int dreg = alloc_ireg (cfg);
5429 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5435 } else if (cmethod->klass == mono_defaults.monitor_class) {
5436 gboolean is_enter = FALSE;
5437 gboolean is_v4 = FALSE;
5439 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5443 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5448 * To make async stack traces work, icalls which can block should have a wrapper.
5449 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5451 MonoBasicBlock *end_bb;
5453 NEW_BBLOCK (cfg, end_bb);
5455 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5457 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5458 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5459 MONO_START_BB (cfg, end_bb);
5462 } else if (cmethod->klass == mono_defaults.thread_class) {
5463 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5464 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5465 MONO_ADD_INS (cfg->cbb, ins);
5467 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5468 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5469 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5471 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5473 if (fsig->params [0]->type == MONO_TYPE_I1)
5474 opcode = OP_LOADI1_MEMBASE;
5475 else if (fsig->params [0]->type == MONO_TYPE_U1)
5476 opcode = OP_LOADU1_MEMBASE;
5477 else if (fsig->params [0]->type == MONO_TYPE_I2)
5478 opcode = OP_LOADI2_MEMBASE;
5479 else if (fsig->params [0]->type == MONO_TYPE_U2)
5480 opcode = OP_LOADU2_MEMBASE;
5481 else if (fsig->params [0]->type == MONO_TYPE_I4)
5482 opcode = OP_LOADI4_MEMBASE;
5483 else if (fsig->params [0]->type == MONO_TYPE_U4)
5484 opcode = OP_LOADU4_MEMBASE;
5485 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5486 opcode = OP_LOADI8_MEMBASE;
5487 else if (fsig->params [0]->type == MONO_TYPE_R4)
5488 opcode = OP_LOADR4_MEMBASE;
5489 else if (fsig->params [0]->type == MONO_TYPE_R8)
5490 opcode = OP_LOADR8_MEMBASE;
5491 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5492 opcode = OP_LOAD_MEMBASE;
5495 MONO_INST_NEW (cfg, ins, opcode);
5496 ins->inst_basereg = args [0]->dreg;
5497 ins->inst_offset = 0;
5498 MONO_ADD_INS (cfg->cbb, ins);
5500 switch (fsig->params [0]->type) {
5507 ins->dreg = mono_alloc_ireg (cfg);
5508 ins->type = STACK_I4;
5512 ins->dreg = mono_alloc_lreg (cfg);
5513 ins->type = STACK_I8;
5517 ins->dreg = mono_alloc_ireg (cfg);
5518 #if SIZEOF_REGISTER == 8
5519 ins->type = STACK_I8;
5521 ins->type = STACK_I4;
5526 ins->dreg = mono_alloc_freg (cfg);
5527 ins->type = STACK_R8;
5530 g_assert (mini_type_is_reference (fsig->params [0]));
5531 ins->dreg = mono_alloc_ireg_ref (cfg);
5532 ins->type = STACK_OBJ;
5536 if (opcode == OP_LOADI8_MEMBASE)
5537 ins = mono_decompose_opcode (cfg, ins);
5539 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5543 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5545 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5547 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5548 opcode = OP_STOREI1_MEMBASE_REG;
5549 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5550 opcode = OP_STOREI2_MEMBASE_REG;
5551 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5552 opcode = OP_STOREI4_MEMBASE_REG;
5553 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5554 opcode = OP_STOREI8_MEMBASE_REG;
5555 else if (fsig->params [0]->type == MONO_TYPE_R4)
5556 opcode = OP_STORER4_MEMBASE_REG;
5557 else if (fsig->params [0]->type == MONO_TYPE_R8)
5558 opcode = OP_STORER8_MEMBASE_REG;
5559 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5560 opcode = OP_STORE_MEMBASE_REG;
5563 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5565 MONO_INST_NEW (cfg, ins, opcode);
5566 ins->sreg1 = args [1]->dreg;
5567 ins->inst_destbasereg = args [0]->dreg;
5568 ins->inst_offset = 0;
5569 MONO_ADD_INS (cfg->cbb, ins);
5571 if (opcode == OP_STOREI8_MEMBASE_REG)
5572 ins = mono_decompose_opcode (cfg, ins);
5577 } else if (cmethod->klass->image == mono_defaults.corlib &&
5578 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5579 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5582 #if SIZEOF_REGISTER == 8
5583 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5584 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5585 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5586 ins->dreg = mono_alloc_preg (cfg);
5587 ins->sreg1 = args [0]->dreg;
5588 ins->type = STACK_I8;
5589 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5590 MONO_ADD_INS (cfg->cbb, ins);
5594 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5596 /* 64 bit reads are already atomic */
5597 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5598 load_ins->dreg = mono_alloc_preg (cfg);
5599 load_ins->inst_basereg = args [0]->dreg;
5600 load_ins->inst_offset = 0;
5601 load_ins->type = STACK_I8;
5602 MONO_ADD_INS (cfg->cbb, load_ins);
5604 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5611 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5612 MonoInst *ins_iconst;
5615 if (fsig->params [0]->type == MONO_TYPE_I4) {
5616 opcode = OP_ATOMIC_ADD_I4;
5617 cfg->has_atomic_add_i4 = TRUE;
5619 #if SIZEOF_REGISTER == 8
5620 else if (fsig->params [0]->type == MONO_TYPE_I8)
5621 opcode = OP_ATOMIC_ADD_I8;
5624 if (!mono_arch_opcode_supported (opcode))
5626 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5627 ins_iconst->inst_c0 = 1;
5628 ins_iconst->dreg = mono_alloc_ireg (cfg);
5629 MONO_ADD_INS (cfg->cbb, ins_iconst);
5631 MONO_INST_NEW (cfg, ins, opcode);
5632 ins->dreg = mono_alloc_ireg (cfg);
5633 ins->inst_basereg = args [0]->dreg;
5634 ins->inst_offset = 0;
5635 ins->sreg2 = ins_iconst->dreg;
5636 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5637 MONO_ADD_INS (cfg->cbb, ins);
5639 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5640 MonoInst *ins_iconst;
5643 if (fsig->params [0]->type == MONO_TYPE_I4) {
5644 opcode = OP_ATOMIC_ADD_I4;
5645 cfg->has_atomic_add_i4 = TRUE;
5647 #if SIZEOF_REGISTER == 8
5648 else if (fsig->params [0]->type == MONO_TYPE_I8)
5649 opcode = OP_ATOMIC_ADD_I8;
5652 if (!mono_arch_opcode_supported (opcode))
5654 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5655 ins_iconst->inst_c0 = -1;
5656 ins_iconst->dreg = mono_alloc_ireg (cfg);
5657 MONO_ADD_INS (cfg->cbb, ins_iconst);
5659 MONO_INST_NEW (cfg, ins, opcode);
5660 ins->dreg = mono_alloc_ireg (cfg);
5661 ins->inst_basereg = args [0]->dreg;
5662 ins->inst_offset = 0;
5663 ins->sreg2 = ins_iconst->dreg;
5664 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5665 MONO_ADD_INS (cfg->cbb, ins);
5667 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5670 if (fsig->params [0]->type == MONO_TYPE_I4) {
5671 opcode = OP_ATOMIC_ADD_I4;
5672 cfg->has_atomic_add_i4 = TRUE;
5674 #if SIZEOF_REGISTER == 8
5675 else if (fsig->params [0]->type == MONO_TYPE_I8)
5676 opcode = OP_ATOMIC_ADD_I8;
5679 if (!mono_arch_opcode_supported (opcode))
5681 MONO_INST_NEW (cfg, ins, opcode);
5682 ins->dreg = mono_alloc_ireg (cfg);
5683 ins->inst_basereg = args [0]->dreg;
5684 ins->inst_offset = 0;
5685 ins->sreg2 = args [1]->dreg;
5686 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5687 MONO_ADD_INS (cfg->cbb, ins);
5690 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5691 MonoInst *f2i = NULL, *i2f;
5692 guint32 opcode, f2i_opcode, i2f_opcode;
5693 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5694 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5696 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5697 fsig->params [0]->type == MONO_TYPE_R4) {
5698 opcode = OP_ATOMIC_EXCHANGE_I4;
5699 f2i_opcode = OP_MOVE_F_TO_I4;
5700 i2f_opcode = OP_MOVE_I4_TO_F;
5701 cfg->has_atomic_exchange_i4 = TRUE;
5703 #if SIZEOF_REGISTER == 8
5705 fsig->params [0]->type == MONO_TYPE_I8 ||
5706 fsig->params [0]->type == MONO_TYPE_R8 ||
5707 fsig->params [0]->type == MONO_TYPE_I) {
5708 opcode = OP_ATOMIC_EXCHANGE_I8;
5709 f2i_opcode = OP_MOVE_F_TO_I8;
5710 i2f_opcode = OP_MOVE_I8_TO_F;
5713 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5714 opcode = OP_ATOMIC_EXCHANGE_I4;
5715 cfg->has_atomic_exchange_i4 = TRUE;
5721 if (!mono_arch_opcode_supported (opcode))
5725 /* TODO: Decompose these opcodes instead of bailing here. */
5726 if (COMPILE_SOFT_FLOAT (cfg))
5729 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5730 f2i->dreg = mono_alloc_ireg (cfg);
5731 f2i->sreg1 = args [1]->dreg;
5732 if (f2i_opcode == OP_MOVE_F_TO_I4)
5733 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5734 MONO_ADD_INS (cfg->cbb, f2i);
5737 MONO_INST_NEW (cfg, ins, opcode);
5738 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5739 ins->inst_basereg = args [0]->dreg;
5740 ins->inst_offset = 0;
5741 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5742 MONO_ADD_INS (cfg->cbb, ins);
5744 switch (fsig->params [0]->type) {
5746 ins->type = STACK_I4;
5749 ins->type = STACK_I8;
5752 #if SIZEOF_REGISTER == 8
5753 ins->type = STACK_I8;
5755 ins->type = STACK_I4;
5760 ins->type = STACK_R8;
5763 g_assert (mini_type_is_reference (fsig->params [0]));
5764 ins->type = STACK_OBJ;
5769 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5770 i2f->dreg = mono_alloc_freg (cfg);
5771 i2f->sreg1 = ins->dreg;
5772 i2f->type = STACK_R8;
5773 if (i2f_opcode == OP_MOVE_I4_TO_F)
5774 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5775 MONO_ADD_INS (cfg->cbb, i2f);
5780 if (cfg->gen_write_barriers && is_ref)
5781 emit_write_barrier (cfg, args [0], args [1]);
5783 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5784 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5785 guint32 opcode, f2i_opcode, i2f_opcode;
5786 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5787 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5789 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5790 fsig->params [1]->type == MONO_TYPE_R4) {
5791 opcode = OP_ATOMIC_CAS_I4;
5792 f2i_opcode = OP_MOVE_F_TO_I4;
5793 i2f_opcode = OP_MOVE_I4_TO_F;
5794 cfg->has_atomic_cas_i4 = TRUE;
5796 #if SIZEOF_REGISTER == 8
5798 fsig->params [1]->type == MONO_TYPE_I8 ||
5799 fsig->params [1]->type == MONO_TYPE_R8 ||
5800 fsig->params [1]->type == MONO_TYPE_I) {
5801 opcode = OP_ATOMIC_CAS_I8;
5802 f2i_opcode = OP_MOVE_F_TO_I8;
5803 i2f_opcode = OP_MOVE_I8_TO_F;
5806 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5807 opcode = OP_ATOMIC_CAS_I4;
5808 cfg->has_atomic_cas_i4 = TRUE;
5814 if (!mono_arch_opcode_supported (opcode))
5818 /* TODO: Decompose these opcodes instead of bailing here. */
5819 if (COMPILE_SOFT_FLOAT (cfg))
5822 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5823 f2i_new->dreg = mono_alloc_ireg (cfg);
5824 f2i_new->sreg1 = args [1]->dreg;
5825 if (f2i_opcode == OP_MOVE_F_TO_I4)
5826 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5827 MONO_ADD_INS (cfg->cbb, f2i_new);
5829 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5830 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5831 f2i_cmp->sreg1 = args [2]->dreg;
5832 if (f2i_opcode == OP_MOVE_F_TO_I4)
5833 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5834 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5837 MONO_INST_NEW (cfg, ins, opcode);
5838 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5839 ins->sreg1 = args [0]->dreg;
5840 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5841 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5842 MONO_ADD_INS (cfg->cbb, ins);
5844 switch (fsig->params [1]->type) {
5846 ins->type = STACK_I4;
5849 ins->type = STACK_I8;
5852 #if SIZEOF_REGISTER == 8
5853 ins->type = STACK_I8;
5855 ins->type = STACK_I4;
5859 ins->type = cfg->r4_stack_type;
5862 ins->type = STACK_R8;
5865 g_assert (mini_type_is_reference (fsig->params [1]));
5866 ins->type = STACK_OBJ;
5871 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5872 i2f->dreg = mono_alloc_freg (cfg);
5873 i2f->sreg1 = ins->dreg;
5874 i2f->type = STACK_R8;
5875 if (i2f_opcode == OP_MOVE_I4_TO_F)
5876 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5877 MONO_ADD_INS (cfg->cbb, i2f);
5882 if (cfg->gen_write_barriers && is_ref)
5883 emit_write_barrier (cfg, args [0], args [1]);
5885 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5886 fsig->params [1]->type == MONO_TYPE_I4) {
5887 MonoInst *cmp, *ceq;
5889 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5892 /* int32 r = CAS (location, value, comparand); */
5893 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5894 ins->dreg = alloc_ireg (cfg);
5895 ins->sreg1 = args [0]->dreg;
5896 ins->sreg2 = args [1]->dreg;
5897 ins->sreg3 = args [2]->dreg;
5898 ins->type = STACK_I4;
5899 MONO_ADD_INS (cfg->cbb, ins);
5901 /* bool result = r == comparand; */
5902 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5903 cmp->sreg1 = ins->dreg;
5904 cmp->sreg2 = args [2]->dreg;
5905 cmp->type = STACK_I4;
5906 MONO_ADD_INS (cfg->cbb, cmp);
5908 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5909 ceq->dreg = alloc_ireg (cfg);
5910 ceq->type = STACK_I4;
5911 MONO_ADD_INS (cfg->cbb, ceq);
5913 /* *success = result; */
5914 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5916 cfg->has_atomic_cas_i4 = TRUE;
5918 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5919 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5923 } else if (cmethod->klass->image == mono_defaults.corlib &&
5924 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5925 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5928 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5930 MonoType *t = fsig->params [0];
5932 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5934 g_assert (t->byref);
5935 /* t is a byref type, so the reference check is more complicated */
5936 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5937 if (t->type == MONO_TYPE_I1)
5938 opcode = OP_ATOMIC_LOAD_I1;
5939 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5940 opcode = OP_ATOMIC_LOAD_U1;
5941 else if (t->type == MONO_TYPE_I2)
5942 opcode = OP_ATOMIC_LOAD_I2;
5943 else if (t->type == MONO_TYPE_U2)
5944 opcode = OP_ATOMIC_LOAD_U2;
5945 else if (t->type == MONO_TYPE_I4)
5946 opcode = OP_ATOMIC_LOAD_I4;
5947 else if (t->type == MONO_TYPE_U4)
5948 opcode = OP_ATOMIC_LOAD_U4;
5949 else if (t->type == MONO_TYPE_R4)
5950 opcode = OP_ATOMIC_LOAD_R4;
5951 else if (t->type == MONO_TYPE_R8)
5952 opcode = OP_ATOMIC_LOAD_R8;
5953 #if SIZEOF_REGISTER == 8
5954 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5955 opcode = OP_ATOMIC_LOAD_I8;
5956 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5957 opcode = OP_ATOMIC_LOAD_U8;
5959 else if (t->type == MONO_TYPE_I)
5960 opcode = OP_ATOMIC_LOAD_I4;
5961 else if (is_ref || t->type == MONO_TYPE_U)
5962 opcode = OP_ATOMIC_LOAD_U4;
5966 if (!mono_arch_opcode_supported (opcode))
5969 MONO_INST_NEW (cfg, ins, opcode);
5970 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5971 ins->sreg1 = args [0]->dreg;
5972 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5973 MONO_ADD_INS (cfg->cbb, ins);
5976 case MONO_TYPE_BOOLEAN:
5983 ins->type = STACK_I4;
5987 ins->type = STACK_I8;
5991 #if SIZEOF_REGISTER == 8
5992 ins->type = STACK_I8;
5994 ins->type = STACK_I4;
5998 ins->type = cfg->r4_stack_type;
6001 ins->type = STACK_R8;
6005 ins->type = STACK_OBJ;
6011 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6013 MonoType *t = fsig->params [0];
6016 g_assert (t->byref);
6017 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6018 if (t->type == MONO_TYPE_I1)
6019 opcode = OP_ATOMIC_STORE_I1;
6020 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6021 opcode = OP_ATOMIC_STORE_U1;
6022 else if (t->type == MONO_TYPE_I2)
6023 opcode = OP_ATOMIC_STORE_I2;
6024 else if (t->type == MONO_TYPE_U2)
6025 opcode = OP_ATOMIC_STORE_U2;
6026 else if (t->type == MONO_TYPE_I4)
6027 opcode = OP_ATOMIC_STORE_I4;
6028 else if (t->type == MONO_TYPE_U4)
6029 opcode = OP_ATOMIC_STORE_U4;
6030 else if (t->type == MONO_TYPE_R4)
6031 opcode = OP_ATOMIC_STORE_R4;
6032 else if (t->type == MONO_TYPE_R8)
6033 opcode = OP_ATOMIC_STORE_R8;
6034 #if SIZEOF_REGISTER == 8
6035 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6036 opcode = OP_ATOMIC_STORE_I8;
6037 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6038 opcode = OP_ATOMIC_STORE_U8;
6040 else if (t->type == MONO_TYPE_I)
6041 opcode = OP_ATOMIC_STORE_I4;
6042 else if (is_ref || t->type == MONO_TYPE_U)
6043 opcode = OP_ATOMIC_STORE_U4;
6047 if (!mono_arch_opcode_supported (opcode))
6050 MONO_INST_NEW (cfg, ins, opcode);
6051 ins->dreg = args [0]->dreg;
6052 ins->sreg1 = args [1]->dreg;
6053 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6054 MONO_ADD_INS (cfg->cbb, ins);
6056 if (cfg->gen_write_barriers && is_ref)
6057 emit_write_barrier (cfg, args [0], args [1]);
6063 } else if (cmethod->klass->image == mono_defaults.corlib &&
6064 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6065 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6066 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6067 if (should_insert_brekpoint (cfg->method)) {
6068 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6070 MONO_INST_NEW (cfg, ins, OP_NOP);
6071 MONO_ADD_INS (cfg->cbb, ins);
6075 } else if (cmethod->klass->image == mono_defaults.corlib &&
6076 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6077 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6078 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6080 EMIT_NEW_ICONST (cfg, ins, 1);
6082 EMIT_NEW_ICONST (cfg, ins, 0);
6085 } else if (cmethod->klass->image == mono_defaults.corlib &&
6086 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6087 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6088 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6089 /* No stack walks are currently available, so implement this as an intrinsic */
6090 MonoInst *assembly_ins;
6092 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6093 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6096 } else if (cmethod->klass->image == mono_defaults.corlib &&
6097 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6098 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6099 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6100 /* No stack walks are currently available, so implement this as an intrinsic */
6101 MonoInst *method_ins;
6102 MonoMethod *declaring = cfg->method;
6104 /* This returns the declaring generic method */
6105 if (declaring->is_inflated)
6106 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6107 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6108 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6109 cfg->no_inline = TRUE;
6110 if (cfg->method != cfg->current_method)
6111 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6114 } else if (cmethod->klass == mono_defaults.math_class) {
6116 * There is general branchless code for Min/Max, but it does not work for
6118 * http://everything2.com/?node_id=1051618
6120 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6121 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6122 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6123 ins->dreg = alloc_preg (cfg);
6124 ins->type = STACK_I4;
6125 MONO_ADD_INS (cfg->cbb, ins);
6127 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6128 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6129 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6130 !strcmp (cmethod->klass->name, "Selector")) ||
6131 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6132 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6133 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6134 !strcmp (cmethod->klass->name, "Selector"))
6136 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6137 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6138 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6141 MonoJumpInfoToken *ji;
6144 if (args [0]->opcode == OP_GOT_ENTRY) {
6145 pi = (MonoInst *)args [0]->inst_p1;
6146 g_assert (pi->opcode == OP_PATCH_INFO);
6147 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6148 ji = (MonoJumpInfoToken *)pi->inst_p0;
6150 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6151 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6154 NULLIFY_INS (args [0]);
6156 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6157 return_val_if_nok (&cfg->error, NULL);
6159 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6160 ins->dreg = mono_alloc_ireg (cfg);
6163 MONO_ADD_INS (cfg->cbb, ins);
6168 #ifdef MONO_ARCH_SIMD_INTRINSICS
6169 if (cfg->opt & MONO_OPT_SIMD) {
6170 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6176 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6180 if (COMPILE_LLVM (cfg)) {
6181 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6186 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6190 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to a few well-known runtime methods to faster,
 * JIT-generated equivalents. Currently only handles managed string
 * allocation: String.InternalAllocateStr is rewritten into a direct
 * call to the GC's managed allocator with an explicit vtable argument.
 * Returns the emitted call instruction, or (presumably, on the elided
 * fall-through path) NULL when no redirection applies — TODO confirm.
 */
6193 inline static MonoInst*
6194 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6195 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6197 	if (method->klass == mono_defaults.string_class) {
6198 		/* managed string allocation support */
		/* Skip the fast path when the allocation profiler is active or
		 * code is compiled domain-shared (MONO_OPT_SHARED), since both
		 * need the generic allocation path. */
6199 		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6200 			MonoInst *iargs [2];
6201 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6202 			MonoMethod *managed_alloc = NULL;
6204 			g_assert (vtable); /* Should not fail since it is System.String */
6205 #ifndef MONO_CROSS_COMPILE
6206 			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
			/* args [0] holds the requested string length; the allocator
			 * takes (vtable, length).
			 * NOTE(review): a NULL check on managed_alloc is not visible
			 * in this excerpt — confirm the elided lines bail out when
			 * no managed allocator is available. */
6210 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6211 			iargs [1] = args [0];
6212 			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Store the call arguments on the evaluation stack SP into newly
 * created local variables, so an inlined method body can reference
 * them through cfg->args like ordinary arguments. Handles the
 * implicit 'this' argument (index 0 when sig->hasthis).
 */
6219 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6221 	MonoInst *store, *temp;
6224 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* For 'this' the static signature has no entry, so derive the
		 * type from the value on the stack instead. */
6225 		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6228 		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6229 		 * would be different than the MonoInst's used to represent arguments, and
6230 		 * the ldelema implementation can't deal with that.
6231 		 * Solution: When ldelema is used on an inline argument, create a var for
6232 		 * it, emit ldelema on that var, and emit the saving code below in
6233 		 * inline_method () if needed.
6235 		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6236 		cfg->args [i] = temp;
6237 		/* This uses cfg->args [i] which is set by the preceding line */
6238 		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6239 		store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids: when enabled, inlining can be restricted by method
 * name via environment variables, to bisect inlining-related bugs.
 */
6244 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6245 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6247 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE if CALLED_METHOD's full name starts with the prefix
 * given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT; only such methods
 * are then considered for inlining. The env var is read once and
 * cached in a function-local static.
 */
6249 check_inline_called_method_name_limit (MonoMethod *called_method)
6252 	static const char *limit = NULL;
6254 	if (limit == NULL) {
6255 		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6257 		if (limit_string != NULL)
6258 			limit = limit_string;
6263 	if (limit [0] != '\0') {
6264 		char *called_method_name = mono_method_full_name (called_method, TRUE);
		/* Prefix match: compare only the first strlen (limit) chars. */
6266 		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6267 		g_free (called_method_name);
6269 		//return (strncmp_result <= 0);
6270 		return (strncmp_result == 0);
6277 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the
 * caller side: return TRUE if CALLER_METHOD's full name starts with
 * the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. The env var is
 * read once and cached.
 */
6279 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6282 	static const char *limit = NULL;
6284 	if (limit == NULL) {
6285 		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6286 		if (limit_string != NULL) {
6287 			limit = limit_string;
6293 	if (limit [0] != '\0') {
6294 		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		/* Prefix match against the configured limit string. */
6296 		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6297 		g_free (caller_method_name);
6299 		//return (strncmp_result <= 0);
6300 		return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG to the zero/default value of
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for
 * floats, and a VZERO for value types (including generic instances
 * and type variables constrained to valuetypes).
 */
6308 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	/* Static zero constants referenced by address from R4/R8 const
	 * instructions; they must outlive the compiled code. */
6310 	static double r8_0 = 0.0;
6311 	static float r4_0 = 0.0;
6315 	rtype = mini_get_underlying_type (rtype);
6319 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6320 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6321 		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6322 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6323 		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	/* With r4fp, R4 values stay in single precision and need their
	 * own constant opcode. */
6324 	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
6325 		MONO_INST_NEW (cfg, ins, OP_R4CONST);
6326 		ins->type = STACK_R4;
6327 		ins->inst_p0 = (void*)&r4_0;
6329 		MONO_ADD_INS (cfg->cbb, ins);
6330 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6331 		MONO_INST_NEW (cfg, ins, OP_R8CONST);
6332 		ins->type = STACK_R8;
6333 		ins->inst_p0 = (void*)&r8_0;
6335 		MONO_ADD_INS (cfg->cbb, ins);
6336 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6337 		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6338 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6339 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6340 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	/* Everything else (object references etc.) defaults to NULL. */
6342 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder
 * initializations instead of real stores. These keep the IR/SSA
 * form valid (every vreg has a def) without generating actual
 * machine code; used when locals need no real zero-init.
 * Type dispatch mirrors emit_init_rvar () case by case.
 */
6347 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6351 	rtype = mini_get_underlying_type (rtype);
6355 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6356 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6357 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6358 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6359 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6360 	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
6361 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6362 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6363 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6364 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6365 		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6366 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6367 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6368 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	/* No dummy opcode covers the remaining cases; fall back to a
	 * real zero initialization. */
6370 		emit_init_rvar (cfg, dreg, rtype);
6374 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize the IL local variable LOCAL of type TYPE. With INIT
 * TRUE a real zero-init is emitted; otherwise only a dummy init so
 * the vreg still has a definition.
 */
6376 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6378 	MonoInst *var = cfg->locals [local];
	/* Under soft-float the local cannot be initialized in place:
	 * init a fresh vreg, then store it into the local. */
6379 	if (COMPILE_SOFT_FLOAT (cfg)) {
6381 		int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6382 		emit_init_rvar (cfg, reg, type);
6383 		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6386 			emit_init_rvar (cfg, var->dreg, type);
6388 			emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around the static inline_method () below; forwards all arguments unchanged. */
6393 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6395 	return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6401  * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the current emission point. The
 * callee's IR is generated into fresh start/end basic blocks via a
 * recursive mono_method_to_ir () call; if the resulting cost is
 * acceptable (or INLINE_ALWAYS is set) the blocks are linked into
 * the caller's CFG and merged where possible, otherwise the attempt
 * is abandoned and the caller's state is restored.
 *
 *   Most of the body is bracketed save/restore of the pieces of
 * MonoCompile state that mono_method_to_ir () overwrites (locals,
 * args, cil offsets, current method, generic context, ...) — keep
 * the two lists in sync when adding fields.
 */
6404 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6405 guchar *ip, guint real_offset, gboolean inline_always)
6408 	MonoInst *ins, *rvar = NULL;
6409 	MonoMethodHeader *cheader;
6410 	MonoBasicBlock *ebblock, *sbblock;
6412 	MonoMethod *prev_inlined_method;
6413 	MonoInst **prev_locals, **prev_args;
6414 	MonoType **prev_arg_types;
6415 	guint prev_real_offset;
6416 	GHashTable *prev_cbb_hash;
6417 	MonoBasicBlock **prev_cil_offset_to_bb;
6418 	MonoBasicBlock *prev_cbb;
6419 	const unsigned char *prev_ip;
6420 	unsigned char *prev_cil_start;
6421 	guint32 prev_cil_offset_to_bb_len;
6422 	MonoMethod *prev_current_method;
6423 	MonoGenericContext *prev_generic_context;
6424 	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6426 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
	/* Optional name-based inlining filters (debugging aids, see above). */
6428 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6429 	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6432 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6433 	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6438 	fsig = mono_method_signature (cmethod);
6440 	if (cfg->verbose_level > 2)
6441 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6443 	if (!cmethod->inline_info) {
6444 		cfg->stat_inlineable_methods++;
6445 		cmethod->inline_info = 1;
6448 	/* allocate local variables */
6449 	cheader = mono_method_get_header_checked (cmethod, &error);
	/* Header load failed: fatal when the inline is mandatory,
	 * otherwise just give up on inlining. */
6451 		if (inline_always) {
6452 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6453 			mono_error_move (&cfg->error, &error);
6455 			mono_error_cleanup (&error);
6460 	/*Must verify before creating locals as it can cause the JIT to assert.*/
6461 	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6462 		mono_metadata_free_mh (cheader);
6466 	/* allocate space to store the return value */
6467 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6468 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	/* Swap in the callee's locals array; the caller's is restored below. */
6471 	prev_locals = cfg->locals;
6472 	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6473 	for (i = 0; i < cheader->num_locals; ++i)
6474 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6476 	/* allocate start and end blocks */
6477 	/* This is needed so if the inline is aborted, we can clean up */
6478 	NEW_BBLOCK (cfg, sbblock);
6479 	sbblock->real_offset = real_offset;
6481 	NEW_BBLOCK (cfg, ebblock);
6482 	ebblock->block_num = cfg->num_bblocks++;
6483 	ebblock->real_offset = real_offset;
	/* --- save caller state clobbered by the recursive IR generation --- */
6485 	prev_args = cfg->args;
6486 	prev_arg_types = cfg->arg_types;
6487 	prev_inlined_method = cfg->inlined_method;
6488 	cfg->inlined_method = cmethod;
6489 	cfg->ret_var_set = FALSE;
6490 	cfg->inline_depth ++;
6491 	prev_real_offset = cfg->real_offset;
6492 	prev_cbb_hash = cfg->cbb_hash;
6493 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6494 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6495 	prev_cil_start = cfg->cil_start;
6497 	prev_cbb = cfg->cbb;
6498 	prev_current_method = cfg->current_method;
6499 	prev_generic_context = cfg->generic_context;
6500 	prev_ret_var_set = cfg->ret_var_set;
6501 	prev_disable_inline = cfg->disable_inline;
	/* A callvirt on a non-static method is inlined as a virtual call site. */
6503 	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
	/* Recursively generate IR for the callee between sbblock and ebblock;
	 * costs < 0 signals failure. */
6506 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6508 	ret_var_set = cfg->ret_var_set;
	/* --- restore caller state --- */
6510 	cfg->inlined_method = prev_inlined_method;
6511 	cfg->real_offset = prev_real_offset;
6512 	cfg->cbb_hash = prev_cbb_hash;
6513 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6514 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6515 	cfg->cil_start = prev_cil_start;
6517 	cfg->locals = prev_locals;
6518 	cfg->args = prev_args;
6519 	cfg->arg_types = prev_arg_types;
6520 	cfg->current_method = prev_current_method;
6521 	cfg->generic_context = prev_generic_context;
6522 	cfg->ret_var_set = prev_ret_var_set;
6523 	cfg->disable_inline = prev_disable_inline;
6524 	cfg->inline_depth --;
	/* Accept the inline when cheap enough (cost < 60), forced, or the
	 * callee is marked AggressiveInlining. */
6526 	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6527 		if (cfg->verbose_level > 2)
6528 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6530 		cfg->stat_inlined_methods++;
6532 		/* always add some code to avoid block split failures */
6533 		MONO_INST_NEW (cfg, ins, OP_NOP);
6534 		MONO_ADD_INS (prev_cbb, ins);
6536 		prev_cbb->next_bb = sbblock;
6537 		link_bblock (cfg, prev_cbb, sbblock);
6540 		 * Get rid of the begin and end bblocks if possible to aid local
6543 		if (prev_cbb->out_count == 1)
6544 			mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6546 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6547 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6549 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6550 			MonoBasicBlock *prev = ebblock->in_bb [0];
6552 			if (prev->next_bb == ebblock) {
6553 				mono_merge_basic_blocks (cfg, prev, ebblock);
6555 				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6556 					mono_merge_basic_blocks (cfg, prev_cbb, prev);
6557 					cfg->cbb = prev_cbb;
6560 				/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6565 		 * It's possible that the rvar is set in some prev bblock, but not in others.
	/* Initialize rvar on predecessor edges where the callee did not set it. */
6571 			for (i = 0; i < ebblock->in_count; ++i) {
6572 				bb = ebblock->in_bb [i];
6574 				if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6577 					emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6587 			 * If the inlined method contains only a throw, then the ret var is not
6588 			 * set, so set it to a dummy value.
6591 				emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6593 			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6596 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	/* Inline rejected: undo and report. */
6599 		if (cfg->verbose_level > 2)
6600 			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6601 		cfg->exception_type = MONO_EXCEPTION_NONE;
6603 		/* This gets rid of the newly added bblocks */
6604 		cfg->cbb = prev_cbb;
6606 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6611 * Some of these comments may well be out-of-date.
6612 * Design decisions: we do a single pass over the IL code (and we do bblock
6613 * splitting/merging in the few cases when it's required: a back jump to an IL
6614 * address that was not already seen as bblock starting point).
6615 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6616 * Complex operations are decomposed in simpler ones right away. We need to let the
6617 * arch-specific code peek and poke inside this process somehow (except when the
6618 * optimizations can take advantage of the full semantic info of coarse opcodes).
6619 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6620 * MonoInst->opcode initially is the IL opcode or some simplification of that
6621 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6622 * opcode with value bigger than OP_LAST.
6623 * At this point the IR can be handed over to an interpreter, a dumb code generator
6624 * or to the optimizing code generator that will translate it to SSA form.
6626 * Profiling directed optimizations.
6627 * We may compile by default with few or no optimizations and instrument the code
6628 * or the user may indicate what methods to optimize the most either in a config file
6629 * or through repeated runs where the compiler applies offline the optimizations to
6630 * each method and then decides if it was worth it.
/*
 * Verification/bounds-check helpers used inside mono_method_to_ir ().
 * They expand in a context where locals such as 'sp', 'stack_start',
 * 'header', 'num_args', 'ip' and 'end' are in scope, and jump to the
 * UNVERIFIED / TYPE_LOAD_ERROR handling on failure.
 */
6633 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6634 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6635 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6636 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6637 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6638 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6639 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6640 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6642 /* offset from br.s -> br like opcodes */
6643 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB,
 * i.e. no other basic block starts at that offset. Used to decide when
 * instruction processing must stop at a block boundary.
 */
6646 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6648 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6650 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and
 * create basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following a branch/switch, so the main IR-generation pass
 * can rely on the block map in cfg->cil_offset_to_bb.
 *   Additionally, the block containing a CEE_THROW is flagged
 * out_of_line so it can be moved to cold code.
 */
6654 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6656 	unsigned char *ip = start;
6657 	unsigned char *target;
6660 	MonoBasicBlock *bblock;
6661 	const MonoOpcode *opcode;
6664 		cli_addr = ip - start;
6665 		i = mono_opcode_value ((const guint8 **)&ip, end);
6668 		opcode = &mono_opcodes [i];
		/* Advance 'ip' by the operand size; only branch operands create blocks. */
6669 		switch (opcode->argument) {
6670 		case MonoInlineNone:
6673 		case MonoInlineString:
6674 		case MonoInlineType:
6675 		case MonoInlineField:
6676 		case MonoInlineMethod:
6679 		case MonoShortInlineR:
6686 		case MonoShortInlineVar:
6687 		case MonoShortInlineI:
		/* Short branch: 1-byte signed displacement from the next instruction. */
6690 		case MonoShortInlineBrTarget:
6691 			target = start + cli_addr + 2 + (signed char)ip [1];
6692 			GET_BBLOCK (cfg, bblock, target);
			/* The fall-through successor also starts a block. */
6695 			GET_BBLOCK (cfg, bblock, ip);
		/* Long branch: 4-byte signed displacement. */
6697 		case MonoInlineBrTarget:
6698 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6699 			GET_BBLOCK (cfg, bblock, target);
6702 			GET_BBLOCK (cfg, bblock, ip);
		/* switch: N 4-byte targets relative to the end of the operand table. */
6704 		case MonoInlineSwitch: {
6705 			guint32 n = read32 (ip + 1);
6708 			cli_addr += 5 + 4 * n;
6709 			target = start + cli_addr;
6710 			GET_BBLOCK (cfg, bblock, target);
6712 			for (j = 0; j < n; ++j) {
6713 				target = start + cli_addr + (gint32)read32 (ip);
6714 				GET_BBLOCK (cfg, bblock, target);
6724 			g_assert_not_reached ();
6727 		if (i == CEE_THROW) {
6728 			unsigned char *bb_start = ip - 1;
6730 			/* Find the start of the bblock containing the throw */
6732 			while ((bb_start >= start) && !bblock) {
6733 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6737 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper
 * methods the method is fetched from the wrapper's data table and
 * inflated with CONTEXT; otherwise it is looked up normally through
 * the image metadata. Failures are reported through ERROR ("open"
 * constructed methods are allowed here; see mini_get_method () below).
 */
6747 static inline MonoMethod *
6748 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6754 	if (m->wrapper_type != MONO_WRAPPER_NONE) {
6755 		method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6757 			method = mono_class_inflate_generic_method_checked (method, context, error);
6760 		method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Wrapper over mini_get_method_allow_open () that additionally
 * rejects methods on open constructed types when not compiling
 * gshared code (such methods cannot be JITted concretely). Errors go
 * into cfg->error when CFG is available, otherwise a local error that
 * is cleaned up (FIXME in the original: the error is swallowed).
 */
6766 static inline MonoMethod *
6767 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6770 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6772 	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6773 		mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6777 	if (!method && !cfg)
6778 		mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a class in the context of METHOD, inflating it with
 * CONTEXT when needed, and initialize the resulting class. Resolution
 * errors are discarded (see the FIXMEs below).
 */
6784 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
/* Wrapper methods store the class directly in their wrapper data */
6789 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6790 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6792 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6793 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Otherwise resolve the token (typespec aware) from the method's image */
6796 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6797 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6800 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a method signature in the context of METHOD and inflate
 * it with CONTEXT. Returns NULL on error (reported through ERROR).
 */
6804 static inline MonoMethodSignature*
6805 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6807 MonoMethodSignature *fsig;
/* Wrappers carry the signature in their private data */
6810 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6811 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
/* Otherwise parse the signature blob from metadata */
6813 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6814 return_val_if_nok (error, NULL);
6817 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException(Exception) method,
 * caching it in a function-local static on first use.
 */
6823 throw_exception (void)
/* Cached across calls; looked up lazily from the security manager class */
6825 static MonoMethod *method = NULL;
6828 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6829 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-constructed exception object EX by calling
 * the managed SecurityManager.ThrowException helper with EX as argument.
 */
6836 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6838 MonoMethod *thrower = throw_exception ();
/* Pass the exception object as a pointer constant */
6841 EMIT_NEW_PCONST (cfg, args [0], ex);
6842 mono_emit_method_call (cfg, thrower, args, NULL);
6846 * Return the original method if a wrapper is specified. We can only access
6847 * the custom attributes from the original method.
6850 get_original_method (MonoMethod *method)
/* Not a wrapper: the method itself already carries its attributes */
6852 if (method->wrapper_type == MONO_WRAPPER_NONE)
6855 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6856 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6859 /* in other cases we need to find the original method */
6860 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit IR that
 * throws the security exception at runtime.
 */
6864 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6866 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6867 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6869 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit IR that
 * throws the security exception at runtime.
 */
6873 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6875 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6876 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6878 emit_throw_exception (cfg, ex);
6882 * Check that the IL instructions at ip are the array initialization
6883 * sequence and return the pointer to the data and the size.
6886 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/*
 * Pattern-match the canonical sequence emitted by compilers:
 */
6889 * newarr[System.Int32]
6891 * ldtoken field valuetype ...
6892 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* dup; ldtoken <field>; call — ip [5] == 0x4 checks the token table byte */
6894 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6896 guint32 token = read32 (ip + 7);
6897 guint32 field_token = read32 (ip + 2);
/* Low 24 bits of the token are the row index into the Field table */
6898 guint32 field_index = field_token & 0xffffff;
6900 const char *data_ptr;
6902 MonoMethod *cmethod;
6903 MonoClass *dummy_class;
6904 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6908 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6912 *out_field_token = field_token;
6914 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only RuntimeHelpers.InitializeArray from corlib qualifies */
6917 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6919 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6923 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6924 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* The RVA blob must be at least as large as the field's declared size */
6941 if (size > mono_type_size (field->type, &dummy_align))
6944 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6945 if (!image_is_dynamic (method->klass->image)) {
6946 field_index = read32 (ip + 2) & 0xffffff;
/* Resolve the field's RVA and map it to the raw data in the image */
6947 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6948 data_ptr = mono_image_rva_map (method->klass->image, rva);
6949 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6950 /* for aot code we do the lookup on load */
6951 if (aot && data_ptr)
6952 return (const char *)GUINT_TO_POINTER (rva);
6954 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data straight from the field */
6956 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD, including a disassembly of the offending instruction when the
 * method body can be parsed. The header is kept alive in
 * cfg->headers_to_free so the error message remains valid.
 */
6964 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6967 char *method_fname = mono_method_full_name (method, TRUE);
6969 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
/* Header unavailable: report the parse error instead of a disassembly */
6972 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6973 mono_error_cleanup (&error);
6974 } else if (header->code_size == 0)
6975 method_code = g_strdup ("method body is empty.");
/* Otherwise disassemble the single instruction at ip for the message */
6977 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6978 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6979 g_free (method_fname);
6980 g_free (method_code);
6981 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value SP [0] into local N. When the store
 * would be a plain register move of a constant that was just emitted, avoid
 * the move by retargeting the constant's destination register instead.
 */
6985 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6988 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* Only safe when sp [0] is literally the last ins emitted into this bblock */
6989 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6990 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6991 /* Optimize reg-reg moves away */
6993 * Can't optimize other opcodes, since sp[0] might point to
6994 * the last ins of a decomposed opcode.
/* Retarget the constant straight into the local's register */
6996 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit an explicit local store */
6998 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7003 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * cases. Returns the new ip past the recognized sequence, presumably NULL
 * when no optimization applies — TODO confirm against the elided lines.
 */
7006 static inline unsigned char *
7007 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* SIZE selects the wide (ldloca) vs short (ldloca.s) operand encoding */
7017 local = read16 (ip + 2);
/* ldloca followed by initobj in the same bblock => init the local directly */
7021 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7022 /* From the INITOBJ case */
7023 token = read32 (ip + 2);
7024 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7025 CHECK_TYPELOAD (klass);
7026 type = mini_get_underlying_type (&klass->byval_arg);
7027 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call to CMETHOD in llvm-only mode, where vtable
 * and IMT slots hold function descriptors (addr + arg pairs) instead of code
 * addresses/trampolines. SP [0] is the receiver. Picks one of four
 * strategies: plain virtual call, simple interface call, generic-virtual/
 * variant-interface call through a lazily-extended IMT thunk, or a fully
 * dynamic resolver icall (gsharedvt and remaining cases).
 */
7035 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7037 MonoInst *icall_args [16];
7038 MonoInst *call_target, *ins, *vtable_ins;
7039 int arg_reg, this_reg, vtable_reg;
7040 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7041 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7042 gboolean variant_iface = FALSE;
7045 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7048 * In llvm-only mode, vtables contain function descriptors instead of
7049 * method addresses/trampolines.
/* Explicit null check on the receiver before any vtable load */
7051 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots, classes use vtable indexes */
7054 slot = mono_method_get_imt_slot (cmethod);
7056 slot = mono_method_get_vtable_index (cmethod);
7058 this_reg = sp [0]->dreg;
7060 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7061 variant_iface = TRUE;
7063 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7065 * The simplest case, a normal virtual call.
7067 int slot_reg = alloc_preg (cfg);
7068 int addr_reg = alloc_preg (cfg);
7069 int arg_reg = alloc_preg (cfg);
7070 MonoBasicBlock *non_null_bb;
7072 vtable_reg = alloc_preg (cfg);
7073 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7074 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7076 /* Load the vtable slot, which contains a function descriptor. */
7077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7079 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-null) */
7081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7082 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7083 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7086 // FIXME: Make the wrapper use the preserveall cconv
7087 // FIXME: Use one icall per slot for small slot numbers ?
/* Slow path: initialize the vtable slot via icall */
7088 icall_args [0] = vtable_ins;
7089 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7090 /* Make the icall return the vtable slot value to save some code space */
7091 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7092 ins->dreg = slot_reg;
7093 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7096 MONO_START_BB (cfg, non_null_bb);
7097 /* Load the address + arg from the vtable slot */
7098 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7099 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7101 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7104 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7106 * A simple interface call
7108 * We make a call through an imt slot to obtain the function descriptor we need to call.
7109 * The imt slot contains a function descriptor for a runtime function + arg.
7111 int slot_reg = alloc_preg (cfg);
7112 int addr_reg = alloc_preg (cfg);
7113 int arg_reg = alloc_preg (cfg);
7114 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7116 vtable_reg = alloc_preg (cfg);
7117 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper */
7118 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7121 * The slot is already initialized when the vtable is created so there is no need
7125 /* Load the imt slot, which contains a function descriptor. */
7126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7128 /* Load the address + arg of the imt thunk from the imt slot */
7129 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7130 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7132 * IMT thunks in llvm-only mode are C functions which take an info argument
7133 * plus the imt method and return the ftndesc to call.
7135 icall_args [0] = thunk_arg_ins;
7136 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7137 cmethod, MONO_RGCTX_INFO_METHOD);
7138 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7140 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7143 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7145 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7146 * dynamically extended as more instantiations are discovered.
7147 * This handles generic virtual methods both on classes and interfaces.
7149 int slot_reg = alloc_preg (cfg);
7150 int addr_reg = alloc_preg (cfg);
7151 int arg_reg = alloc_preg (cfg);
7152 int ftndesc_reg = alloc_preg (cfg);
7153 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7154 MonoBasicBlock *slowpath_bb, *end_bb;
7156 NEW_BBLOCK (cfg, slowpath_bb);
7157 NEW_BBLOCK (cfg, end_bb);
7159 vtable_reg = alloc_preg (cfg);
7160 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT offset for interfaces, vtable offset for classes */
7162 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7164 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7166 /* Load the slot, which contains a function descriptor. */
7167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7169 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7170 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7175 /* Same as with iface calls */
7176 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7177 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7178 icall_args [0] = thunk_arg_ins;
7179 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7180 cmethod, MONO_RGCTX_INFO_METHOD);
7181 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7182 ftndesc_ins->dreg = ftndesc_reg;
7184 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7185 * they don't know about yet. Fall back to the slowpath in that case.
7187 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7188 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall */
7193 MONO_START_BB (cfg, slowpath_bb);
7194 icall_args [0] = vtable_ins;
7195 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7196 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7197 cmethod, MONO_RGCTX_INFO_METHOD);
7199 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7201 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7202 ftndesc_ins->dreg = ftndesc_reg;
7203 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7206 MONO_START_BB (cfg, end_bb);
7207 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7211 * Non-optimized cases
7213 icall_args [0] = sp [0];
7214 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7216 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7217 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-param: the resolver writes the extra call argument through this var */
7219 arg_reg = alloc_preg (cfg);
7220 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7221 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7223 g_assert (is_gsharedvt);
7225 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7227 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7230 * Pass the extra argument even if the callee doesn't receive it, most
7231 * calling conventions allow this.
7233 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS derives from (or is) System.Exception, by walking
 * the parent chain.
 */
7237 is_exception_class (MonoClass *klass)
7240 if (klass == mono_defaults.exception_class)
7242 klass = klass->parent;
7248 * is_jit_optimizer_disabled:
7250 * Determine whenever M's assembly has a DebuggableAttribute with the
7251 * IsJITOptimizerDisabled flag set.
7254 is_jit_optimizer_disabled (MonoMethod *m)
7257 MonoAssembly *ass = m->klass->image->assembly;
7258 MonoCustomAttrInfo* attrs;
7261 gboolean val = FALSE;
/* Fast path: result cached on the assembly */
7264 if (ass->jit_optimizer_disabled_inited)
7265 return ass->jit_optimizer_disabled;
7267 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class missing => optimizer cannot be disabled; cache
 * FALSE. The barrier orders the value store before the inited flag. */
7271 ass->jit_optimizer_disabled = FALSE;
7272 mono_memory_barrier ();
7273 ass->jit_optimizer_disabled_inited = TRUE;
7277 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7278 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan assembly-level attributes for a DebuggableAttribute ctor */
7280 for (i = 0; i < attrs->num_attrs; ++i) {
7281 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7283 MonoMethodSignature *sig;
7285 if (!attr->ctor || attr->ctor->klass != klass)
7287 /* Decode the attribute. See reflection.c */
7288 p = (const char*)attr->data;
/* Custom attribute blobs start with the prolog 0x0001 */
7289 g_assert (read16 (p) == 0x0001);
7292 // FIXME: Support named parameters
7293 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here */
7294 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7296 /* Two boolean arguments */
7300 mono_custom_attrs_free (attrs);
/* Publish the computed value, then the inited flag (barrier in between) */
7303 ass->jit_optimizer_disabled = val;
7304 mono_memory_barrier ();
7305 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call CMETHOD/FSIG made from METHOD with CALL_OPCODE
 * can be compiled as a tail call. Starts from the arch-specific answer and
 * then vetoes cases where the callee could observe the caller's stack or
 * where the JIT cannot safely drop the caller's frame.
 */
7311 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7313 gboolean supported_tail_call;
7316 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7318 for (i = 0; i < fsig->param_count; ++i) {
7319 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7320 /* These can point to the current method's stack */
7321 supported_tail_call = FALSE;
7323 if (fsig->hasthis && cmethod->klass->valuetype)
7324 /* this might point to the current method's stack */
7325 supported_tail_call = FALSE;
/* P/Invokes, LMF-saving frames and most wrappers cannot tail call */
7326 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7327 supported_tail_call = FALSE;
7328 if (cfg->method->save_lmf)
7329 supported_tail_call = FALSE;
7330 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7331 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites qualify (not callvirt/calli) */
7332 if (call_opcode != CEE_CALL)
7333 supported_tail_call = FALSE;
7335 /* Debugging support */
7337 if (supported_tail_call) {
/* mono_debug_count () lets tail calls be bisected away for debugging */
7338 if (!mono_debug_count ())
7339 supported_tail_call = FALSE;
7343 return supported_tail_call;
7349 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * Emits the ctor invocation for a NEWOBJ: picks between intrinsic, inlined,
 * gsharedvt indirect, generic-context indirect and plain direct call forms.
 * SP holds the receiver (+args); *INLINE_COSTS is updated when inlining.
 * NOTE(review): some lines are elided in this view.
 */
7352 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7353 MonoInst **sp, guint8 *ip, int *inline_costs)
7355 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an rgctx/vtable argument */
7357 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7358 mono_method_is_generic_sharable (cmethod, TRUE)) {
7359 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7360 mono_class_vtable (cfg->domain, cmethod->klass);
7361 CHECK_TYPELOAD (cmethod->klass);
7363 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7364 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7367 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7368 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared case: embed the concrete vtable as a constant */
7370 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7372 CHECK_TYPELOAD (cmethod->klass);
7373 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7378 /* Avoid virtual calls to ctors if possible */
7379 if (mono_class_is_marshalbyref (cmethod->klass))
7380 callvirt_this_arg = sp [0];
/* 1) Intrinsic implementation of the ctor, if the opt is enabled */
7382 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7383 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7384 CHECK_CFG_EXCEPTION;
/* 2) Try inlining the ctor body (never for Exception subclasses) */
7385 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7386 mono_method_check_inlining (cfg, cmethod) &&
7387 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7390 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7391 cfg->real_offset += 5;
/* Inlined: account for its cost minus the call we avoided */
7393 *inline_costs += costs - 5;
7395 INLINE_FAILURE ("inline failure");
7396 // FIXME-VT: Clean this up
7397 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7398 GSHAREDVT_FAILURE(*ip);
7399 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signature: go through the out trampoline indirectly */
7401 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7404 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7406 if (cfg->llvm_only) {
7407 // FIXME: Avoid initializing vtable_arg
7408 emit_llvmonly_calli (cfg, fsig, sp, addr);
7410 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) Generic context without sharing: indirect call via rgctx-fetched code */
7412 } else if (context_used &&
7413 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7414 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7415 MonoInst *cmethod_addr;
7417 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7419 if (cfg->llvm_only) {
7420 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7421 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7422 emit_llvmonly_calli (cfg, fsig, sp, addr);
7424 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7425 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7427 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) Fallback: ordinary direct call to the ctor */
7430 INLINE_FAILURE ("ctor call");
7431 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7432 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the IR that sets the current method's return value to VAL.
 * Valuetypes returned via STOBJ are stored through the vret address (or
 * the return variable when no vret_addr exists); soft-float R4 returns are
 * converted through an icall; everything else goes through the
 * arch-specific setret.
 */
7439 emit_setret (MonoCompile *cfg, MonoInst *val)
7441 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype returned by hidden pointer (STOBJ) */
7444 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7447 if (!cfg->vret_addr) {
7448 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7450 EMIT_NEW_RETLOADA (cfg, ret_addr);
7452 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7453 ins->klass = mono_class_from_mono_type (ret_type);
7456 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value via icall before the arch setret */
7457 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7458 MonoInst *iargs [1];
7462 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7463 mono_arch_emit_setret (cfg, cfg->method, conv);
7465 mono_arch_emit_setret (cfg, cfg->method, val);
7468 mono_arch_emit_setret (cfg, cfg->method, val);
7474 * mono_method_to_ir:
7476 * Translate the .net IL into linear IR.
7478 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7479 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7480 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7481 * @inline_args: if not NULL, contains the arguments to the inline call
7482 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7483 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7485 * This method is used to turn ECMA IL into Mono's internal Linear IR
7486 * reprensetation. It is used both for entire methods, as well as
7487 * inlining existing methods. In the former case, the @start_bblock,
7488 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7489 * inline_offset is set to zero.
7491 * Returns: the inline cost, or -1 if there was an error processing this method.
7494 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7495 MonoInst *return_var, MonoInst **inline_args,
7496 guint inline_offset, gboolean is_virtual_call)
7499 MonoInst *ins, **sp, **stack_start;
7500 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7501 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7502 MonoMethod *cmethod, *method_definition;
7503 MonoInst **arg_array;
7504 MonoMethodHeader *header;
7506 guint32 token, ins_flag;
7508 MonoClass *constrained_class = NULL;
7509 unsigned char *ip, *end, *target, *err_pos;
7510 MonoMethodSignature *sig;
7511 MonoGenericContext *generic_context = NULL;
7512 MonoGenericContainer *generic_container = NULL;
7513 MonoType **param_types;
7514 int i, n, start_new_bblock, dreg;
7515 int num_calls = 0, inline_costs = 0;
7516 int breakpoint_id = 0;
7518 GSList *class_inits = NULL;
7519 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7521 gboolean init_locals, seq_points, skip_dead_blocks;
7522 gboolean sym_seq_points = FALSE;
7523 MonoDebugMethodInfo *minfo;
7524 MonoBitSet *seq_point_locs = NULL;
7525 MonoBitSet *seq_point_set_locs = NULL;
7527 cfg->disable_inline = is_jit_optimizer_disabled (method);
7529 /* serialization and xdomain stuff may need access to private fields and methods */
7530 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7531 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7532 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7533 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7534 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7535 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7537 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7538 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7539 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7540 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7541 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7543 image = method->klass->image;
7544 header = mono_method_get_header_checked (method, &cfg->error);
7546 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7547 goto exception_exit;
7549 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7552 generic_container = mono_method_get_generic_container (method);
7553 sig = mono_method_signature (method);
7554 num_args = sig->hasthis + sig->param_count;
7555 ip = (unsigned char*)header->code;
7556 cfg->cil_start = ip;
7557 end = ip + header->code_size;
7558 cfg->stat_cil_code_size += header->code_size;
7560 seq_points = cfg->gen_seq_points && cfg->method == method;
7562 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7563 /* We could hit a seq point before attaching to the JIT (#8338) */
7567 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7568 minfo = mono_debug_lookup_method (method);
7570 MonoSymSeqPoint *sps;
7571 int i, n_il_offsets;
7573 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7574 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7575 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7576 sym_seq_points = TRUE;
7577 for (i = 0; i < n_il_offsets; ++i) {
7578 if (sps [i].il_offset < header->code_size)
7579 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7583 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7585 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7587 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7588 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7590 mono_debug_free_method_async_debug_info (asyncMethod);
7592 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7593 /* Methods without line number info like auto-generated property accessors */
7594 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7595 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7596 sym_seq_points = TRUE;
7601 * Methods without init_locals set could cause asserts in various passes
7602 * (#497220). To work around this, we emit dummy initialization opcodes
7603 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7604 * on some platforms.
7606 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7607 init_locals = header->init_locals;
7611 method_definition = method;
7612 while (method_definition->is_inflated) {
7613 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7614 method_definition = imethod->declaring;
7617 /* SkipVerification is not allowed if core-clr is enabled */
7618 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7620 dont_verify_stloc = TRUE;
7623 if (sig->is_inflated)
7624 generic_context = mono_method_get_context (method);
7625 else if (generic_container)
7626 generic_context = &generic_container->context;
7627 cfg->generic_context = generic_context;
7630 g_assert (!sig->has_type_parameters);
7632 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7633 g_assert (method->is_inflated);
7634 g_assert (mono_method_get_context (method)->method_inst);
7636 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7637 g_assert (sig->generic_param_count);
7639 if (cfg->method == method) {
7640 cfg->real_offset = 0;
7642 cfg->real_offset = inline_offset;
7645 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7646 cfg->cil_offset_to_bb_len = header->code_size;
7648 cfg->current_method = method;
7650 if (cfg->verbose_level > 2)
7651 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7653 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7655 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7656 for (n = 0; n < sig->param_count; ++n)
7657 param_types [n + sig->hasthis] = sig->params [n];
7658 cfg->arg_types = param_types;
7660 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7661 if (cfg->method == method) {
7663 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7664 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7667 NEW_BBLOCK (cfg, start_bblock);
7668 cfg->bb_entry = start_bblock;
7669 start_bblock->cil_code = NULL;
7670 start_bblock->cil_length = 0;
7673 NEW_BBLOCK (cfg, end_bblock);
7674 cfg->bb_exit = end_bblock;
7675 end_bblock->cil_code = NULL;
7676 end_bblock->cil_length = 0;
7677 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7678 g_assert (cfg->num_bblocks == 2);
7680 arg_array = cfg->args;
7682 if (header->num_clauses) {
7683 cfg->spvars = g_hash_table_new (NULL, NULL);
7684 cfg->exvars = g_hash_table_new (NULL, NULL);
7686 /* handle exception clauses */
7687 for (i = 0; i < header->num_clauses; ++i) {
7688 MonoBasicBlock *try_bb;
7689 MonoExceptionClause *clause = &header->clauses [i];
7690 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7692 try_bb->real_offset = clause->try_offset;
7693 try_bb->try_start = TRUE;
7694 try_bb->region = ((i + 1) << 8) | clause->flags;
7695 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7696 tblock->real_offset = clause->handler_offset;
7697 tblock->flags |= BB_EXCEPTION_HANDLER;
7700 * Linking the try block with the EH block hinders inlining as we won't be able to
7701 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7703 if (COMPILE_LLVM (cfg))
7704 link_bblock (cfg, try_bb, tblock);
7706 if (*(ip + clause->handler_offset) == CEE_POP)
7707 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7709 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7710 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7711 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7712 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7713 MONO_ADD_INS (tblock, ins);
7715 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7716 /* finally clauses already have a seq point */
7717 /* seq points for filter clauses are emitted below */
7718 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7719 MONO_ADD_INS (tblock, ins);
7722 /* todo: is a fault block unsafe to optimize? */
7723 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7724 tblock->flags |= BB_EXCEPTION_UNSAFE;
7727 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7729 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7731 /* catch and filter blocks get the exception object on the stack */
7732 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7733 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7735 /* mostly like handle_stack_args (), but just sets the input args */
7736 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7737 tblock->in_scount = 1;
7738 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7739 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7743 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7744 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7745 if (!cfg->compile_llvm) {
7746 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7747 ins->dreg = tblock->in_stack [0]->dreg;
7748 MONO_ADD_INS (tblock, ins);
7751 MonoInst *dummy_use;
7754 * Add a dummy use for the exvar so its liveness info will be
7757 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7760 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7761 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7762 MONO_ADD_INS (tblock, ins);
7765 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7766 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7767 tblock->flags |= BB_EXCEPTION_HANDLER;
7768 tblock->real_offset = clause->data.filter_offset;
7769 tblock->in_scount = 1;
7770 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7771 /* The filter block shares the exvar with the handler block */
7772 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7773 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7774 MONO_ADD_INS (tblock, ins);
7778 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7779 clause->data.catch_class &&
7781 mono_class_check_context_used (clause->data.catch_class)) {
7783 * In shared generic code with catch
7784 * clauses containing type variables
7785 * the exception handling code has to
7786 * be able to get to the rgctx.
7787 * Therefore we have to make sure that
7788 * the vtable/mrgctx argument (for
7789 * static or generic methods) or the
7790 * "this" argument (for non-static
7791 * methods) are live.
7793 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7794 mini_method_get_context (method)->method_inst ||
7795 method->klass->valuetype) {
7796 mono_get_vtable_var (cfg);
7798 MonoInst *dummy_use;
7800 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7805 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7806 cfg->cbb = start_bblock;
7807 cfg->args = arg_array;
7808 mono_save_args (cfg, sig, inline_args);
7811 /* FIRST CODE BLOCK */
7812 NEW_BBLOCK (cfg, tblock);
7813 tblock->cil_code = ip;
7817 ADD_BBLOCK (cfg, tblock);
7819 if (cfg->method == method) {
7820 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7821 if (breakpoint_id) {
7822 MONO_INST_NEW (cfg, ins, OP_BREAK);
7823 MONO_ADD_INS (cfg->cbb, ins);
7827 /* we use a separate basic block for the initialization code */
7828 NEW_BBLOCK (cfg, init_localsbb);
7829 if (cfg->method == method)
7830 cfg->bb_init = init_localsbb;
7831 init_localsbb->real_offset = cfg->real_offset;
7832 start_bblock->next_bb = init_localsbb;
7833 init_localsbb->next_bb = cfg->cbb;
7834 link_bblock (cfg, start_bblock, init_localsbb);
7835 link_bblock (cfg, init_localsbb, cfg->cbb);
7837 cfg->cbb = init_localsbb;
7839 if (cfg->gsharedvt && cfg->method == method) {
7840 MonoGSharedVtMethodInfo *info;
7841 MonoInst *var, *locals_var;
7844 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7845 info->method = cfg->method;
7846 info->count_entries = 16;
7847 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7848 cfg->gsharedvt_info = info;
7850 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7851 /* prevent it from being register allocated */
7852 //var->flags |= MONO_INST_VOLATILE;
7853 cfg->gsharedvt_info_var = var;
7855 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7856 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7858 /* Allocate locals */
7859 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7860 /* prevent it from being register allocated */
7861 //locals_var->flags |= MONO_INST_VOLATILE;
7862 cfg->gsharedvt_locals_var = locals_var;
7864 dreg = alloc_ireg (cfg);
7865 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7867 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7868 ins->dreg = locals_var->dreg;
7870 MONO_ADD_INS (cfg->cbb, ins);
7871 cfg->gsharedvt_locals_var_ins = ins;
7873 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7876 ins->flags |= MONO_INST_INIT;
7880 if (mono_security_core_clr_enabled ()) {
7881 /* check if this is native code, e.g. an icall or a p/invoke */
7882 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7883 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7885 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7886 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7888 /* if this is a native call then it can only be JITted from platform code */
7889 if ((icall || pinvk) && method->klass && method->klass->image) {
7890 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7891 MonoException *ex = icall ? mono_get_exception_security () :
7892 mono_get_exception_method_access ();
7893 emit_throw_exception (cfg, ex);
7900 CHECK_CFG_EXCEPTION;
7902 if (header->code_size == 0)
7905 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7910 if (cfg->method == method)
7911 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7913 for (n = 0; n < header->num_locals; ++n) {
7914 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7919 /* We force the vtable variable here for all shared methods
7920 for the possibility that they might show up in a stack
7921 trace where their exact instantiation is needed. */
7922 if (cfg->gshared && method == cfg->method) {
7923 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7924 mini_method_get_context (method)->method_inst ||
7925 method->klass->valuetype) {
7926 mono_get_vtable_var (cfg);
7928 /* FIXME: Is there a better way to do this?
7929 We need the variable live for the duration
7930 of the whole method. */
7931 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7935 /* add a check for this != NULL to inlined methods */
7936 if (is_virtual_call) {
7939 NEW_ARGLOAD (cfg, arg_ins, 0);
7940 MONO_ADD_INS (cfg->cbb, arg_ins);
7941 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7944 skip_dead_blocks = !dont_verify;
7945 if (skip_dead_blocks) {
7946 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7951 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7952 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7955 start_new_bblock = 0;
7957 if (cfg->method == method)
7958 cfg->real_offset = ip - header->code;
7960 cfg->real_offset = inline_offset;
7965 if (start_new_bblock) {
7966 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7967 if (start_new_bblock == 2) {
7968 g_assert (ip == tblock->cil_code);
7970 GET_BBLOCK (cfg, tblock, ip);
7972 cfg->cbb->next_bb = tblock;
7974 start_new_bblock = 0;
7975 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7976 if (cfg->verbose_level > 3)
7977 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7978 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7982 g_slist_free (class_inits);
7985 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7986 link_bblock (cfg, cfg->cbb, tblock);
7987 if (sp != stack_start) {
7988 handle_stack_args (cfg, stack_start, sp - stack_start);
7990 CHECK_UNVERIFIABLE (cfg);
7992 cfg->cbb->next_bb = tblock;
7994 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7995 if (cfg->verbose_level > 3)
7996 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7997 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8000 g_slist_free (class_inits);
8005 if (skip_dead_blocks) {
8006 int ip_offset = ip - header->code;
8008 if (ip_offset == bb->end)
8012 int op_size = mono_opcode_size (ip, end);
8013 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8015 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8017 if (ip_offset + op_size == bb->end) {
8018 MONO_INST_NEW (cfg, ins, OP_NOP);
8019 MONO_ADD_INS (cfg->cbb, ins);
8020 start_new_bblock = 1;
8028 * Sequence points are points where the debugger can place a breakpoint.
8029 * Currently, we generate these automatically at points where the IL
8032 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8034 * Make methods interruptible at the beginning, and at the targets of
8035 * backward branches.
8036 * Also, do this at the start of every bblock in methods with clauses too,
8037 * to be able to handle instructions with imprecise control flow like
8039 * Backward branches are handled at the end of method-to-ir ().
8041 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8042 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8044 /* Avoid sequence points on empty IL like .volatile */
8045 // FIXME: Enable this
8046 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8047 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8048 if ((sp != stack_start) && !sym_seq_point)
8049 ins->flags |= MONO_INST_NONEMPTY_STACK;
8050 MONO_ADD_INS (cfg->cbb, ins);
8053 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8056 cfg->cbb->real_offset = cfg->real_offset;
8058 if ((cfg->method == method) && cfg->coverage_info) {
8059 guint32 cil_offset = ip - header->code;
8060 cfg->coverage_info->data [cil_offset].cil_code = ip;
8062 /* TODO: Use an increment here */
8063 #if defined(TARGET_X86)
8064 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8065 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8067 MONO_ADD_INS (cfg->cbb, ins);
8069 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8070 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8074 if (cfg->verbose_level > 3)
8075 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8079 if (seq_points && !sym_seq_points && sp != stack_start) {
8081 * The C# compiler uses these nops to notify the JIT that it should
8082 * insert seq points.
8084 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8085 MONO_ADD_INS (cfg->cbb, ins);
8087 if (cfg->keep_cil_nops)
8088 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8090 MONO_INST_NEW (cfg, ins, OP_NOP);
8092 MONO_ADD_INS (cfg->cbb, ins);
8095 if (should_insert_brekpoint (cfg->method)) {
8096 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8098 MONO_INST_NEW (cfg, ins, OP_NOP);
8101 MONO_ADD_INS (cfg->cbb, ins);
8107 CHECK_STACK_OVF (1);
8108 n = (*ip)-CEE_LDARG_0;
8110 EMIT_NEW_ARGLOAD (cfg, ins, n);
8118 CHECK_STACK_OVF (1);
8119 n = (*ip)-CEE_LDLOC_0;
8121 EMIT_NEW_LOCLOAD (cfg, ins, n);
8130 n = (*ip)-CEE_STLOC_0;
8133 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8135 emit_stloc_ir (cfg, sp, header, n);
8142 CHECK_STACK_OVF (1);
8145 EMIT_NEW_ARGLOAD (cfg, ins, n);
8151 CHECK_STACK_OVF (1);
8154 NEW_ARGLOADA (cfg, ins, n);
8155 MONO_ADD_INS (cfg->cbb, ins);
8165 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8167 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8172 CHECK_STACK_OVF (1);
8175 EMIT_NEW_LOCLOAD (cfg, ins, n);
8179 case CEE_LDLOCA_S: {
8180 unsigned char *tmp_ip;
8182 CHECK_STACK_OVF (1);
8183 CHECK_LOCAL (ip [1]);
8185 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8191 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8200 CHECK_LOCAL (ip [1]);
8201 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8203 emit_stloc_ir (cfg, sp, header, ip [1]);
8208 CHECK_STACK_OVF (1);
8209 EMIT_NEW_PCONST (cfg, ins, NULL);
8210 ins->type = STACK_OBJ;
8215 CHECK_STACK_OVF (1);
8216 EMIT_NEW_ICONST (cfg, ins, -1);
8229 CHECK_STACK_OVF (1);
8230 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8236 CHECK_STACK_OVF (1);
8238 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8244 CHECK_STACK_OVF (1);
8245 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8251 CHECK_STACK_OVF (1);
8252 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8253 ins->type = STACK_I8;
8254 ins->dreg = alloc_dreg (cfg, STACK_I8);
8256 ins->inst_l = (gint64)read64 (ip);
8257 MONO_ADD_INS (cfg->cbb, ins);
8263 gboolean use_aotconst = FALSE;
8265 #ifdef TARGET_POWERPC
8266 /* FIXME: Clean this up */
8267 if (cfg->compile_aot)
8268 use_aotconst = TRUE;
8271 /* FIXME: we should really allocate this only late in the compilation process */
8272 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8274 CHECK_STACK_OVF (1);
8280 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8282 dreg = alloc_freg (cfg);
8283 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8284 ins->type = cfg->r4_stack_type;
8286 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8287 ins->type = cfg->r4_stack_type;
8288 ins->dreg = alloc_dreg (cfg, STACK_R8);
8290 MONO_ADD_INS (cfg->cbb, ins);
8300 gboolean use_aotconst = FALSE;
8302 #ifdef TARGET_POWERPC
8303 /* FIXME: Clean this up */
8304 if (cfg->compile_aot)
8305 use_aotconst = TRUE;
8308 /* FIXME: we should really allocate this only late in the compilation process */
8309 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8311 CHECK_STACK_OVF (1);
8317 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8319 dreg = alloc_freg (cfg);
8320 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8321 ins->type = STACK_R8;
8323 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8324 ins->type = STACK_R8;
8325 ins->dreg = alloc_dreg (cfg, STACK_R8);
8327 MONO_ADD_INS (cfg->cbb, ins);
8336 MonoInst *temp, *store;
8338 CHECK_STACK_OVF (1);
8342 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8343 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8345 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8348 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8361 if (sp [0]->type == STACK_R8)
8362 /* we need to pop the value from the x86 FP stack */
8363 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8368 MonoMethodSignature *fsig;
8371 INLINE_FAILURE ("jmp");
8372 GSHAREDVT_FAILURE (*ip);
8375 if (stack_start != sp)
8377 token = read32 (ip + 1);
8378 /* FIXME: check the signature matches */
8379 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8382 if (cfg->gshared && mono_method_check_context_used (cmethod))
8383 GENERIC_SHARING_FAILURE (CEE_JMP);
8385 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8387 fsig = mono_method_signature (cmethod);
8388 n = fsig->param_count + fsig->hasthis;
8389 if (cfg->llvm_only) {
8392 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8393 for (i = 0; i < n; ++i)
8394 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8395 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8397 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8398 * have to emit a normal return since llvm expects it.
8401 emit_setret (cfg, ins);
8402 MONO_INST_NEW (cfg, ins, OP_BR);
8403 ins->inst_target_bb = end_bblock;
8404 MONO_ADD_INS (cfg->cbb, ins);
8405 link_bblock (cfg, cfg->cbb, end_bblock);
8408 } else if (cfg->backend->have_op_tail_call) {
8409 /* Handle tail calls similarly to calls */
8412 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8413 call->method = cmethod;
8414 call->tail_call = TRUE;
8415 call->signature = mono_method_signature (cmethod);
8416 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8417 call->inst.inst_p0 = cmethod;
8418 for (i = 0; i < n; ++i)
8419 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8421 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8422 call->vret_var = cfg->vret_addr;
8424 mono_arch_emit_call (cfg, call);
8425 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8426 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8428 for (i = 0; i < num_args; ++i)
8429 /* Prevent arguments from being optimized away */
8430 arg_array [i]->flags |= MONO_INST_VOLATILE;
8432 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8433 ins = (MonoInst*)call;
8434 ins->inst_p0 = cmethod;
8435 MONO_ADD_INS (cfg->cbb, ins);
8439 start_new_bblock = 1;
8444 MonoMethodSignature *fsig;
8447 token = read32 (ip + 1);
8451 //GSHAREDVT_FAILURE (*ip);
8456 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8459 if (method->dynamic && fsig->pinvoke) {
8463 * This is a call through a function pointer using a pinvoke
8464 * signature. Have to create a wrapper and call that instead.
8465 * FIXME: This is very slow, need to create a wrapper at JIT time
8466 * instead based on the signature.
8468 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8469 EMIT_NEW_PCONST (cfg, args [1], fsig);
8471 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8474 n = fsig->param_count + fsig->hasthis;
8478 //g_assert (!virtual_ || fsig->hasthis);
8482 inline_costs += 10 * num_calls++;
8485 * Making generic calls out of gsharedvt methods.
8486 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8487 * patching gshared method addresses into a gsharedvt method.
8489 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8491 * We pass the address to the gsharedvt trampoline in the rgctx reg
8493 MonoInst *callee = addr;
8495 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8497 GSHAREDVT_FAILURE (*ip);
8501 GSHAREDVT_FAILURE (*ip);
8503 addr = emit_get_rgctx_sig (cfg, context_used,
8504 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8505 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8509 /* Prevent inlining of methods with indirect calls */
8510 INLINE_FAILURE ("indirect call");
8512 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8513 MonoJumpInfoType info_type;
8517 * Instead of emitting an indirect call, emit a direct call
8518 * with the contents of the aotconst as the patch info.
8520 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8521 info_type = (MonoJumpInfoType)addr->inst_c1;
8522 info_data = addr->inst_p0;
8524 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8525 info_data = addr->inst_right->inst_left;
8528 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8529 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8532 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8533 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8538 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8542 /* End of call, INS should contain the result of the call, if any */
8544 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8546 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8549 CHECK_CFG_EXCEPTION;
8553 constrained_class = NULL;
8557 case CEE_CALLVIRT: {
8558 MonoInst *addr = NULL;
8559 MonoMethodSignature *fsig = NULL;
8561 int virtual_ = *ip == CEE_CALLVIRT;
8562 gboolean pass_imt_from_rgctx = FALSE;
8563 MonoInst *imt_arg = NULL;
8564 MonoInst *keep_this_alive = NULL;
8565 gboolean pass_vtable = FALSE;
8566 gboolean pass_mrgctx = FALSE;
8567 MonoInst *vtable_arg = NULL;
8568 gboolean check_this = FALSE;
8569 gboolean supported_tail_call = FALSE;
8570 gboolean tail_call = FALSE;
8571 gboolean need_seq_point = FALSE;
8572 guint32 call_opcode = *ip;
8573 gboolean emit_widen = TRUE;
8574 gboolean push_res = TRUE;
8575 gboolean skip_ret = FALSE;
8576 gboolean delegate_invoke = FALSE;
8577 gboolean direct_icall = FALSE;
8578 gboolean constrained_partial_call = FALSE;
8579 MonoMethod *cil_method;
8582 token = read32 (ip + 1);
8586 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8589 cil_method = cmethod;
8591 if (constrained_class) {
8592 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8593 if (!mini_is_gsharedvt_klass (constrained_class)) {
8594 g_assert (!cmethod->klass->valuetype);
8595 if (!mini_type_is_reference (&constrained_class->byval_arg))
8596 constrained_partial_call = TRUE;
8600 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8601 if (cfg->verbose_level > 2)
8602 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8603 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8604 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8606 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8610 if (cfg->verbose_level > 2)
8611 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8613 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8615 * This is needed since get_method_constrained can't find
8616 * the method in klass representing a type var.
8617 * The type var is guaranteed to be a reference type in this
8620 if (!mini_is_gsharedvt_klass (constrained_class))
8621 g_assert (!cmethod->klass->valuetype);
8623 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8628 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8629 /* Use the corresponding method from the base type to avoid boxing */
8630 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8631 g_assert (base_type);
8632 constrained_class = mono_class_from_mono_type (base_type);
8633 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8638 if (!dont_verify && !cfg->skip_visibility) {
8639 MonoMethod *target_method = cil_method;
8640 if (method->is_inflated) {
8641 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8644 if (!mono_method_can_access_method (method_definition, target_method) &&
8645 !mono_method_can_access_method (method, cil_method))
8646 emit_method_access_failure (cfg, method, cil_method);
8649 if (mono_security_core_clr_enabled ())
8650 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8652 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8653 /* MS.NET seems to silently convert this to a callvirt */
8658 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8659 * converts to a callvirt.
8661 * tests/bug-515884.il is an example of this behavior
8663 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8664 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8665 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8669 if (!cmethod->klass->inited)
8670 if (!mono_class_init (cmethod->klass))
8671 TYPE_LOAD_ERROR (cmethod->klass);
8673 fsig = mono_method_signature (cmethod);
8676 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8677 mini_class_is_system_array (cmethod->klass)) {
8678 array_rank = cmethod->klass->rank;
8679 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8680 direct_icall = TRUE;
8681 } else if (fsig->pinvoke) {
8682 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8683 fsig = mono_method_signature (wrapper);
8684 } else if (constrained_class) {
8686 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8690 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8691 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8693 /* See code below */
8694 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8695 MonoBasicBlock *tbb;
8697 GET_BBLOCK (cfg, tbb, ip + 5);
8698 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8700 * We want to extend the try block to cover the call, but we can't do it if the
8701 * call is made directly since it's followed by an exception check.
8703 direct_icall = FALSE;
8707 mono_save_token_info (cfg, image, token, cil_method);
8709 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8710 need_seq_point = TRUE;
8712 /* Don't support calls made using type arguments for now */
8714 if (cfg->gsharedvt) {
8715 if (mini_is_gsharedvt_signature (fsig))
8716 GSHAREDVT_FAILURE (*ip);
8720 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8721 g_assert_not_reached ();
8723 n = fsig->param_count + fsig->hasthis;
8725 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8729 g_assert (!mono_method_check_context_used (cmethod));
8733 //g_assert (!virtual_ || fsig->hasthis);
8738 * We have the `constrained.' prefix opcode.
8740 if (constrained_class) {
8741 if (mini_is_gsharedvt_klass (constrained_class)) {
8742 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8743 /* The 'Own method' case below */
8744 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8745 /* 'The type parameter is instantiated as a reference type' case below. */
8747 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8748 CHECK_CFG_EXCEPTION;
8754 if (constrained_partial_call) {
8755 gboolean need_box = TRUE;
8758 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8759 * called method is not known at compile time either. The called method could end up being
8760 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8761 * to box the receiver.
8762 * A simple solution would be to box always and make a normal virtual call, but that would
8763 * be bad performance wise.
8765 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8767 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8772 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8773 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8774 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8775 ins->klass = constrained_class;
8776 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8777 CHECK_CFG_EXCEPTION;
8778 } else if (need_box) {
8780 MonoBasicBlock *is_ref_bb, *end_bb;
8781 MonoInst *nonbox_call;
8784 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8786 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8787 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8789 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8791 NEW_BBLOCK (cfg, is_ref_bb);
8792 NEW_BBLOCK (cfg, end_bb);
8794 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8795 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8796 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8799 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8801 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8804 MONO_START_BB (cfg, is_ref_bb);
8805 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8806 ins->klass = constrained_class;
8807 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8808 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8810 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8812 MONO_START_BB (cfg, end_bb);
8815 nonbox_call->dreg = ins->dreg;
8818 g_assert (mono_class_is_interface (cmethod->klass));
8819 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8820 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8823 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8825 * The type parameter is instantiated as a valuetype,
8826 * but that type doesn't override the method we're
8827 * calling, so we need to box `this'.
8829 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8830 ins->klass = constrained_class;
8831 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8832 CHECK_CFG_EXCEPTION;
8833 } else if (!constrained_class->valuetype) {
8834 int dreg = alloc_ireg_ref (cfg);
8837 * The type parameter is instantiated as a reference
8838 * type. We have a managed pointer on the stack, so
8839 * we need to dereference it here.
8841 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8842 ins->type = STACK_OBJ;
8845 if (cmethod->klass->valuetype) {
8848 /* Interface method */
8851 mono_class_setup_vtable (constrained_class);
8852 CHECK_TYPELOAD (constrained_class);
8853 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8855 TYPE_LOAD_ERROR (constrained_class);
8856 slot = mono_method_get_vtable_slot (cmethod);
8858 TYPE_LOAD_ERROR (cmethod->klass);
8859 cmethod = constrained_class->vtable [ioffset + slot];
8861 if (cmethod->klass == mono_defaults.enum_class) {
8862 /* Enum implements some interfaces, so treat this as the first case */
8863 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8864 ins->klass = constrained_class;
8865 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8866 CHECK_CFG_EXCEPTION;
8871 constrained_class = NULL;
8874 if (check_call_signature (cfg, fsig, sp))
8877 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8878 delegate_invoke = TRUE;
8880 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8881 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8882 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8890 * If the callee is a shared method, then its static cctor
8891 * might not get called after the call was patched.
8893 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8894 emit_class_init (cfg, cmethod->klass);
8895 CHECK_TYPELOAD (cmethod->klass);
8898 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8901 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8903 context_used = mini_method_check_context_used (cfg, cmethod);
8905 if (context_used && mono_class_is_interface (cmethod->klass)) {
8906 /* Generic method interface
8907 calls are resolved via a
8908 helper function and don't
8910 if (!cmethod_context || !cmethod_context->method_inst)
8911 pass_imt_from_rgctx = TRUE;
8915 * If a shared method calls another
8916 * shared method then the caller must
8917 * have a generic sharing context
8918 * because the magic trampoline
8919 * requires it. FIXME: We shouldn't
8920 * have to force the vtable/mrgctx
8921 * variable here. Instead there
8922 * should be a flag in the cfg to
8923 * request a generic sharing context.
8926 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8927 mono_get_vtable_var (cfg);
8932 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8934 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8936 CHECK_TYPELOAD (cmethod->klass);
8937 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8942 g_assert (!vtable_arg);
8944 if (!cfg->compile_aot) {
8946 * emit_get_rgctx_method () calls mono_class_vtable () so check
8947 * for type load errors before.
8949 mono_class_setup_vtable (cmethod->klass);
8950 CHECK_TYPELOAD (cmethod->klass);
8953 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8955 /* !marshalbyref is needed to properly handle generic methods + remoting */
8956 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8957 MONO_METHOD_IS_FINAL (cmethod)) &&
8958 !mono_class_is_marshalbyref (cmethod->klass)) {
8965 if (pass_imt_from_rgctx) {
8966 g_assert (!pass_vtable);
8968 imt_arg = emit_get_rgctx_method (cfg, context_used,
8969 cmethod, MONO_RGCTX_INFO_METHOD);
8973 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8975 /* Calling virtual generic methods */
8976 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8977 !(MONO_METHOD_IS_FINAL (cmethod) &&
8978 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8979 fsig->generic_param_count &&
8980 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8982 MonoInst *this_temp, *this_arg_temp, *store;
8983 MonoInst *iargs [4];
8985 g_assert (fsig->is_inflated);
8987 /* Prevent inlining of methods that contain indirect calls */
8988 INLINE_FAILURE ("virtual generic call");
8990 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8991 GSHAREDVT_FAILURE (*ip);
8993 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8994 g_assert (!imt_arg);
8996 g_assert (cmethod->is_inflated);
8997 imt_arg = emit_get_rgctx_method (cfg, context_used,
8998 cmethod, MONO_RGCTX_INFO_METHOD);
8999 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9001 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9002 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9003 MONO_ADD_INS (cfg->cbb, store);
9005 /* FIXME: This should be a managed pointer */
9006 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9008 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9009 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9010 cmethod, MONO_RGCTX_INFO_METHOD);
9011 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9012 addr = mono_emit_jit_icall (cfg,
9013 mono_helper_compile_generic_method, iargs);
9015 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9017 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9024 * Implement a workaround for the inherent races involved in locking:
9030 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9031 * try block, the Exit () won't be executed, see:
9032 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9033 * To work around this, we extend such try blocks to include the last x bytes
9034 * of the Monitor.Enter () call.
9036 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9037 MonoBasicBlock *tbb;
9039 GET_BBLOCK (cfg, tbb, ip + 5);
9041 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9042 * from Monitor.Enter like ArgumentNullException.
9044 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9045 /* Mark this bblock as needing to be extended */
9046 tbb->extend_try_block = TRUE;
9050 /* Conversion to a JIT intrinsic */
9051 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9052 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9053 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9061 if ((cfg->opt & MONO_OPT_INLINE) &&
9062 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9063 mono_method_check_inlining (cfg, cmethod)) {
9065 gboolean always = FALSE;
9067 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9068 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9069 /* Prevent inlining of methods that call wrappers */
9070 INLINE_FAILURE ("wrapper call");
9071 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9075 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9077 cfg->real_offset += 5;
9079 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9080 /* *sp is already set by inline_method */
9085 inline_costs += costs;
9091 /* Tail recursion elimination */
9092 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9093 gboolean has_vtargs = FALSE;
9096 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9097 INLINE_FAILURE ("tail call");
9099 /* keep it simple */
9100 for (i = fsig->param_count - 1; i >= 0; i--) {
9101 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9106 if (need_seq_point) {
9107 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9108 need_seq_point = FALSE;
9110 for (i = 0; i < n; ++i)
9111 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9112 MONO_INST_NEW (cfg, ins, OP_BR);
9113 MONO_ADD_INS (cfg->cbb, ins);
9114 tblock = start_bblock->out_bb [0];
9115 link_bblock (cfg, cfg->cbb, tblock);
9116 ins->inst_target_bb = tblock;
9117 start_new_bblock = 1;
9119 /* skip the CEE_RET, too */
9120 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9127 inline_costs += 10 * num_calls++;
9130 * Synchronized wrappers.
9131 * It's hard to determine where to replace a method with its synchronized
9132 * wrapper without causing an infinite recursion. The current solution is
9133 * to add the synchronized wrapper in the trampolines, and to
9134 * change the called method to a dummy wrapper, and resolve that wrapper
9135 * to the real method in mono_jit_compile_method ().
9137 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9138 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9139 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9140 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9144 * Making generic calls out of gsharedvt methods.
9145 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9146 * patching gshared method addresses into a gsharedvt method.
9148 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9149 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9150 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9151 MonoRgctxInfoType info_type;
9154 //if (mono_class_is_interface (cmethod->klass))
9155 //GSHAREDVT_FAILURE (*ip);
9156 // disable for possible remoting calls
9157 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9158 GSHAREDVT_FAILURE (*ip);
9159 if (fsig->generic_param_count) {
9160 /* virtual generic call */
9161 g_assert (!imt_arg);
9162 /* Same as the virtual generic case above */
9163 imt_arg = emit_get_rgctx_method (cfg, context_used,
9164 cmethod, MONO_RGCTX_INFO_METHOD);
9165 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9167 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9168 /* This can happen when we call a fully instantiated iface method */
9169 imt_arg = emit_get_rgctx_method (cfg, context_used,
9170 cmethod, MONO_RGCTX_INFO_METHOD);
9175 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9176 keep_this_alive = sp [0];
9178 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9179 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9181 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9182 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9184 if (cfg->llvm_only) {
9185 // FIXME: Avoid initializing vtable_arg
9186 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9188 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9193 /* Generic sharing */
9196 * Use this if the callee is gsharedvt sharable too, since
9197 * at runtime we might find an instantiation so the call cannot
9198 * be patched (the 'no_patch' code path in mini-trampolines.c).
9200 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9201 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9202 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9203 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9204 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9205 INLINE_FAILURE ("gshared");
9207 g_assert (cfg->gshared && cmethod);
9211 * We are compiling a call to a
9212 * generic method from shared code,
9213 * which means that we have to look up
9214 * the method in the rgctx and do an
9218 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9220 if (cfg->llvm_only) {
9221 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9222 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9224 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9225 // FIXME: Avoid initializing imt_arg/vtable_arg
9226 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9228 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9229 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9234 /* Direct calls to icalls */
9236 MonoMethod *wrapper;
9239 /* Inline the wrapper */
9240 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9242 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9243 g_assert (costs > 0);
9244 cfg->real_offset += 5;
9246 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9247 /* *sp is already set by inline_method */
9252 inline_costs += costs;
9261 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9262 MonoInst *val = sp [fsig->param_count];
9264 if (val->type == STACK_OBJ) {
9265 MonoInst *iargs [2];
9270 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9273 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9274 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9275 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9276 emit_write_barrier (cfg, addr, val);
9277 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9278 GSHAREDVT_FAILURE (*ip);
9279 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9280 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9282 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9283 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9284 if (!cmethod->klass->element_class->valuetype && !readonly)
9285 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9286 CHECK_TYPELOAD (cmethod->klass);
9289 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9292 g_assert_not_reached ();
9299 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9303 /* Tail prefix / tail call optimization */
9305 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9306 /* FIXME: runtime generic context pointer for jumps? */
9307 /* FIXME: handle this for generic sharing eventually */
9308 if ((ins_flag & MONO_INST_TAILCALL) &&
9309 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9310 supported_tail_call = TRUE;
9312 if (supported_tail_call) {
9315 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9316 INLINE_FAILURE ("tail call");
9318 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9320 if (cfg->backend->have_op_tail_call) {
9321 /* Handle tail calls similarly to normal calls */
9324 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9326 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9327 call->tail_call = TRUE;
9328 call->method = cmethod;
9329 call->signature = mono_method_signature (cmethod);
9332 * We implement tail calls by storing the actual arguments into the
9333 * argument variables, then emitting a CEE_JMP.
9335 for (i = 0; i < n; ++i) {
9336 /* Prevent argument from being register allocated */
9337 arg_array [i]->flags |= MONO_INST_VOLATILE;
9338 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9340 ins = (MonoInst*)call;
9341 ins->inst_p0 = cmethod;
9342 ins->inst_p1 = arg_array [0];
9343 MONO_ADD_INS (cfg->cbb, ins);
9344 link_bblock (cfg, cfg->cbb, end_bblock);
9345 start_new_bblock = 1;
9347 // FIXME: Eliminate unreachable epilogs
9350 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9351 * only reachable from this call.
9353 GET_BBLOCK (cfg, tblock, ip + 5);
9354 if (tblock == cfg->cbb || tblock->in_count == 0)
9363 * Virtual calls in llvm-only mode.
9365 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9366 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9371 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9372 INLINE_FAILURE ("call");
9373 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9374 imt_arg, vtable_arg);
9376 if (tail_call && !cfg->llvm_only) {
9377 link_bblock (cfg, cfg->cbb, end_bblock);
9378 start_new_bblock = 1;
9380 // FIXME: Eliminate unreachable epilogs
9383 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9384 * only reachable from this call.
9386 GET_BBLOCK (cfg, tblock, ip + 5);
9387 if (tblock == cfg->cbb || tblock->in_count == 0)
9394 /* End of call, INS should contain the result of the call, if any */
9396 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9399 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9404 if (keep_this_alive) {
9405 MonoInst *dummy_use;
9407 /* See mono_emit_method_call_full () */
9408 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9411 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9413 * Clang can convert these calls to tail calls which screw up the stack
9414 * walk. This happens even when the -fno-optimize-sibling-calls
9415 * option is passed to clang.
9416 * Work around this by emitting a dummy call.
9418 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9421 CHECK_CFG_EXCEPTION;
9425 g_assert (*ip == CEE_RET);
9429 constrained_class = NULL;
9431 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9435 if (cfg->method != method) {
9436 /* return from inlined method */
9438 * If in_count == 0, that means the ret is unreachable due to
9439 * being preceded by a throw. In that case, inline_method () will
9440 * handle setting the return value
9441 * (test case: test_0_inline_throw ()).
9443 if (return_var && cfg->cbb->in_count) {
9444 MonoType *ret_type = mono_method_signature (method)->ret;
9450 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9453 //g_assert (returnvar != -1);
9454 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9455 cfg->ret_var_set = TRUE;
9458 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9460 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9464 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9466 if (seq_points && !sym_seq_points) {
9468 * Place a seq point here too even though the IL stack is not
9469 * empty, so a step over on
9472 * will work correctly.
9474 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9475 MONO_ADD_INS (cfg->cbb, ins);
9478 g_assert (!return_var);
9482 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9485 emit_setret (cfg, *sp);
9488 if (sp != stack_start)
9490 MONO_INST_NEW (cfg, ins, OP_BR);
9492 ins->inst_target_bb = end_bblock;
9493 MONO_ADD_INS (cfg->cbb, ins);
9494 link_bblock (cfg, cfg->cbb, end_bblock);
9495 start_new_bblock = 1;
9499 MONO_INST_NEW (cfg, ins, OP_BR);
9501 target = ip + 1 + (signed char)(*ip);
9503 GET_BBLOCK (cfg, tblock, target);
9504 link_bblock (cfg, cfg->cbb, tblock);
9505 ins->inst_target_bb = tblock;
9506 if (sp != stack_start) {
9507 handle_stack_args (cfg, stack_start, sp - stack_start);
9509 CHECK_UNVERIFIABLE (cfg);
9511 MONO_ADD_INS (cfg->cbb, ins);
9512 start_new_bblock = 1;
9513 inline_costs += BRANCH_COST;
9527 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9529 target = ip + 1 + *(signed char*)ip;
9535 inline_costs += BRANCH_COST;
9539 MONO_INST_NEW (cfg, ins, OP_BR);
9542 target = ip + 4 + (gint32)read32(ip);
9544 GET_BBLOCK (cfg, tblock, target);
9545 link_bblock (cfg, cfg->cbb, tblock);
9546 ins->inst_target_bb = tblock;
9547 if (sp != stack_start) {
9548 handle_stack_args (cfg, stack_start, sp - stack_start);
9550 CHECK_UNVERIFIABLE (cfg);
9553 MONO_ADD_INS (cfg->cbb, ins);
9555 start_new_bblock = 1;
9556 inline_costs += BRANCH_COST;
9563 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9564 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9565 guint32 opsize = is_short ? 1 : 4;
9567 CHECK_OPSIZE (opsize);
9569 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9572 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9577 GET_BBLOCK (cfg, tblock, target);
9578 link_bblock (cfg, cfg->cbb, tblock);
9579 GET_BBLOCK (cfg, tblock, ip);
9580 link_bblock (cfg, cfg->cbb, tblock);
9582 if (sp != stack_start) {
9583 handle_stack_args (cfg, stack_start, sp - stack_start);
9584 CHECK_UNVERIFIABLE (cfg);
9587 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9588 cmp->sreg1 = sp [0]->dreg;
9589 type_from_op (cfg, cmp, sp [0], NULL);
9592 #if SIZEOF_REGISTER == 4
9593 if (cmp->opcode == OP_LCOMPARE_IMM) {
9594 /* Convert it to OP_LCOMPARE */
9595 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9596 ins->type = STACK_I8;
9597 ins->dreg = alloc_dreg (cfg, STACK_I8);
9599 MONO_ADD_INS (cfg->cbb, ins);
9600 cmp->opcode = OP_LCOMPARE;
9601 cmp->sreg2 = ins->dreg;
9604 MONO_ADD_INS (cfg->cbb, cmp);
9606 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9607 type_from_op (cfg, ins, sp [0], NULL);
9608 MONO_ADD_INS (cfg->cbb, ins);
9609 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9610 GET_BBLOCK (cfg, tblock, target);
9611 ins->inst_true_bb = tblock;
9612 GET_BBLOCK (cfg, tblock, ip);
9613 ins->inst_false_bb = tblock;
9614 start_new_bblock = 2;
9617 inline_costs += BRANCH_COST;
9632 MONO_INST_NEW (cfg, ins, *ip);
9634 target = ip + 4 + (gint32)read32(ip);
9640 inline_costs += BRANCH_COST;
9644 MonoBasicBlock **targets;
9645 MonoBasicBlock *default_bblock;
9646 MonoJumpInfoBBTable *table;
9647 int offset_reg = alloc_preg (cfg);
9648 int target_reg = alloc_preg (cfg);
9649 int table_reg = alloc_preg (cfg);
9650 int sum_reg = alloc_preg (cfg);
9651 gboolean use_op_switch;
9655 n = read32 (ip + 1);
9658 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9662 CHECK_OPSIZE (n * sizeof (guint32));
9663 target = ip + n * sizeof (guint32);
9665 GET_BBLOCK (cfg, default_bblock, target);
9666 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9668 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9669 for (i = 0; i < n; ++i) {
9670 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9671 targets [i] = tblock;
9672 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9676 if (sp != stack_start) {
9678 * Link the current bb with the targets as well, so handle_stack_args
9679 * will set their in_stack correctly.
9681 link_bblock (cfg, cfg->cbb, default_bblock);
9682 for (i = 0; i < n; ++i)
9683 link_bblock (cfg, cfg->cbb, targets [i]);
9685 handle_stack_args (cfg, stack_start, sp - stack_start);
9687 CHECK_UNVERIFIABLE (cfg);
9689 /* Undo the links */
9690 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9691 for (i = 0; i < n; ++i)
9692 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9698 for (i = 0; i < n; ++i)
9699 link_bblock (cfg, cfg->cbb, targets [i]);
9701 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9702 table->table = targets;
9703 table->table_size = n;
9705 use_op_switch = FALSE;
9707 /* ARM implements SWITCH statements differently */
9708 /* FIXME: Make it use the generic implementation */
9709 if (!cfg->compile_aot)
9710 use_op_switch = TRUE;
9713 if (COMPILE_LLVM (cfg))
9714 use_op_switch = TRUE;
9716 cfg->cbb->has_jump_table = 1;
9718 if (use_op_switch) {
9719 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9720 ins->sreg1 = src1->dreg;
9721 ins->inst_p0 = table;
9722 ins->inst_many_bb = targets;
9723 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9724 MONO_ADD_INS (cfg->cbb, ins);
9726 if (sizeof (gpointer) == 8)
9727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9731 #if SIZEOF_REGISTER == 8
9732 /* The upper word might not be zero, and we add it to a 64 bit address later */
9733 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9736 if (cfg->compile_aot) {
9737 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9739 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9740 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9741 ins->inst_p0 = table;
9742 ins->dreg = table_reg;
9743 MONO_ADD_INS (cfg->cbb, ins);
9746 /* FIXME: Use load_memindex */
9747 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9749 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9751 start_new_bblock = 1;
9752 inline_costs += (BRANCH_COST * 2);
9772 dreg = alloc_freg (cfg);
9775 dreg = alloc_lreg (cfg);
9778 dreg = alloc_ireg_ref (cfg);
9781 dreg = alloc_preg (cfg);
9784 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9785 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9786 if (*ip == CEE_LDIND_R4)
9787 ins->type = cfg->r4_stack_type;
9788 ins->flags |= ins_flag;
9789 MONO_ADD_INS (cfg->cbb, ins);
9791 if (ins_flag & MONO_INST_VOLATILE) {
9792 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9793 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9809 if (ins_flag & MONO_INST_VOLATILE) {
9810 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9811 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9814 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9815 ins->flags |= ins_flag;
9818 MONO_ADD_INS (cfg->cbb, ins);
9820 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9821 emit_write_barrier (cfg, sp [0], sp [1]);
9830 MONO_INST_NEW (cfg, ins, (*ip));
9832 ins->sreg1 = sp [0]->dreg;
9833 ins->sreg2 = sp [1]->dreg;
9834 type_from_op (cfg, ins, sp [0], sp [1]);
9836 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9838 /* Use the immediate opcodes if possible */
9839 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9840 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9841 if (imm_opcode != -1) {
9842 ins->opcode = imm_opcode;
9843 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9846 NULLIFY_INS (sp [1]);
9850 MONO_ADD_INS ((cfg)->cbb, (ins));
9852 *sp++ = mono_decompose_opcode (cfg, ins);
9869 MONO_INST_NEW (cfg, ins, (*ip));
9871 ins->sreg1 = sp [0]->dreg;
9872 ins->sreg2 = sp [1]->dreg;
9873 type_from_op (cfg, ins, sp [0], sp [1]);
9875 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9876 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9878 /* FIXME: Pass opcode to is_inst_imm */
9880 /* Use the immediate opcodes if possible */
9881 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9882 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9883 if (imm_opcode != -1) {
9884 ins->opcode = imm_opcode;
9885 if (sp [1]->opcode == OP_I8CONST) {
9886 #if SIZEOF_REGISTER == 8
9887 ins->inst_imm = sp [1]->inst_l;
9889 ins->inst_ls_word = sp [1]->inst_ls_word;
9890 ins->inst_ms_word = sp [1]->inst_ms_word;
9894 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9897 /* Might be followed by an instruction added by add_widen_op */
9898 if (sp [1]->next == NULL)
9899 NULLIFY_INS (sp [1]);
9902 MONO_ADD_INS ((cfg)->cbb, (ins));
9904 *sp++ = mono_decompose_opcode (cfg, ins);
9917 case CEE_CONV_OVF_I8:
9918 case CEE_CONV_OVF_U8:
9922 /* Special case this earlier so we have long constants in the IR */
9923 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9924 int data = sp [-1]->inst_c0;
9925 sp [-1]->opcode = OP_I8CONST;
9926 sp [-1]->type = STACK_I8;
9927 #if SIZEOF_REGISTER == 8
9928 if ((*ip) == CEE_CONV_U8)
9929 sp [-1]->inst_c0 = (guint32)data;
9931 sp [-1]->inst_c0 = data;
9933 sp [-1]->inst_ls_word = data;
9934 if ((*ip) == CEE_CONV_U8)
9935 sp [-1]->inst_ms_word = 0;
9937 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9939 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9946 case CEE_CONV_OVF_I4:
9947 case CEE_CONV_OVF_I1:
9948 case CEE_CONV_OVF_I2:
9949 case CEE_CONV_OVF_I:
9950 case CEE_CONV_OVF_U:
9953 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9954 ADD_UNOP (CEE_CONV_OVF_I8);
9961 case CEE_CONV_OVF_U1:
9962 case CEE_CONV_OVF_U2:
9963 case CEE_CONV_OVF_U4:
9966 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9967 ADD_UNOP (CEE_CONV_OVF_U8);
9974 case CEE_CONV_OVF_I1_UN:
9975 case CEE_CONV_OVF_I2_UN:
9976 case CEE_CONV_OVF_I4_UN:
9977 case CEE_CONV_OVF_I8_UN:
9978 case CEE_CONV_OVF_U1_UN:
9979 case CEE_CONV_OVF_U2_UN:
9980 case CEE_CONV_OVF_U4_UN:
9981 case CEE_CONV_OVF_U8_UN:
9982 case CEE_CONV_OVF_I_UN:
9983 case CEE_CONV_OVF_U_UN:
9990 CHECK_CFG_EXCEPTION;
9994 case CEE_ADD_OVF_UN:
9996 case CEE_MUL_OVF_UN:
9998 case CEE_SUB_OVF_UN:
10004 GSHAREDVT_FAILURE (*ip);
10007 token = read32 (ip + 1);
10008 klass = mini_get_class (method, token, generic_context);
10009 CHECK_TYPELOAD (klass);
10011 if (generic_class_is_reference_type (cfg, klass)) {
10012 MonoInst *store, *load;
10013 int dreg = alloc_ireg_ref (cfg);
10015 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10016 load->flags |= ins_flag;
10017 MONO_ADD_INS (cfg->cbb, load);
10019 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10020 store->flags |= ins_flag;
10021 MONO_ADD_INS (cfg->cbb, store);
10023 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10024 emit_write_barrier (cfg, sp [0], sp [1]);
10026 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10032 int loc_index = -1;
10038 token = read32 (ip + 1);
10039 klass = mini_get_class (method, token, generic_context);
10040 CHECK_TYPELOAD (klass);
10042 /* Optimize the common ldobj+stloc combination */
10045 loc_index = ip [6];
10052 loc_index = ip [5] - CEE_STLOC_0;
10059 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10060 CHECK_LOCAL (loc_index);
10062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10063 ins->dreg = cfg->locals [loc_index]->dreg;
10064 ins->flags |= ins_flag;
10067 if (ins_flag & MONO_INST_VOLATILE) {
10068 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10069 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10075 /* Optimize the ldobj+stobj combination */
10076 /* The reference case ends up being a load+store anyway */
10077 /* Skip this if the operation is volatile. */
10078 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10083 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10090 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10091 ins->flags |= ins_flag;
10094 if (ins_flag & MONO_INST_VOLATILE) {
10095 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10096 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10105 CHECK_STACK_OVF (1);
10107 n = read32 (ip + 1);
10109 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10110 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10111 ins->type = STACK_OBJ;
10114 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10115 MonoInst *iargs [1];
10116 char *str = (char *)mono_method_get_wrapper_data (method, n);
10118 if (cfg->compile_aot)
10119 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10121 EMIT_NEW_PCONST (cfg, iargs [0], str);
10122 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10124 if (cfg->opt & MONO_OPT_SHARED) {
10125 MonoInst *iargs [3];
10127 if (cfg->compile_aot) {
10128 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10130 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10131 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10132 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10133 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10134 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10137 if (cfg->cbb->out_of_line) {
10138 MonoInst *iargs [2];
10140 if (image == mono_defaults.corlib) {
10142 * Avoid relocations in AOT and save some space by using a
10143 * version of helper_ldstr specialized to mscorlib.
10145 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10146 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10148 /* Avoid creating the string object */
10149 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10150 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10151 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10155 if (cfg->compile_aot) {
10156 NEW_LDSTRCONST (cfg, ins, image, n);
10158 MONO_ADD_INS (cfg->cbb, ins);
10161 NEW_PCONST (cfg, ins, NULL);
10162 ins->type = STACK_OBJ;
10163 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10167 OUT_OF_MEMORY_FAILURE;
10170 MONO_ADD_INS (cfg->cbb, ins);
10179 MonoInst *iargs [2];
10180 MonoMethodSignature *fsig;
10183 MonoInst *vtable_arg = NULL;
10186 token = read32 (ip + 1);
10187 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10190 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10193 mono_save_token_info (cfg, image, token, cmethod);
10195 if (!mono_class_init (cmethod->klass))
10196 TYPE_LOAD_ERROR (cmethod->klass);
10198 context_used = mini_method_check_context_used (cfg, cmethod);
10200 if (mono_security_core_clr_enabled ())
10201 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10203 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10204 emit_class_init (cfg, cmethod->klass);
10205 CHECK_TYPELOAD (cmethod->klass);
10209 if (cfg->gsharedvt) {
10210 if (mini_is_gsharedvt_variable_signature (sig))
10211 GSHAREDVT_FAILURE (*ip);
10215 n = fsig->param_count;
10219 * Generate smaller code for the common newobj <exception> instruction in
10220 * argument checking code.
10222 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10223 is_exception_class (cmethod->klass) && n <= 2 &&
10224 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10225 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10226 MonoInst *iargs [3];
10230 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10233 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10236 iargs [1] = sp [0];
10237 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10240 iargs [1] = sp [0];
10241 iargs [2] = sp [1];
10242 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10245 g_assert_not_reached ();
10253 /* move the args to allow room for 'this' in the first position */
10259 /* check_call_signature () requires sp[0] to be set */
10260 this_ins.type = STACK_OBJ;
10261 sp [0] = &this_ins;
10262 if (check_call_signature (cfg, fsig, sp))
10267 if (mini_class_is_system_array (cmethod->klass)) {
10268 *sp = emit_get_rgctx_method (cfg, context_used,
10269 cmethod, MONO_RGCTX_INFO_METHOD);
10271 /* Avoid varargs in the common case */
10272 if (fsig->param_count == 1)
10273 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10274 else if (fsig->param_count == 2)
10275 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10276 else if (fsig->param_count == 3)
10277 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10278 else if (fsig->param_count == 4)
10279 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10281 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10282 } else if (cmethod->string_ctor) {
10283 g_assert (!context_used);
10284 g_assert (!vtable_arg);
10285 /* we simply pass a null pointer */
10286 EMIT_NEW_PCONST (cfg, *sp, NULL);
10287 /* now call the string ctor */
10288 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10290 if (cmethod->klass->valuetype) {
10291 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10292 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10293 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10298 * The code generated by mini_emit_virtual_call () expects
10299 * iargs [0] to be a boxed instance, but luckily the vcall
10300 * will be transformed into a normal call there.
10302 } else if (context_used) {
10303 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10306 MonoVTable *vtable = NULL;
10308 if (!cfg->compile_aot)
10309 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10310 CHECK_TYPELOAD (cmethod->klass);
10313 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10314 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10315 * As a workaround, we call class cctors before allocating objects.
10317 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10318 emit_class_init (cfg, cmethod->klass);
10319 if (cfg->verbose_level > 2)
10320 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10321 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10324 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10327 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10330 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10332 /* Now call the actual ctor */
10333 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10334 CHECK_CFG_EXCEPTION;
10337 if (alloc == NULL) {
10339 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10340 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10348 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10349 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10352 case CEE_CASTCLASS:
10357 token = read32 (ip + 1);
10358 klass = mini_get_class (method, token, generic_context);
10359 CHECK_TYPELOAD (klass);
10360 if (sp [0]->type != STACK_OBJ)
10363 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10364 ins->dreg = alloc_preg (cfg);
10365 ins->sreg1 = (*sp)->dreg;
10366 ins->klass = klass;
10367 ins->type = STACK_OBJ;
10368 MONO_ADD_INS (cfg->cbb, ins);
10370 CHECK_CFG_EXCEPTION;
10374 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10377 case CEE_UNBOX_ANY: {
10378 MonoInst *res, *addr;
10383 token = read32 (ip + 1);
10384 klass = mini_get_class (method, token, generic_context);
10385 CHECK_TYPELOAD (klass);
10387 mono_save_token_info (cfg, image, token, klass);
10389 context_used = mini_class_check_context_used (cfg, klass);
10391 if (mini_is_gsharedvt_klass (klass)) {
10392 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10394 } else if (generic_class_is_reference_type (cfg, klass)) {
10395 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10396 EMIT_NEW_PCONST (cfg, res, NULL);
10397 res->type = STACK_OBJ;
10399 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10400 res->dreg = alloc_preg (cfg);
10401 res->sreg1 = (*sp)->dreg;
10402 res->klass = klass;
10403 res->type = STACK_OBJ;
10404 MONO_ADD_INS (cfg->cbb, res);
10405 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10407 } else if (mono_class_is_nullable (klass)) {
10408 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10410 addr = handle_unbox (cfg, klass, sp, context_used);
10412 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10423 MonoClass *enum_class;
10424 MonoMethod *has_flag;
10430 token = read32 (ip + 1);
10431 klass = mini_get_class (method, token, generic_context);
10432 CHECK_TYPELOAD (klass);
10434 mono_save_token_info (cfg, image, token, klass);
10436 context_used = mini_class_check_context_used (cfg, klass);
10438 if (generic_class_is_reference_type (cfg, klass)) {
10444 if (klass == mono_defaults.void_class)
10446 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10448 /* frequent check in generic code: box (struct), brtrue */
10453 * <push int/long ptr>
10456 * constrained. MyFlags
10457 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10459 * If we find this sequence and the operand types on box and constrained
10460 * are equal, we can emit a specialized instruction sequence instead of
10461 * the very slow HasFlag () call.
10463 if ((cfg->opt & MONO_OPT_INTRINS) &&
10464 /* Cheap checks first. */
10465 ip + 5 + 6 + 5 < end &&
10466 ip [5] == CEE_PREFIX1 &&
10467 ip [6] == CEE_CONSTRAINED_ &&
10468 ip [11] == CEE_CALLVIRT &&
10469 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10470 mono_class_is_enum (klass) &&
10471 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10472 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10473 has_flag->klass == mono_defaults.enum_class &&
10474 !strcmp (has_flag->name, "HasFlag") &&
10475 has_flag->signature->hasthis &&
10476 has_flag->signature->param_count == 1) {
10477 CHECK_TYPELOAD (enum_class);
10479 if (enum_class == klass) {
10480 MonoInst *enum_this, *enum_flag;
10485 enum_this = sp [0];
10486 enum_flag = sp [1];
10488 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10493 // FIXME: LLVM can't handle the inconsistent bb linking
10494 if (!mono_class_is_nullable (klass) &&
10495 !mini_is_gsharedvt_klass (klass) &&
10496 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10497 (ip [5] == CEE_BRTRUE ||
10498 ip [5] == CEE_BRTRUE_S ||
10499 ip [5] == CEE_BRFALSE ||
10500 ip [5] == CEE_BRFALSE_S)) {
10501 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10503 MonoBasicBlock *true_bb, *false_bb;
10507 if (cfg->verbose_level > 3) {
10508 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10509 printf ("<box+brtrue opt>\n");
10514 case CEE_BRFALSE_S:
10517 target = ip + 1 + (signed char)(*ip);
10524 target = ip + 4 + (gint)(read32 (ip));
10528 g_assert_not_reached ();
10532 * We need to link both bblocks, since it is needed for handling stack
10533 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10534 * Branching to only one of them would lead to inconsistencies, so
10535 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10537 GET_BBLOCK (cfg, true_bb, target);
10538 GET_BBLOCK (cfg, false_bb, ip);
10540 mono_link_bblock (cfg, cfg->cbb, true_bb);
10541 mono_link_bblock (cfg, cfg->cbb, false_bb);
10543 if (sp != stack_start) {
10544 handle_stack_args (cfg, stack_start, sp - stack_start);
10546 CHECK_UNVERIFIABLE (cfg);
10549 if (COMPILE_LLVM (cfg)) {
10550 dreg = alloc_ireg (cfg);
10551 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10554 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10556 /* The JIT can't eliminate the iconst+compare */
10557 MONO_INST_NEW (cfg, ins, OP_BR);
10558 ins->inst_target_bb = is_true ? true_bb : false_bb;
10559 MONO_ADD_INS (cfg->cbb, ins);
10562 start_new_bblock = 1;
10566 *sp++ = handle_box (cfg, val, klass, context_used);
10568 CHECK_CFG_EXCEPTION;
10577 token = read32 (ip + 1);
10578 klass = mini_get_class (method, token, generic_context);
10579 CHECK_TYPELOAD (klass);
10581 mono_save_token_info (cfg, image, token, klass);
10583 context_used = mini_class_check_context_used (cfg, klass);
10585 if (mono_class_is_nullable (klass)) {
10588 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10589 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10593 ins = handle_unbox (cfg, klass, sp, context_used);
10606 MonoClassField *field;
10607 #ifndef DISABLE_REMOTING
10611 gboolean is_instance;
10613 gpointer addr = NULL;
10614 gboolean is_special_static;
10616 MonoInst *store_val = NULL;
10617 MonoInst *thread_ins;
10620 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10622 if (op == CEE_STFLD) {
10625 store_val = sp [1];
10630 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10632 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10635 if (op == CEE_STSFLD) {
10638 store_val = sp [0];
10643 token = read32 (ip + 1);
10644 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10645 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10646 klass = field->parent;
10649 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10652 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10653 FIELD_ACCESS_FAILURE (method, field);
10654 mono_class_init (klass);
10656 /* if the class is Critical then transparent code cannot access it's fields */
10657 if (!is_instance && mono_security_core_clr_enabled ())
10658 ensure_method_is_allowed_to_access_field (cfg, method, field);
10660 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10661 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10662 if (mono_security_core_clr_enabled ())
10663 ensure_method_is_allowed_to_access_field (cfg, method, field);
10666 ftype = mono_field_get_type (field);
10669 * LDFLD etc. is usable on static fields as well, so convert those cases to
10672 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10684 g_assert_not_reached ();
10686 is_instance = FALSE;
10689 context_used = mini_class_check_context_used (cfg, klass);
10691 /* INSTANCE CASE */
10693 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10694 if (op == CEE_STFLD) {
10695 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10697 #ifndef DISABLE_REMOTING
10698 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10699 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10700 MonoInst *iargs [5];
10702 GSHAREDVT_FAILURE (op);
10704 iargs [0] = sp [0];
10705 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10706 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10707 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10709 iargs [4] = sp [1];
10711 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10712 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10713 iargs, ip, cfg->real_offset, TRUE);
10714 CHECK_CFG_EXCEPTION;
10715 g_assert (costs > 0);
10717 cfg->real_offset += 5;
10719 inline_costs += costs;
10721 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10726 MonoInst *store, *wbarrier_ptr_ins = NULL;
10728 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10730 if (ins_flag & MONO_INST_VOLATILE) {
10731 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10732 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10735 if (mini_is_gsharedvt_klass (klass)) {
10736 MonoInst *offset_ins;
10738 context_used = mini_class_check_context_used (cfg, klass);
10740 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10741 /* The value is offset by 1 */
10742 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10743 dreg = alloc_ireg_mp (cfg);
10744 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10745 wbarrier_ptr_ins = ins;
10746 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10747 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10749 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10751 if (sp [0]->opcode != OP_LDADDR)
10752 store->flags |= MONO_INST_FAULT;
10754 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10755 if (mini_is_gsharedvt_klass (klass)) {
10756 g_assert (wbarrier_ptr_ins);
10757 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10759 /* insert call to write barrier */
10763 dreg = alloc_ireg_mp (cfg);
10764 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10765 emit_write_barrier (cfg, ptr, sp [1]);
10769 store->flags |= ins_flag;
10776 #ifndef DISABLE_REMOTING
10777 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10778 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10779 MonoInst *iargs [4];
10781 GSHAREDVT_FAILURE (op);
10783 iargs [0] = sp [0];
10784 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10785 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10786 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10787 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10788 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10789 iargs, ip, cfg->real_offset, TRUE);
10790 CHECK_CFG_EXCEPTION;
10791 g_assert (costs > 0);
10793 cfg->real_offset += 5;
10797 inline_costs += costs;
10799 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10805 if (sp [0]->type == STACK_VTYPE) {
10808 /* Have to compute the address of the variable */
10810 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10812 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10814 g_assert (var->klass == klass);
10816 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10820 if (op == CEE_LDFLDA) {
10821 if (sp [0]->type == STACK_OBJ) {
10822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10823 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10826 dreg = alloc_ireg_mp (cfg);
10828 if (mini_is_gsharedvt_klass (klass)) {
10829 MonoInst *offset_ins;
10831 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10832 /* The value is offset by 1 */
10833 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10834 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10836 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10838 ins->klass = mono_class_from_mono_type (field->type);
10839 ins->type = STACK_MP;
10844 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10846 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10847 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10856 if (mini_is_gsharedvt_klass (klass)) {
10857 MonoInst *offset_ins;
10859 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10860 /* The value is offset by 1 */
10861 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10862 dreg = alloc_ireg_mp (cfg);
10863 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10864 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10866 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10868 load->flags |= ins_flag;
10869 if (sp [0]->opcode != OP_LDADDR)
10870 load->flags |= MONO_INST_FAULT;
10882 context_used = mini_class_check_context_used (cfg, klass);
10884 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10885 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10889 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10890 * to be called here.
10892 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10893 mono_class_vtable (cfg->domain, klass);
10894 CHECK_TYPELOAD (klass);
10896 mono_domain_lock (cfg->domain);
10897 if (cfg->domain->special_static_fields)
10898 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10899 mono_domain_unlock (cfg->domain);
10901 is_special_static = mono_class_field_is_special_static (field);
10903 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10904 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10908 /* Generate IR to compute the field address */
10909 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10911 * Fast access to TLS data
10912 * Inline version of get_thread_static_data () in
10916 int idx, static_data_reg, array_reg, dreg;
10918 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10919 GSHAREDVT_FAILURE (op);
10921 static_data_reg = alloc_ireg (cfg);
10922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10924 if (cfg->compile_aot) {
10925 int offset_reg, offset2_reg, idx_reg;
10927 /* For TLS variables, this will return the TLS offset */
10928 EMIT_NEW_SFLDACONST (cfg, ins, field);
10929 offset_reg = ins->dreg;
10930 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10931 idx_reg = alloc_ireg (cfg);
10932 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10934 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10935 array_reg = alloc_ireg (cfg);
10936 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10937 offset2_reg = alloc_ireg (cfg);
10938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10940 dreg = alloc_ireg (cfg);
10941 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10943 offset = (gsize)addr & 0x7fffffff;
10944 idx = offset & 0x3f;
10946 array_reg = alloc_ireg (cfg);
10947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10948 dreg = alloc_ireg (cfg);
10949 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10951 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10952 (cfg->compile_aot && is_special_static) ||
10953 (context_used && is_special_static)) {
10954 MonoInst *iargs [2];
10956 g_assert (field->parent);
10957 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10958 if (context_used) {
10959 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10960 field, MONO_RGCTX_INFO_CLASS_FIELD);
10962 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10964 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10965 } else if (context_used) {
10966 MonoInst *static_data;
10969 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10970 method->klass->name_space, method->klass->name, method->name,
10971 depth, field->offset);
10974 if (mono_class_needs_cctor_run (klass, method))
10975 emit_class_init (cfg, klass);
10978 * The pointer we're computing here is
10980 * super_info.static_data + field->offset
10982 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10983 klass, MONO_RGCTX_INFO_STATIC_DATA);
10985 if (mini_is_gsharedvt_klass (klass)) {
10986 MonoInst *offset_ins;
10988 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10989 /* The value is offset by 1 */
10990 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10991 dreg = alloc_ireg_mp (cfg);
10992 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10993 } else if (field->offset == 0) {
10996 int addr_reg = mono_alloc_preg (cfg);
10997 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10999 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11000 MonoInst *iargs [2];
11002 g_assert (field->parent);
11003 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11004 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11005 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11007 MonoVTable *vtable = NULL;
11009 if (!cfg->compile_aot)
11010 vtable = mono_class_vtable (cfg->domain, klass);
11011 CHECK_TYPELOAD (klass);
11014 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11015 if (!(g_slist_find (class_inits, klass))) {
11016 emit_class_init (cfg, klass);
11017 if (cfg->verbose_level > 2)
11018 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11019 class_inits = g_slist_prepend (class_inits, klass);
11022 if (cfg->run_cctors) {
11023 /* This makes so that inline cannot trigger */
11024 /* .cctors: too many apps depend on them */
11025 /* running with a specific order... */
11027 if (! vtable->initialized)
11028 INLINE_FAILURE ("class init");
11029 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11030 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11031 goto exception_exit;
11035 if (cfg->compile_aot)
11036 EMIT_NEW_SFLDACONST (cfg, ins, field);
11039 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11041 EMIT_NEW_PCONST (cfg, ins, addr);
11044 MonoInst *iargs [1];
11045 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11046 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11050 /* Generate IR to do the actual load/store operation */
11052 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11053 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11054 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11057 if (op == CEE_LDSFLDA) {
11058 ins->klass = mono_class_from_mono_type (ftype);
11059 ins->type = STACK_PTR;
11061 } else if (op == CEE_STSFLD) {
11064 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11065 store->flags |= ins_flag;
11067 gboolean is_const = FALSE;
11068 MonoVTable *vtable = NULL;
11069 gpointer addr = NULL;
11071 if (!context_used) {
11072 vtable = mono_class_vtable (cfg->domain, klass);
11073 CHECK_TYPELOAD (klass);
11075 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11076 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11077 int ro_type = ftype->type;
11079 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11080 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11081 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11084 GSHAREDVT_FAILURE (op);
11086 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11089 case MONO_TYPE_BOOLEAN:
11091 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11095 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11098 case MONO_TYPE_CHAR:
11100 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11104 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11109 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11113 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11118 case MONO_TYPE_PTR:
11119 case MONO_TYPE_FNPTR:
11120 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11121 type_to_eval_stack_type ((cfg), field->type, *sp);
11124 case MONO_TYPE_STRING:
11125 case MONO_TYPE_OBJECT:
11126 case MONO_TYPE_CLASS:
11127 case MONO_TYPE_SZARRAY:
11128 case MONO_TYPE_ARRAY:
11129 if (!mono_gc_is_moving ()) {
11130 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11131 type_to_eval_stack_type ((cfg), field->type, *sp);
11139 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11144 case MONO_TYPE_VALUETYPE:
11154 CHECK_STACK_OVF (1);
11156 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11157 load->flags |= ins_flag;
11163 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11164 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11165 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11176 token = read32 (ip + 1);
11177 klass = mini_get_class (method, token, generic_context);
11178 CHECK_TYPELOAD (klass);
11179 if (ins_flag & MONO_INST_VOLATILE) {
11180 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11181 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11183 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11184 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11185 ins->flags |= ins_flag;
11186 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11187 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11188 /* insert call to write barrier */
11189 emit_write_barrier (cfg, sp [0], sp [1]);
11201 const char *data_ptr;
11203 guint32 field_token;
11209 token = read32 (ip + 1);
11211 klass = mini_get_class (method, token, generic_context);
11212 CHECK_TYPELOAD (klass);
11213 if (klass->byval_arg.type == MONO_TYPE_VOID)
11216 context_used = mini_class_check_context_used (cfg, klass);
11218 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11219 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11220 ins->sreg1 = sp [0]->dreg;
11221 ins->type = STACK_I4;
11222 ins->dreg = alloc_ireg (cfg);
11223 MONO_ADD_INS (cfg->cbb, ins);
11224 *sp = mono_decompose_opcode (cfg, ins);
11227 if (context_used) {
11228 MonoInst *args [3];
11229 MonoClass *array_class = mono_array_class_get (klass, 1);
11230 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11232 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11235 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11236 array_class, MONO_RGCTX_INFO_VTABLE);
11241 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11243 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11245 if (cfg->opt & MONO_OPT_SHARED) {
11246 /* Decompose now to avoid problems with references to the domainvar */
11247 MonoInst *iargs [3];
11249 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11250 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11251 iargs [2] = sp [0];
11253 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11255 /* Decompose later since it is needed by abcrem */
11256 MonoClass *array_type = mono_array_class_get (klass, 1);
11257 mono_class_vtable (cfg->domain, array_type);
11258 CHECK_TYPELOAD (array_type);
11260 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11261 ins->dreg = alloc_ireg_ref (cfg);
11262 ins->sreg1 = sp [0]->dreg;
11263 ins->inst_newa_class = klass;
11264 ins->type = STACK_OBJ;
11265 ins->klass = array_type;
11266 MONO_ADD_INS (cfg->cbb, ins);
11267 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11268 cfg->cbb->has_array_access = TRUE;
11270 /* Needed so mono_emit_load_get_addr () gets called */
11271 mono_get_got_var (cfg);
11281 * we inline/optimize the initialization sequence if possible.
11282 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11283 * for small sizes open code the memcpy
11284 * ensure the rva field is big enough
11286 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11287 MonoMethod *memcpy_method = get_memcpy_method ();
11288 MonoInst *iargs [3];
11289 int add_reg = alloc_ireg_mp (cfg);
11291 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11292 if (cfg->compile_aot) {
11293 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11295 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11297 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11298 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11307 if (sp [0]->type != STACK_OBJ)
11310 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11311 ins->dreg = alloc_preg (cfg);
11312 ins->sreg1 = sp [0]->dreg;
11313 ins->type = STACK_I4;
11314 /* This flag will be inherited by the decomposition */
11315 ins->flags |= MONO_INST_FAULT;
11316 MONO_ADD_INS (cfg->cbb, ins);
11317 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11318 cfg->cbb->has_array_access = TRUE;
11326 if (sp [0]->type != STACK_OBJ)
11329 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11331 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11332 CHECK_TYPELOAD (klass);
11333 /* we need to make sure that this array is exactly the type it needs
11334 * to be for correctness. the wrappers are lax with their usage
11335 * so we need to ignore them here
11337 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11338 MonoClass *array_class = mono_array_class_get (klass, 1);
11339 mini_emit_check_array_type (cfg, sp [0], array_class);
11340 CHECK_TYPELOAD (array_class);
11344 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11349 case CEE_LDELEM_I1:
11350 case CEE_LDELEM_U1:
11351 case CEE_LDELEM_I2:
11352 case CEE_LDELEM_U2:
11353 case CEE_LDELEM_I4:
11354 case CEE_LDELEM_U4:
11355 case CEE_LDELEM_I8:
11357 case CEE_LDELEM_R4:
11358 case CEE_LDELEM_R8:
11359 case CEE_LDELEM_REF: {
11365 if (*ip == CEE_LDELEM) {
11367 token = read32 (ip + 1);
11368 klass = mini_get_class (method, token, generic_context);
11369 CHECK_TYPELOAD (klass);
11370 mono_class_init (klass);
11373 klass = array_access_to_klass (*ip);
11375 if (sp [0]->type != STACK_OBJ)
11378 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11380 if (mini_is_gsharedvt_variable_klass (klass)) {
11381 // FIXME-VT: OP_ICONST optimization
11382 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11383 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11384 ins->opcode = OP_LOADV_MEMBASE;
11385 } else if (sp [1]->opcode == OP_ICONST) {
11386 int array_reg = sp [0]->dreg;
11387 int index_reg = sp [1]->dreg;
11388 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11390 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11391 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11393 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11396 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11397 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11400 if (*ip == CEE_LDELEM)
11407 case CEE_STELEM_I1:
11408 case CEE_STELEM_I2:
11409 case CEE_STELEM_I4:
11410 case CEE_STELEM_I8:
11411 case CEE_STELEM_R4:
11412 case CEE_STELEM_R8:
11413 case CEE_STELEM_REF:
11418 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11420 if (*ip == CEE_STELEM) {
11422 token = read32 (ip + 1);
11423 klass = mini_get_class (method, token, generic_context);
11424 CHECK_TYPELOAD (klass);
11425 mono_class_init (klass);
11428 klass = array_access_to_klass (*ip);
11430 if (sp [0]->type != STACK_OBJ)
11433 emit_array_store (cfg, klass, sp, TRUE);
11435 if (*ip == CEE_STELEM)
11442 case CEE_CKFINITE: {
11446 if (cfg->llvm_only) {
11447 MonoInst *iargs [1];
11449 iargs [0] = sp [0];
11450 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11452 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11453 ins->sreg1 = sp [0]->dreg;
11454 ins->dreg = alloc_freg (cfg);
11455 ins->type = STACK_R8;
11456 MONO_ADD_INS (cfg->cbb, ins);
11458 *sp++ = mono_decompose_opcode (cfg, ins);
11464 case CEE_REFANYVAL: {
11465 MonoInst *src_var, *src;
11467 int klass_reg = alloc_preg (cfg);
11468 int dreg = alloc_preg (cfg);
11470 GSHAREDVT_FAILURE (*ip);
11473 MONO_INST_NEW (cfg, ins, *ip);
11476 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11477 CHECK_TYPELOAD (klass);
11479 context_used = mini_class_check_context_used (cfg, klass);
11482 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11484 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11485 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11488 if (context_used) {
11489 MonoInst *klass_ins;
11491 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11492 klass, MONO_RGCTX_INFO_KLASS);
11495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11496 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11498 mini_emit_class_check (cfg, klass_reg, klass);
11500 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11501 ins->type = STACK_MP;
11502 ins->klass = klass;
11507 case CEE_MKREFANY: {
11508 MonoInst *loc, *addr;
11510 GSHAREDVT_FAILURE (*ip);
11513 MONO_INST_NEW (cfg, ins, *ip);
11516 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11517 CHECK_TYPELOAD (klass);
11519 context_used = mini_class_check_context_used (cfg, klass);
11521 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11522 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11524 if (context_used) {
11525 MonoInst *const_ins;
11526 int type_reg = alloc_preg (cfg);
11528 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11529 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11531 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11533 int const_reg = alloc_preg (cfg);
11534 int type_reg = alloc_preg (cfg);
11536 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11537 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11538 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11539 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11541 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11543 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11544 ins->type = STACK_VTYPE;
11545 ins->klass = mono_defaults.typed_reference_class;
11550 case CEE_LDTOKEN: {
11552 MonoClass *handle_class;
11554 CHECK_STACK_OVF (1);
11557 n = read32 (ip + 1);
11559 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11560 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11561 handle = mono_method_get_wrapper_data (method, n);
11562 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11563 if (handle_class == mono_defaults.typehandle_class)
11564 handle = &((MonoClass*)handle)->byval_arg;
11567 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11572 mono_class_init (handle_class);
11573 if (cfg->gshared) {
11574 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11575 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11576 /* This case handles ldtoken
11577 of an open type, like for
11580 } else if (handle_class == mono_defaults.typehandle_class) {
11581 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11582 } else if (handle_class == mono_defaults.fieldhandle_class)
11583 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11584 else if (handle_class == mono_defaults.methodhandle_class)
11585 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11587 g_assert_not_reached ();
11590 if ((cfg->opt & MONO_OPT_SHARED) &&
11591 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11592 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11593 MonoInst *addr, *vtvar, *iargs [3];
11594 int method_context_used;
11596 method_context_used = mini_method_check_context_used (cfg, method);
11598 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11600 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11601 EMIT_NEW_ICONST (cfg, iargs [1], n);
11602 if (method_context_used) {
11603 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11604 method, MONO_RGCTX_INFO_METHOD);
11605 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11607 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11608 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11610 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11612 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11614 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11616 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11617 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11618 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11619 (cmethod->klass == mono_defaults.systemtype_class) &&
11620 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11621 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11623 mono_class_init (tclass);
11624 if (context_used) {
11625 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11626 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11627 } else if (cfg->compile_aot) {
11628 if (method->wrapper_type) {
11629 error_init (&error); //got to do it since there are multiple conditionals below
11630 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11631 /* Special case for static synchronized wrappers */
11632 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11634 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11635 /* FIXME: n is not a normal token */
11637 EMIT_NEW_PCONST (cfg, ins, NULL);
11640 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11643 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11645 EMIT_NEW_PCONST (cfg, ins, rt);
11647 ins->type = STACK_OBJ;
11648 ins->klass = cmethod->klass;
11651 MonoInst *addr, *vtvar;
11653 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11655 if (context_used) {
11656 if (handle_class == mono_defaults.typehandle_class) {
11657 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11658 mono_class_from_mono_type ((MonoType *)handle),
11659 MONO_RGCTX_INFO_TYPE);
11660 } else if (handle_class == mono_defaults.methodhandle_class) {
11661 ins = emit_get_rgctx_method (cfg, context_used,
11662 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11663 } else if (handle_class == mono_defaults.fieldhandle_class) {
11664 ins = emit_get_rgctx_field (cfg, context_used,
11665 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11667 g_assert_not_reached ();
11669 } else if (cfg->compile_aot) {
11670 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11672 EMIT_NEW_PCONST (cfg, ins, handle);
11674 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11676 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11686 if (sp [-1]->type != STACK_OBJ)
11689 MONO_INST_NEW (cfg, ins, OP_THROW);
11691 ins->sreg1 = sp [0]->dreg;
11693 cfg->cbb->out_of_line = TRUE;
11694 MONO_ADD_INS (cfg->cbb, ins);
11695 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11696 MONO_ADD_INS (cfg->cbb, ins);
11699 link_bblock (cfg, cfg->cbb, end_bblock);
11700 start_new_bblock = 1;
11701 /* This can complicate code generation for llvm since the return value might not be defined */
11702 if (COMPILE_LLVM (cfg))
11703 INLINE_FAILURE ("throw");
11705 case CEE_ENDFINALLY:
11706 if (!ip_in_finally_clause (cfg, ip - header->code))
11708 /* mono_save_seq_point_info () depends on this */
11709 if (sp != stack_start)
11710 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11711 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11712 MONO_ADD_INS (cfg->cbb, ins);
11714 start_new_bblock = 1;
11717 * Control will leave the method so empty the stack, otherwise
11718 * the next basic block will start with a nonempty stack.
11720 while (sp != stack_start) {
11725 case CEE_LEAVE_S: {
11728 if (*ip == CEE_LEAVE) {
11730 target = ip + 5 + (gint32)read32(ip + 1);
11733 target = ip + 2 + (signed char)(ip [1]);
11736 /* empty the stack */
11737 while (sp != stack_start) {
11742 * If this leave statement is in a catch block, check for a
11743 * pending exception, and rethrow it if necessary.
11744 * We avoid doing this in runtime invoke wrappers, since those are called
11745 * by native code which expects the wrapper to catch all exceptions.
11747 for (i = 0; i < header->num_clauses; ++i) {
11748 MonoExceptionClause *clause = &header->clauses [i];
11751 * Use <= in the final comparison to handle clauses with multiple
11752 * leave statements, like in bug #78024.
11753 * The ordering of the exception clauses guarantees that we find the
11754 * innermost clause.
11756 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11758 MonoBasicBlock *dont_throw;
11763 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11766 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11768 NEW_BBLOCK (cfg, dont_throw);
11771 * Currently, we always rethrow the abort exception, despite the
11772 * fact that this is not correct. See thread6.cs for an example.
11773 * But propagating the abort exception is more important than
11774 * getting the semantics right.
11776 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11777 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11778 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11780 MONO_START_BB (cfg, dont_throw);
11785 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11788 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11790 MonoExceptionClause *clause;
11792 for (tmp = handlers; tmp; tmp = tmp->next) {
11793 clause = (MonoExceptionClause *)tmp->data;
11794 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11796 link_bblock (cfg, cfg->cbb, tblock);
11797 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11798 ins->inst_target_bb = tblock;
11799 ins->inst_eh_block = clause;
11800 MONO_ADD_INS (cfg->cbb, ins);
11801 cfg->cbb->has_call_handler = 1;
11802 if (COMPILE_LLVM (cfg)) {
11803 MonoBasicBlock *target_bb;
11806 * Link the finally bblock with the target, since it will
11807 * conceptually branch there.
11809 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11810 GET_BBLOCK (cfg, target_bb, target);
11811 link_bblock (cfg, tblock, target_bb);
11814 g_list_free (handlers);
11817 MONO_INST_NEW (cfg, ins, OP_BR);
11818 MONO_ADD_INS (cfg->cbb, ins);
11819 GET_BBLOCK (cfg, tblock, target);
11820 link_bblock (cfg, cfg->cbb, tblock);
11821 ins->inst_target_bb = tblock;
11823 start_new_bblock = 1;
11825 if (*ip == CEE_LEAVE)
11834 * Mono specific opcodes
11836 case MONO_CUSTOM_PREFIX: {
11838 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11842 case CEE_MONO_ICALL: {
11844 MonoJitICallInfo *info;
11846 token = read32 (ip + 2);
11847 func = mono_method_get_wrapper_data (method, token);
11848 info = mono_find_jit_icall_by_addr (func);
11850 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11853 CHECK_STACK (info->sig->param_count);
11854 sp -= info->sig->param_count;
11856 ins = mono_emit_jit_icall (cfg, info->func, sp);
11857 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11861 inline_costs += 10 * num_calls++;
11865 case CEE_MONO_LDPTR_CARD_TABLE:
11866 case CEE_MONO_LDPTR_NURSERY_START:
11867 case CEE_MONO_LDPTR_NURSERY_BITS:
11868 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11869 CHECK_STACK_OVF (1);
11872 case CEE_MONO_LDPTR_CARD_TABLE:
11873 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11875 case CEE_MONO_LDPTR_NURSERY_START:
11876 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11878 case CEE_MONO_LDPTR_NURSERY_BITS:
11879 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11881 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11882 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11888 inline_costs += 10 * num_calls++;
11891 case CEE_MONO_LDPTR: {
11894 CHECK_STACK_OVF (1);
11896 token = read32 (ip + 2);
11898 ptr = mono_method_get_wrapper_data (method, token);
11899 EMIT_NEW_PCONST (cfg, ins, ptr);
11902 inline_costs += 10 * num_calls++;
11903 /* Can't embed random pointers into AOT code */
11907 case CEE_MONO_JIT_ICALL_ADDR: {
11908 MonoJitICallInfo *callinfo;
11911 CHECK_STACK_OVF (1);
11913 token = read32 (ip + 2);
11915 ptr = mono_method_get_wrapper_data (method, token);
11916 callinfo = mono_find_jit_icall_by_addr (ptr);
11917 g_assert (callinfo);
11918 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11921 inline_costs += 10 * num_calls++;
11924 case CEE_MONO_ICALL_ADDR: {
11925 MonoMethod *cmethod;
11928 CHECK_STACK_OVF (1);
11930 token = read32 (ip + 2);
11932 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11934 if (cfg->compile_aot) {
11935 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11937 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11938 * before the call, it's not needed when using direct pinvoke.
11939 * This is not an optimization, but it's used to avoid looking up pinvokes
11940 * on platforms which don't support dlopen ().
11942 EMIT_NEW_PCONST (cfg, ins, NULL);
11944 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11947 ptr = mono_lookup_internal_call (cmethod);
11949 EMIT_NEW_PCONST (cfg, ins, ptr);
11955 case CEE_MONO_VTADDR: {
11956 MonoInst *src_var, *src;
11962 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11963 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11968 case CEE_MONO_NEWOBJ: {
11969 MonoInst *iargs [2];
11971 CHECK_STACK_OVF (1);
11973 token = read32 (ip + 2);
11974 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11975 mono_class_init (klass);
11976 NEW_DOMAINCONST (cfg, iargs [0]);
11977 MONO_ADD_INS (cfg->cbb, iargs [0]);
11978 NEW_CLASSCONST (cfg, iargs [1], klass);
11979 MONO_ADD_INS (cfg->cbb, iargs [1]);
11980 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11982 inline_costs += 10 * num_calls++;
11985 case CEE_MONO_OBJADDR:
11988 MONO_INST_NEW (cfg, ins, OP_MOVE);
11989 ins->dreg = alloc_ireg_mp (cfg);
11990 ins->sreg1 = sp [0]->dreg;
11991 ins->type = STACK_MP;
11992 MONO_ADD_INS (cfg->cbb, ins);
11996 case CEE_MONO_LDNATIVEOBJ:
11998 * Similar to LDOBJ, but instead load the unmanaged
11999 * representation of the vtype to the stack.
12004 token = read32 (ip + 2);
12005 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12006 g_assert (klass->valuetype);
12007 mono_class_init (klass);
12010 MonoInst *src, *dest, *temp;
12013 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12014 temp->backend.is_pinvoke = 1;
12015 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12016 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12018 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12019 dest->type = STACK_VTYPE;
12020 dest->klass = klass;
12026 case CEE_MONO_RETOBJ: {
12028 * Same as RET, but return the native representation of a vtype
12031 g_assert (cfg->ret);
12032 g_assert (mono_method_signature (method)->pinvoke);
12037 token = read32 (ip + 2);
12038 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12040 if (!cfg->vret_addr) {
12041 g_assert (cfg->ret_var_is_local);
12043 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12045 EMIT_NEW_RETLOADA (cfg, ins);
12047 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12049 if (sp != stack_start)
12052 MONO_INST_NEW (cfg, ins, OP_BR);
12053 ins->inst_target_bb = end_bblock;
12054 MONO_ADD_INS (cfg->cbb, ins);
12055 link_bblock (cfg, cfg->cbb, end_bblock);
12056 start_new_bblock = 1;
12060 case CEE_MONO_SAVE_LMF:
12061 case CEE_MONO_RESTORE_LMF:
12064 case CEE_MONO_CLASSCONST:
12065 CHECK_STACK_OVF (1);
12067 token = read32 (ip + 2);
12068 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12071 inline_costs += 10 * num_calls++;
12073 case CEE_MONO_NOT_TAKEN:
12074 cfg->cbb->out_of_line = TRUE;
12077 case CEE_MONO_TLS: {
12080 CHECK_STACK_OVF (1);
12082 key = (MonoTlsKey)read32 (ip + 2);
12083 g_assert (key < TLS_KEY_NUM);
12085 ins = mono_create_tls_get (cfg, key);
12087 ins->type = STACK_PTR;
12092 case CEE_MONO_DYN_CALL: {
12093 MonoCallInst *call;
12095 /* It would be easier to call a trampoline, but that would put an
12096 * extra frame on the stack, confusing exception handling. So
12097 * implement it inline using an opcode for now.
12100 if (!cfg->dyn_call_var) {
12101 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12102 /* prevent it from being register allocated */
12103 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12106 /* Has to use a call inst since the local regalloc expects it */
12107 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12108 ins = (MonoInst*)call;
12110 ins->sreg1 = sp [0]->dreg;
12111 ins->sreg2 = sp [1]->dreg;
12112 MONO_ADD_INS (cfg->cbb, ins);
12114 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12117 inline_costs += 10 * num_calls++;
12121 case CEE_MONO_MEMORY_BARRIER: {
12123 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12127 case CEE_MONO_ATOMIC_STORE_I4: {
12128 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12134 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12135 ins->dreg = sp [0]->dreg;
12136 ins->sreg1 = sp [1]->dreg;
12137 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12138 MONO_ADD_INS (cfg->cbb, ins);
12143 case CEE_MONO_JIT_ATTACH: {
12144 MonoInst *args [16], *domain_ins;
12145 MonoInst *ad_ins, *jit_tls_ins;
12146 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12148 g_assert (!mono_threads_is_coop_enabled ());
12150 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12152 EMIT_NEW_PCONST (cfg, ins, NULL);
12153 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12155 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12156 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12158 if (ad_ins && jit_tls_ins) {
12159 NEW_BBLOCK (cfg, next_bb);
12160 NEW_BBLOCK (cfg, call_bb);
12162 if (cfg->compile_aot) {
12163 /* AOT code is only used in the root domain */
12164 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12166 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12168 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12174 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12175 MONO_START_BB (cfg, call_bb);
12178 /* AOT code is only used in the root domain */
12179 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12180 if (cfg->compile_aot) {
12184 * This is called on unattached threads, so it cannot go through the trampoline
12185 * infrastructure. Use an indirect call through a got slot initialized at load time
12188 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12189 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12191 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12193 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12196 MONO_START_BB (cfg, next_bb);
12201 case CEE_MONO_JIT_DETACH: {
12202 MonoInst *args [16];
12204 /* Restore the original domain */
12205 dreg = alloc_ireg (cfg);
12206 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12207 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12211 case CEE_MONO_CALLI_EXTRA_ARG: {
12213 MonoMethodSignature *fsig;
12217 * This is the same as CEE_CALLI, but passes an additional argument
12218 * to the called method in llvmonly mode.
12219 * This is only used by delegate invoke wrappers to call the
12220 * actual delegate method.
12222 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12225 token = read32 (ip + 2);
12233 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12236 if (cfg->llvm_only)
12237 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12239 n = fsig->param_count + fsig->hasthis + 1;
12246 if (cfg->llvm_only) {
12248 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12249 * cconv. This is set by mono_init_delegate ().
12251 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12252 MonoInst *callee = addr;
12253 MonoInst *call, *localloc_ins;
12254 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12255 int low_bit_reg = alloc_preg (cfg);
12257 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12258 NEW_BBLOCK (cfg, end_bb);
12260 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12262 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12264 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12265 addr = emit_get_rgctx_sig (cfg, context_used,
12266 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12268 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12270 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12271 ins->dreg = alloc_preg (cfg);
12272 ins->inst_imm = 2 * SIZEOF_VOID_P;
12273 MONO_ADD_INS (cfg->cbb, ins);
12274 localloc_ins = ins;
12275 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12276 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12277 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12279 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12280 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12282 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12283 MONO_START_BB (cfg, is_gsharedvt_bb);
12284 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12285 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12286 ins->dreg = call->dreg;
12288 MONO_START_BB (cfg, end_bb);
12290 /* Caller uses a normal calling conv */
12292 MonoInst *callee = addr;
12293 MonoInst *call, *localloc_ins;
12294 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12295 int low_bit_reg = alloc_preg (cfg);
12297 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12298 NEW_BBLOCK (cfg, end_bb);
12300 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12301 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12302 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12304 /* Normal case: callee uses a normal cconv, no conversion is needed */
12305 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12306 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12307 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12308 MONO_START_BB (cfg, is_gsharedvt_bb);
12309 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12310 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12311 MONO_ADD_INS (cfg->cbb, addr);
12313 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12315 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12316 ins->dreg = alloc_preg (cfg);
12317 ins->inst_imm = 2 * SIZEOF_VOID_P;
12318 MONO_ADD_INS (cfg->cbb, ins);
12319 localloc_ins = ins;
12320 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12321 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12324 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12325 ins->dreg = call->dreg;
12326 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12328 MONO_START_BB (cfg, end_bb);
12331 /* Same as CEE_CALLI */
12332 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12334 * We pass the address to the gsharedvt trampoline in the rgctx reg
12336 MonoInst *callee = addr;
12338 addr = emit_get_rgctx_sig (cfg, context_used,
12339 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12340 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12342 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12346 if (!MONO_TYPE_IS_VOID (fsig->ret))
12347 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12349 CHECK_CFG_EXCEPTION;
12353 constrained_class = NULL;
12356 case CEE_MONO_LDDOMAIN:
12357 CHECK_STACK_OVF (1);
12358 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12362 case CEE_MONO_GET_LAST_ERROR:
12364 CHECK_STACK_OVF (1);
12366 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12367 ins->dreg = alloc_dreg (cfg, STACK_I4);
12368 ins->type = STACK_I4;
12369 MONO_ADD_INS (cfg->cbb, ins);
12374 case CEE_MONO_GET_RGCTX_ARG:
12376 CHECK_STACK_OVF (1);
12378 mono_create_rgctx_var (cfg);
12380 MONO_INST_NEW (cfg, ins, OP_MOVE);
12381 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12382 ins->sreg1 = cfg->rgctx_var->dreg;
12383 ins->type = STACK_PTR;
12384 MONO_ADD_INS (cfg->cbb, ins);
12390 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12396 case CEE_PREFIX1: {
12399 case CEE_ARGLIST: {
12400 /* somewhat similar to LDTOKEN */
12401 MonoInst *addr, *vtvar;
12402 CHECK_STACK_OVF (1);
12403 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12405 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12406 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12408 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12409 ins->type = STACK_VTYPE;
12410 ins->klass = mono_defaults.argumenthandle_class;
12420 MonoInst *cmp, *arg1, *arg2;
12428 * The following transforms:
12429 * CEE_CEQ into OP_CEQ
12430 * CEE_CGT into OP_CGT
12431 * CEE_CGT_UN into OP_CGT_UN
12432 * CEE_CLT into OP_CLT
12433 * CEE_CLT_UN into OP_CLT_UN
12435 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12437 MONO_INST_NEW (cfg, ins, cmp->opcode);
12438 cmp->sreg1 = arg1->dreg;
12439 cmp->sreg2 = arg2->dreg;
12440 type_from_op (cfg, cmp, arg1, arg2);
12442 add_widen_op (cfg, cmp, &arg1, &arg2);
12443 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12444 cmp->opcode = OP_LCOMPARE;
12445 else if (arg1->type == STACK_R4)
12446 cmp->opcode = OP_RCOMPARE;
12447 else if (arg1->type == STACK_R8)
12448 cmp->opcode = OP_FCOMPARE;
12450 cmp->opcode = OP_ICOMPARE;
12451 MONO_ADD_INS (cfg->cbb, cmp);
12452 ins->type = STACK_I4;
12453 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12454 type_from_op (cfg, ins, arg1, arg2);
12456 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12458 * The backends expect the fceq opcodes to do the
12461 ins->sreg1 = cmp->sreg1;
12462 ins->sreg2 = cmp->sreg2;
12465 MONO_ADD_INS (cfg->cbb, ins);
12471 MonoInst *argconst;
12472 MonoMethod *cil_method;
12474 CHECK_STACK_OVF (1);
12476 n = read32 (ip + 2);
12477 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12480 mono_class_init (cmethod->klass);
12482 mono_save_token_info (cfg, image, n, cmethod);
12484 context_used = mini_method_check_context_used (cfg, cmethod);
12486 cil_method = cmethod;
12487 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12488 emit_method_access_failure (cfg, method, cil_method);
12490 if (mono_security_core_clr_enabled ())
12491 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12494 * Optimize the common case of ldftn+delegate creation
12496 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12497 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12498 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12499 MonoInst *target_ins, *handle_ins;
12500 MonoMethod *invoke;
12501 int invoke_context_used;
12503 invoke = mono_get_delegate_invoke (ctor_method->klass);
12504 if (!invoke || !mono_method_signature (invoke))
12507 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12509 target_ins = sp [-1];
12511 if (mono_security_core_clr_enabled ())
12512 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12514 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12515 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12516 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12518 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12522 /* FIXME: SGEN support */
12523 if (invoke_context_used == 0 || cfg->llvm_only) {
12525 if (cfg->verbose_level > 3)
12526 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12527 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12530 CHECK_CFG_EXCEPTION;
12540 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12541 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12545 inline_costs += 10 * num_calls++;
12548 case CEE_LDVIRTFTN: {
12549 MonoInst *args [2];
12553 n = read32 (ip + 2);
12554 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12557 mono_class_init (cmethod->klass);
12559 context_used = mini_method_check_context_used (cfg, cmethod);
12561 if (mono_security_core_clr_enabled ())
12562 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12565 * Optimize the common case of ldvirtftn+delegate creation
12567 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12568 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12569 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12570 MonoInst *target_ins, *handle_ins;
12571 MonoMethod *invoke;
12572 int invoke_context_used;
12573 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12575 invoke = mono_get_delegate_invoke (ctor_method->klass);
12576 if (!invoke || !mono_method_signature (invoke))
12579 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12581 target_ins = sp [-1];
12583 if (mono_security_core_clr_enabled ())
12584 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12586 /* FIXME: SGEN support */
12587 if (invoke_context_used == 0 || cfg->llvm_only) {
12589 if (cfg->verbose_level > 3)
12590 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12591 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12594 CHECK_CFG_EXCEPTION;
12607 args [1] = emit_get_rgctx_method (cfg, context_used,
12608 cmethod, MONO_RGCTX_INFO_METHOD);
12611 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12613 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12616 inline_costs += 10 * num_calls++;
12620 CHECK_STACK_OVF (1);
12622 n = read16 (ip + 2);
12624 EMIT_NEW_ARGLOAD (cfg, ins, n);
12629 CHECK_STACK_OVF (1);
12631 n = read16 (ip + 2);
12633 NEW_ARGLOADA (cfg, ins, n);
12634 MONO_ADD_INS (cfg->cbb, ins);
12642 n = read16 (ip + 2);
12644 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12646 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12650 CHECK_STACK_OVF (1);
12652 n = read16 (ip + 2);
12654 EMIT_NEW_LOCLOAD (cfg, ins, n);
12659 unsigned char *tmp_ip;
12660 CHECK_STACK_OVF (1);
12662 n = read16 (ip + 2);
12665 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12671 EMIT_NEW_LOCLOADA (cfg, ins, n);
12680 n = read16 (ip + 2);
12682 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12684 emit_stloc_ir (cfg, sp, header, n);
12688 case CEE_LOCALLOC: {
12690 MonoBasicBlock *non_zero_bb, *end_bb;
12691 int alloc_ptr = alloc_preg (cfg);
12693 if (sp != stack_start)
12695 if (cfg->method != method)
12697 * Inlining this into a loop in a parent could lead to
12698 * stack overflows which is different behavior than the
12699 * non-inlined case, thus disable inlining in this case.
12701 INLINE_FAILURE("localloc");
12703 NEW_BBLOCK (cfg, non_zero_bb);
12704 NEW_BBLOCK (cfg, end_bb);
12706 /* if size != zero */
12707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12708 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12710 //size is zero, so result is NULL
12711 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12714 MONO_START_BB (cfg, non_zero_bb);
12715 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12716 ins->dreg = alloc_ptr;
12717 ins->sreg1 = sp [0]->dreg;
12718 ins->type = STACK_PTR;
12719 MONO_ADD_INS (cfg->cbb, ins);
12721 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12723 ins->flags |= MONO_INST_INIT;
12725 MONO_START_BB (cfg, end_bb);
12726 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12727 ins->type = STACK_PTR;
12733 case CEE_ENDFILTER: {
12734 MonoExceptionClause *clause, *nearest;
12739 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12741 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12742 ins->sreg1 = (*sp)->dreg;
12743 MONO_ADD_INS (cfg->cbb, ins);
12744 start_new_bblock = 1;
12748 for (cc = 0; cc < header->num_clauses; ++cc) {
12749 clause = &header->clauses [cc];
12750 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12751 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12752 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12755 g_assert (nearest);
12756 if ((ip - header->code) != nearest->handler_offset)
12761 case CEE_UNALIGNED_:
12762 ins_flag |= MONO_INST_UNALIGNED;
12763 /* FIXME: record alignment? we can assume 1 for now */
12767 case CEE_VOLATILE_:
12768 ins_flag |= MONO_INST_VOLATILE;
12772 ins_flag |= MONO_INST_TAILCALL;
12773 cfg->flags |= MONO_CFG_HAS_TAIL;
12774 /* Can't inline tail calls at this time */
12775 inline_costs += 100000;
12782 token = read32 (ip + 2);
12783 klass = mini_get_class (method, token, generic_context);
12784 CHECK_TYPELOAD (klass);
12785 if (generic_class_is_reference_type (cfg, klass))
12786 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12788 mini_emit_initobj (cfg, *sp, NULL, klass);
12792 case CEE_CONSTRAINED_:
12794 token = read32 (ip + 2);
12795 constrained_class = mini_get_class (method, token, generic_context);
12796 CHECK_TYPELOAD (constrained_class);
12800 case CEE_INITBLK: {
12801 MonoInst *iargs [3];
12805 /* Skip optimized paths for volatile operations. */
12806 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12807 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12808 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12809 /* emit_memset only works when val == 0 */
12810 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12813 iargs [0] = sp [0];
12814 iargs [1] = sp [1];
12815 iargs [2] = sp [2];
12816 if (ip [1] == CEE_CPBLK) {
12818 * FIXME: It's unclear whether we should be emitting both the acquire
12819 * and release barriers for cpblk. It is technically both a load and
12820 * store operation, so it seems like that's the sensible thing to do.
12822 * FIXME: We emit full barriers on both sides of the operation for
12823 * simplicity. We should have a separate atomic memcpy method instead.
12825 MonoMethod *memcpy_method = get_memcpy_method ();
12827 if (ins_flag & MONO_INST_VOLATILE)
12828 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12830 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12831 call->flags |= ins_flag;
12833 if (ins_flag & MONO_INST_VOLATILE)
12834 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12836 MonoMethod *memset_method = get_memset_method ();
12837 if (ins_flag & MONO_INST_VOLATILE) {
12838 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12839 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12841 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12842 call->flags |= ins_flag;
12853 ins_flag |= MONO_INST_NOTYPECHECK;
12855 ins_flag |= MONO_INST_NORANGECHECK;
12856 /* we ignore the no-nullcheck for now since we
12857 * really do it explicitly only when doing callvirt->call
12861 case CEE_RETHROW: {
12863 int handler_offset = -1;
12865 for (i = 0; i < header->num_clauses; ++i) {
12866 MonoExceptionClause *clause = &header->clauses [i];
12867 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12868 handler_offset = clause->handler_offset;
12873 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12875 if (handler_offset == -1)
12878 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12879 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12880 ins->sreg1 = load->dreg;
12881 MONO_ADD_INS (cfg->cbb, ins);
12883 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12884 MONO_ADD_INS (cfg->cbb, ins);
12887 link_bblock (cfg, cfg->cbb, end_bblock);
12888 start_new_bblock = 1;
12896 CHECK_STACK_OVF (1);
12898 token = read32 (ip + 2);
12899 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12900 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12903 val = mono_type_size (type, &ialign);
12905 MonoClass *klass = mini_get_class (method, token, generic_context);
12906 CHECK_TYPELOAD (klass);
12908 val = mono_type_size (&klass->byval_arg, &ialign);
12910 if (mini_is_gsharedvt_klass (klass))
12911 GSHAREDVT_FAILURE (*ip);
12913 EMIT_NEW_ICONST (cfg, ins, val);
12918 case CEE_REFANYTYPE: {
12919 MonoInst *src_var, *src;
12921 GSHAREDVT_FAILURE (*ip);
12927 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12929 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12930 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12931 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12936 case CEE_READONLY_:
12949 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12959 g_warning ("opcode 0x%02x not handled", *ip);
12963 if (start_new_bblock != 1)
12966 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12967 if (cfg->cbb->next_bb) {
12968 /* This could already be set because of inlining, #693905 */
12969 MonoBasicBlock *bb = cfg->cbb;
12971 while (bb->next_bb)
12973 bb->next_bb = end_bblock;
12975 cfg->cbb->next_bb = end_bblock;
12978 if (cfg->method == method && cfg->domainvar) {
12980 MonoInst *get_domain;
12982 cfg->cbb = init_localsbb;
12984 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12985 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12986 MONO_ADD_INS (cfg->cbb, store);
12989 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12990 if (cfg->compile_aot)
12991 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12992 mono_get_got_var (cfg);
12995 if (cfg->method == method && cfg->got_var)
12996 mono_emit_load_got_addr (cfg);
12998 if (init_localsbb) {
12999 cfg->cbb = init_localsbb;
13001 for (i = 0; i < header->num_locals; ++i) {
13002 emit_init_local (cfg, i, header->locals [i], init_locals);
13006 if (cfg->init_ref_vars && cfg->method == method) {
13007 /* Emit initialization for ref vars */
13008 // FIXME: Avoid duplication initialization for IL locals.
13009 for (i = 0; i < cfg->num_varinfo; ++i) {
13010 MonoInst *ins = cfg->varinfo [i];
13012 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13013 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13017 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13018 cfg->cbb = init_localsbb;
13019 emit_push_lmf (cfg);
13022 cfg->cbb = init_localsbb;
13023 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13026 MonoBasicBlock *bb;
13029 * Make seq points at backward branch targets interruptable.
13031 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13032 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13033 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13036 /* Add a sequence point for method entry/exit events */
13037 if (seq_points && cfg->gen_sdb_seq_points) {
13038 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13039 MONO_ADD_INS (init_localsbb, ins);
13040 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13041 MONO_ADD_INS (cfg->bb_exit, ins);
13045 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13046 * the code they refer to was dead (#11880).
13048 if (sym_seq_points) {
13049 for (i = 0; i < header->code_size; ++i) {
13050 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13053 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13054 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13061 if (cfg->method == method) {
13062 MonoBasicBlock *bb;
13063 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13064 if (bb == cfg->bb_init)
13067 bb->region = mono_find_block_region (cfg, bb->real_offset);
13069 mono_create_spvar_for_region (cfg, bb->region);
13070 if (cfg->verbose_level > 2)
13071 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13074 MonoBasicBlock *bb;
13075 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13076 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13077 bb->real_offset = inline_offset;
13081 if (inline_costs < 0) {
13084 /* Method is too large */
13085 mname = mono_method_full_name (method, TRUE);
13086 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13090 if ((cfg->verbose_level > 2) && (cfg->method == method))
13091 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13096 g_assert (!mono_error_ok (&cfg->error));
13100 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13104 set_exception_type_from_invalid_il (cfg, method, ip);
13108 g_slist_free (class_inits);
13109 mono_basic_block_free (original_bb);
13110 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13111 if (cfg->exception_type)
13114 return inline_costs;
13118 store_membase_reg_to_store_membase_imm (int opcode)
13121 case OP_STORE_MEMBASE_REG:
13122 return OP_STORE_MEMBASE_IMM;
13123 case OP_STOREI1_MEMBASE_REG:
13124 return OP_STOREI1_MEMBASE_IMM;
13125 case OP_STOREI2_MEMBASE_REG:
13126 return OP_STOREI2_MEMBASE_IMM;
13127 case OP_STOREI4_MEMBASE_REG:
13128 return OP_STOREI4_MEMBASE_IMM;
13129 case OP_STOREI8_MEMBASE_REG:
13130 return OP_STOREI8_MEMBASE_IMM;
13132 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map OPCODE, whose second operand is a vreg, to the equivalent opcode
 * taking an inline immediate instead (e.g. an integer add maps to
 * OP_IADD_IMM). Returns -1 when no immediate variant exists.
 * NOTE(review): most case labels are elided in this excerpt; each mapping is
 * presumably keyed on the non-IMM counterpart of the returned opcode —
 * confirm against the full source.
 */
mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
return OP_IADD_IMM;
return OP_ISUB_IMM;
return OP_IDIV_IMM;
return OP_IDIV_UN_IMM;
return OP_IREM_IMM;
return OP_IREM_UN_IMM;
return OP_IMUL_IMM;
return OP_IAND_IMM;
return OP_IXOR_IMM;
return OP_ISHL_IMM;
return OP_ISHR_IMM;
return OP_ISHR_UN_IMM;
/* 64 bit long ALU ops */
return OP_LADD_IMM;
return OP_LSUB_IMM;
return OP_LAND_IMM;
return OP_LXOR_IMM;
return OP_LSHL_IMM;
return OP_LSHR_IMM;
return OP_LSHR_UN_IMM;
/* Long rem has an immediate form only when longs fit in one register */
#if SIZEOF_REGISTER == 8
return OP_LREM_IMM;
/* Compares */
return OP_COMPARE_IMM;
return OP_ICOMPARE_IMM;
return OP_LCOMPARE_IMM;
/* Stores: immediate goes into the stored value slot */
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMBASE_IMM;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMBASE_IMM;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMBASE_IMM;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
/* Arch specific opcodes which only exist on x86/amd64 */
#if defined(TARGET_X86) || defined (TARGET_AMD64)
return OP_X86_PUSH_IMM;
case OP_X86_COMPARE_MEMBASE_REG:
return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(TARGET_AMD64)
case OP_AMD64_ICOMPARE_MEMBASE_REG:
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through an immediate becomes a direct call */
case OP_VOIDCALL_REG:
return OP_VOIDCALL;
return OP_LOCALLOC_IMM;
13232 ldind_to_load_membase (int opcode)
13236 return OP_LOADI1_MEMBASE;
13238 return OP_LOADU1_MEMBASE;
13240 return OP_LOADI2_MEMBASE;
13242 return OP_LOADU2_MEMBASE;
13244 return OP_LOADI4_MEMBASE;
13246 return OP_LOADU4_MEMBASE;
13248 return OP_LOAD_MEMBASE;
13249 case CEE_LDIND_REF:
13250 return OP_LOAD_MEMBASE;
13252 return OP_LOADI8_MEMBASE;
13254 return OP_LOADR4_MEMBASE;
13256 return OP_LOADR8_MEMBASE;
13258 g_assert_not_reached ();
13265 stind_to_store_membase (int opcode)
13269 return OP_STOREI1_MEMBASE_REG;
13271 return OP_STOREI2_MEMBASE_REG;
13273 return OP_STOREI4_MEMBASE_REG;
13275 case CEE_STIND_REF:
13276 return OP_STORE_MEMBASE_REG;
13278 return OP_STOREI8_MEMBASE_REG;
13280 return OP_STORER4_MEMBASE_REG;
13282 return OP_STORER8_MEMBASE_REG;
13284 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the corresponding LOAD*_MEM opcode, which
 * loads from an absolute address instead of basereg+offset. Returns -1 when
 * the target architecture has no such addressing mode.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (opcode == OP_LOAD_MEMBASE)
		return OP_LOAD_MEM;
	if (opcode == OP_LOADU1_MEMBASE)
		return OP_LOADU1_MEM;
	if (opcode == OP_LOADU2_MEMBASE)
		return OP_LOADU2_MEM;
	if (opcode == OP_LOADI4_MEMBASE)
		return OP_LOADI4_MEM;
	if (opcode == OP_LOADU4_MEMBASE)
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	if (opcode == OP_LOADI8_MEMBASE)
		return OP_LOADI8_MEM;
#endif
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is stored by STORE_OPCODE, return the
 * x86/amd64 read-modify-write opcode which operates directly on the memory
 * destination (e.g. add-to-membase), avoiding the separate store. Returns -1
 * (elided here — presumably; confirm against the full source) when no fused
 * form applies.
 * NOTE(review): the case labels for the ALU opcodes are elided in this
 * excerpt; the mapping is inferred from the returned *_MEMBASE_* opcodes.
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(TARGET_X86)
/* Only plain pointer-sized / 32 bit stores can be fused on x86 */
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* reg-to-membase ALU forms */
return OP_X86_ADD_MEMBASE_REG;
return OP_X86_SUB_MEMBASE_REG;
return OP_X86_AND_MEMBASE_REG;
return OP_X86_OR_MEMBASE_REG;
return OP_X86_XOR_MEMBASE_REG;
/* imm-to-membase ALU forms */
return OP_X86_ADD_MEMBASE_IMM;
return OP_X86_SUB_MEMBASE_IMM;
return OP_X86_AND_MEMBASE_IMM;
return OP_X86_OR_MEMBASE_IMM;
return OP_X86_XOR_MEMBASE_IMM;
#if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 64 bit stores */
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit reg-to-membase forms (shared with x86) */
return OP_X86_ADD_MEMBASE_REG;
return OP_X86_SUB_MEMBASE_REG;
return OP_X86_AND_MEMBASE_REG;
return OP_X86_OR_MEMBASE_REG;
return OP_X86_XOR_MEMBASE_REG;
/* 32 bit imm-to-membase forms */
return OP_X86_ADD_MEMBASE_IMM;
return OP_X86_SUB_MEMBASE_IMM;
return OP_X86_AND_MEMBASE_IMM;
return OP_X86_OR_MEMBASE_IMM;
return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit reg-to-membase forms */
return OP_AMD64_ADD_MEMBASE_REG;
return OP_AMD64_SUB_MEMBASE_REG;
return OP_AMD64_AND_MEMBASE_REG;
return OP_AMD64_OR_MEMBASE_REG;
return OP_AMD64_XOR_MEMBASE_REG;
/* 64 bit imm-to-membase forms */
return OP_AMD64_ADD_MEMBASE_IMM;
return OP_AMD64_SUB_MEMBASE_IMM;
return OP_AMD64_AND_MEMBASE_IMM;
return OP_AMD64_OR_MEMBASE_IMM;
return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store which consumes it into
 * a single x86/amd64 SETcc-to-memory opcode. Only applies when the store is
 * a one-byte store (SETcc writes a single byte).
 * NOTE(review): the case labels selecting the compare-result opcodes are
 * elided in this excerpt — confirm against the full source.
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (store_opcode == OP_STOREI1_MEMBASE_REG)
return OP_X86_SETEQ_MEMBASE;
if (store_opcode == OP_STOREI1_MEMBASE_REG)
return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse LOAD_OPCODE (the load feeding OPCODE's first source operand) into
 * OPCODE itself, producing an x86/amd64 opcode which reads that operand
 * directly from memory. Returns -1 (elided here — presumably; confirm
 * against the full source) when no fused form applies.
 * NOTE(review): several case labels are elided in this excerpt.
 */
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
return OP_X86_COMPARE_MEMBASE8_IMM;
*/
/* Only pointer-sized / 32 bit loads can be fused on x86 */
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return OP_X86_PUSH_MEMBASE;
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
return OP_X86_COMPARE_MEMBASE_IMM;
return OP_X86_COMPARE_MEMBASE_REG;
#ifdef TARGET_AMD64
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
return OP_X86_COMPARE_MEMBASE8_IMM;
*/
/* Push requires a full pointer-sized load (ilp32 narrows OP_LOAD_MEMBASE) */
if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_X86_PUSH_MEMBASE;
/* FIXME: This only works for 32 bit immediates
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_IMM;
*/
case OP_ICOMPARE_IMM:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Pick 32 vs 64 bit compare based on the width of the fused load */
if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
return OP_AMD64_ICOMPARE_MEMBASE_REG;
if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_REG;
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse LOAD_OPCODE (the load feeding OPCODE's second source operand) into
 * OPCODE itself, producing an x86/amd64 reg-op-membase opcode. Returns -1
 * (elided here — presumably; confirm against the full source) when no fused
 * form applies.
 * NOTE(review): the case labels selecting the ALU opcodes are elided in
 * this excerpt; the mapping is inferred from the returned opcodes.
 */
op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* Only pointer-sized / 32 bit loads can be fused on x86 */
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return OP_X86_COMPARE_REG_MEMBASE;
return OP_X86_ADD_REG_MEMBASE;
return OP_X86_SUB_REG_MEMBASE;
return OP_X86_AND_REG_MEMBASE;
return OP_X86_OR_REG_MEMBASE;
return OP_X86_XOR_REG_MEMBASE;
#ifdef TARGET_AMD64
/* 32 bit loads (ilp32 narrows OP_LOAD_MEMBASE to 32 bits) */
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
return OP_AMD64_ICOMPARE_REG_MEMBASE;
return OP_X86_ADD_REG_MEMBASE;
return OP_X86_SUB_REG_MEMBASE;
return OP_X86_AND_REG_MEMBASE;
return OP_X86_OR_REG_MEMBASE;
return OP_X86_XOR_REG_MEMBASE;
/* 64 bit loads */
} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
return OP_AMD64_COMPARE_REG_MEMBASE;
return OP_AMD64_ADD_REG_MEMBASE;
return OP_AMD64_SUB_REG_MEMBASE;
return OP_AMD64_AND_REG_MEMBASE;
return OP_AMD64_OR_REG_MEMBASE;
return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to convert opcodes which this
 * architecture emulates in software (long shifts on 32 bit targets without
 * native support, and mul/div when MONO_ARCH_EMULATE_* is set) — converting
 * those to an IMM form would bypass the emulation path.
 * NOTE(review): the case lists under each #if are elided in this excerpt —
 * confirm against the full source.
 */
mono_op_to_op_imm_noemul (int opcode)
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can be converted normally */
return mono_op_to_op_imm (opcode);
* mono_handle_global_vregs:
* Make vregs used in more than one bblock 'global', i.e. allocate a variable
* for them, so the rest of the JIT (regalloc, spilling) treats them as
* cross-bblock values; conversely, variables confined to a single bblock are
* demoted to local vregs, and the varinfo tables are compacted.
mono_handle_global_vregs (MonoCompile *cfg)
gint32 *vreg_to_bb;
MonoBasicBlock *bb;
/*
 * vreg_to_bb sentinel scheme (see below): 0 = unseen, block_num + 1 = seen
 * in exactly that bblock, -1 = seen in more than one bblock.
 * NOTE(review): sizeof (gint32*) allocates pointer-size per entry instead of
 * sizeof (gint32) — harmless over-allocation, but probably unintended.
 */
vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->uses_simd_intrinsics)
mono_simd_simplify_indirection (cfg);
/* Find local vregs used in more than one bb */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins = bb->code;
int block_num = bb->block_num;
if (cfg->verbose_level > 2)
printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
for (; ins; ins = ins->next) {
const char *spec = INS_INFO (ins->opcode);
int regtype = 0, regindex;
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to IR opcodes */
g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
for (regindex = 0; regindex < 4; regindex ++) {
if (regindex == 0) {
regtype = spec [MONO_INST_DEST];
if (regtype == ' ')
} else if (regindex == 1) {
regtype = spec [MONO_INST_SRC1];
if (regtype == ' ')
} else if (regindex == 2) {
regtype = spec [MONO_INST_SRC2];
if (regtype == ' ')
} else if (regindex == 3) {
regtype = spec [MONO_INST_SRC3];
if (regtype == ' ')
#if SIZEOF_REGISTER == 4
/* In the LLVM case, the long opcodes are not decomposed */
if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
* Since some instructions reference the original long vreg,
* and some reference the two component vregs, it is quite hard
* to determine when it needs to be global. So be conservative.
if (!get_vreg_to_inst (cfg, vreg)) {
mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
if (cfg->verbose_level > 2)
printf ("LONG VREG R%d made global.\n", vreg);
* Make the component vregs volatile since the optimizations can
* get confused otherwise.
get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
g_assert (vreg != -1);
prev_bb = vreg_to_bb [vreg];
if (prev_bb == 0) {
/* 0 is a valid block num */
vreg_to_bb [vreg] = block_num + 1;
} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers don't need a variable */
if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
if (!get_vreg_to_inst (cfg, vreg)) {
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable whose managed type matches the vreg's regtype */
if (vreg_is_ref (cfg, vreg))
mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
g_assert_not_reached ();
/* Flag as having been used in more than one bb */
vreg_to_bb [vreg] = -1;
/* If a variable is used in only one bblock, convert it into a local vreg */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *var = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
switch (var->type) {
#if SIZEOF_REGISTER == 8
#if !defined(TARGET_X86)
/* Enabling this screws up the fp stack on x86 */
if (mono_arch_is_soft_float ())
if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
* Make that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization
/* This is too slow for JIT compilation */
if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
int def_index, call_index, ins_index;
gboolean spilled = FALSE;
for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
const char *spec = INS_INFO (ins->opcode);
if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
def_index = ins_index;
/*
 * FIXME(review): the second clause below duplicates the first
 * (SRC1/sreg1 checked twice); SRC2/sreg2 was almost certainly
 * intended, so uses through sreg2 are currently missed — confirm
 * against upstream.
 */
if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
if (call_index > def_index) {
if (MONO_IS_CALL (ins))
call_index = ins_index;
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: the variable becomes a plain local vreg */
var->flags |= MONO_INST_IS_DEAD;
cfg->vreg_to_inst [var->dreg] = NULL;
* Compress the varinfo and vars tables so the liveness computation is faster and
* takes up less space.
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if (pos < i && cfg->locals_start == i)
cfg->locals_start = pos;
if (!(var->flags & MONO_INST_IS_DEAD)) {
cfg->varinfo [pos] = cfg->varinfo [i];
cfg->varinfo [pos]->inst_c0 = pos;
memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
if (cfg->varinfo [pos]->type == STACK_I8) {
/* Modify the two component vars too */
var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
cfg->num_varinfo = pos;
if (cfg->locals_start > cfg->num_varinfo)
cfg->locals_start = cfg->num_varinfo;
* mono_allocate_gsharedvt_vars:
* Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
* Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
* The per-vreg entry is stored as idx + 1 for locals (so 0 means "no entry")
* and -1 for arguments, which get their offsets from the caller instead.
mono_allocate_gsharedvt_vars (MonoCompile *cfg)
cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Variables at or after locals_start are IL locals; earlier ones are args */
if (i >= cfg->locals_start) {
idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* +1 so a zero entry in the table means "no slot" */
cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
ins->opcode = OP_GSHAREDVT_LOCAL;
ins->inst_imm = idx;
cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13860 * mono_spill_global_vars:
13862 * Generate spill code for variables which are not allocated to registers,
13863 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13864 * code is generated which could be optimized by the local optimization passes.
13867 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/*
 * NOTE(review): this extraction has lines elided (the declarations of 'ins',
 * 'spec2', 'lvreg', 'lvregs', 'store_opcode' and various closing braces are
 * missing, and comment delimiters were stripped) -- consult the upstream
 * method-to-ir.c before editing. Comment delimiters are restored below.
 */
13869 MonoBasicBlock *bb;
13871 int orig_next_vreg;
13872 guint32 *vreg_to_lvreg;
13874 guint32 i, lvregs_len, lvregs_size;
13875 gboolean dest_has_lvreg = FALSE;
13876 MonoStackType stacktypes [128];
13877 MonoInst **live_range_start, **live_range_end;
13878 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13880 *need_local_opts = FALSE;
/* spec2: scratch ins-spec used while a store's dreg/sreg2 are temporarily swapped */
13882 memset (spec2, 0, sizeof (spec2));
13884 /* FIXME: Move this function to mini.c */
/* Map ins-spec register-type characters to stack types for alloc_dreg () */
13885 stacktypes ['i'] = STACK_PTR;
13886 stacktypes ['l'] = STACK_I8;
13887 stacktypes ['f'] = STACK_R8;
13888 #ifdef MONO_ARCH_SIMD_INTRINSICS
13889 stacktypes ['x'] = STACK_VTYPE;
13892 #if SIZEOF_REGISTER == 4
13893 /* Create MonoInsts for longs */
13894 for (i = 0; i < cfg->num_varinfo; i++) {
13895 MonoInst *ins = cfg->varinfo [i];
13897 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13898 switch (ins->type) {
13903 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13906 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low/high component vregs their own stack slots inside the 64 bit slot */
13908 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13910 tree->opcode = OP_REGOFFSET;
13911 tree->inst_basereg = ins->inst_basereg;
13912 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13914 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13916 tree->opcode = OP_REGOFFSET;
13917 tree->inst_basereg = ins->inst_basereg;
13918 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13928 if (cfg->compute_gc_maps) {
13929 /* registers need liveness info even for !non refs */
13930 for (i = 0; i < cfg->num_varinfo; i++) {
13931 MonoInst *ins = cfg->varinfo [i];
13933 if (ins->opcode == OP_REGVAR)
13934 ins->flags |= MONO_INST_GC_TRACK;
13938 /* FIXME: widening and truncation */
/*
13941 * As an optimization, when a variable allocated to the stack is first loaded into
13942 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13943 * the variable again.
 */
13945 orig_next_vreg = cfg->next_vreg;
13946 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13947 lvregs_size = 1024;
13948 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
/*
13952 * These arrays contain the first and last instructions accessing a given
13954 * Since we emit bblocks in the same order we process them here, and we
13955 * don't split live ranges, these will precisely describe the live range of
13956 * the variable, i.e. the instruction range where a valid value can be found
13957 * in the variables location.
13958 * The live range is computed using the liveness info computed by the liveness pass.
13959 * We can't use vmv->range, since that is an abstract live range, and we need
13960 * one which is instruction precise.
13961 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
 */
13963 /* FIXME: Only do this if debugging info is requested */
13964 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
13965 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13966 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13967 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13969 /* Add spill loads/stores */
13970 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13973 if (cfg->verbose_level > 2)
13974 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13976 /* Clear vreg_to_lvreg array */
13977 for (i = 0; i < lvregs_len; i++)
13978 vreg_to_lvreg [lvregs [i]] = 0;
13982 MONO_BB_FOR_EACH_INS (bb, ins) {
13983 const char *spec = INS_INFO (ins->opcode);
13984 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13985 gboolean store, no_lvreg;
13986 int sregs [MONO_MAX_SRC_REGS];
13988 if (G_UNLIKELY (cfg->verbose_level > 2))
13989 mono_print_ins (ins);
13991 if (ins->opcode == OP_NOP)
/*
13995 * We handle LDADDR here as well, since it can only be decomposed
13996 * when variable addresses are known.
 */
13998 if (ins->opcode == OP_LDADDR) {
13999 MonoInst *var = (MonoInst *)ins->inst_p0;
14001 if (var->opcode == OP_VTARG_ADDR) {
14002 /* Happens on SPARC/S390 where vtypes are passed by reference */
14003 MonoInst *vtaddr = var->inst_left;
14004 if (vtaddr->opcode == OP_REGVAR) {
14005 ins->opcode = OP_MOVE;
14006 ins->sreg1 = vtaddr->dreg;
14008 else if (var->inst_left->opcode == OP_REGOFFSET) {
14009 ins->opcode = OP_LOAD_MEMBASE;
14010 ins->inst_basereg = vtaddr->inst_basereg;
14011 ins->inst_offset = vtaddr->inst_offset;
14014 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14015 /* gsharedvt arg passed by ref */
14016 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14018 ins->opcode = OP_LOAD_MEMBASE;
14019 ins->inst_basereg = var->inst_basereg;
14020 ins->inst_offset = var->inst_offset;
14021 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14022 MonoInst *load, *load2, *load3;
14023 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14024 int reg1, reg2, reg3;
14025 MonoInst *info_var = cfg->gsharedvt_info_var;
14026 MonoInst *locals_var = cfg->gsharedvt_locals_var;
/*
14030 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
 */
14033 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14035 g_assert (info_var);
14036 g_assert (locals_var);
14038 /* Mark the instruction used to compute the locals var as used */
14039 cfg->gsharedvt_locals_var_ins = NULL;
14041 /* Load the offset */
14042 if (info_var->opcode == OP_REGOFFSET) {
14043 reg1 = alloc_ireg (cfg);
14044 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14045 } else if (info_var->opcode == OP_REGVAR) {
14047 reg1 = info_var->dreg;
14049 g_assert_not_reached ();
14051 reg2 = alloc_ireg (cfg);
14052 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14053 /* Load the locals area address */
14054 reg3 = alloc_ireg (cfg);
14055 if (locals_var->opcode == OP_REGOFFSET) {
14056 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14057 } else if (locals_var->opcode == OP_REGVAR) {
14058 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14060 g_assert_not_reached ();
14062 /* Compute the address */
14063 ins->opcode = OP_PADD;
14067 mono_bblock_insert_before_ins (bb, ins, load3);
14068 mono_bblock_insert_before_ins (bb, load3, load2);
14070 mono_bblock_insert_before_ins (bb, load2, load);
14072 g_assert (var->opcode == OP_REGOFFSET);
14074 ins->opcode = OP_ADD_IMM;
14075 ins->sreg1 = var->inst_basereg;
14076 ins->inst_imm = var->inst_offset;
14079 *need_local_opts = TRUE;
14080 spec = INS_INFO (ins->opcode);
/* By this point only low-level opcodes should remain, no CIL-level ones */
14083 if (ins->opcode < MONO_CEE_LAST) {
14084 mono_print_ins (ins);
14085 g_assert_not_reached ();
/*
14089 * Store opcodes have destbasereg in the dreg, but in reality, it is an
 * extra source register; swap dreg/sreg2 while processing and restore below.
 */
14093 if (MONO_IS_STORE_MEMBASE (ins)) {
14094 tmp_reg = ins->dreg;
14095 ins->dreg = ins->sreg2;
14096 ins->sreg2 = tmp_reg;
14099 spec2 [MONO_INST_DEST] = ' ';
14100 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14101 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14102 spec2 [MONO_INST_SRC3] = ' ';
14104 } else if (MONO_IS_STORE_MEMINDEX (ins))
14105 g_assert_not_reached ();
14110 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14111 printf ("\t %.3s %d", spec, ins->dreg);
14112 num_sregs = mono_inst_get_src_registers (ins, sregs);
14113 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14114 printf (" %d", sregs [srcindex]);
/* Process the destination register: spill definitions of stack variables */
14121 regtype = spec [MONO_INST_DEST];
14122 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14125 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14126 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14127 MonoInst *store_ins;
14129 MonoInst *def_ins = ins;
14130 int dreg = ins->dreg; /* The original vreg */
14132 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14134 if (var->opcode == OP_REGVAR) {
14135 ins->dreg = var->dreg;
14136 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
14138 * Instead of emitting a load+store, use a _membase opcode.
 */
14140 g_assert (var->opcode == OP_REGOFFSET);
14141 if (ins->opcode == OP_MOVE) {
14145 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14146 ins->inst_basereg = var->inst_basereg;
14147 ins->inst_offset = var->inst_offset;
14150 spec = INS_INFO (ins->opcode);
14154 g_assert (var->opcode == OP_REGOFFSET);
14156 prev_dreg = ins->dreg;
14158 /* Invalidate any previous lvreg for this vreg */
14159 vreg_to_lvreg [ins->dreg] = 0;
14163 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14165 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; a store to the real slot is inserted after */
14168 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14170 #if SIZEOF_REGISTER != 8
14171 if (regtype == 'l') {
14172 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14173 mono_bblock_insert_after_ins (bb, ins, store_ins);
14174 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14175 mono_bblock_insert_after_ins (bb, ins, store_ins);
14176 def_ins = store_ins;
14181 g_assert (store_opcode != OP_STOREV_MEMBASE);
14183 /* Try to fuse the store into the instruction itself */
14184 /* FIXME: Add more instructions */
14185 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14186 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14187 ins->inst_imm = ins->inst_c0;
14188 ins->inst_destbasereg = var->inst_basereg;
14189 ins->inst_offset = var->inst_offset;
14190 spec = INS_INFO (ins->opcode);
14191 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14192 ins->opcode = store_opcode;
14193 ins->inst_destbasereg = var->inst_basereg;
14194 ins->inst_offset = var->inst_offset;
/* The instruction became a store: swap dreg/sreg2 as for other stores above */
14198 tmp_reg = ins->dreg;
14199 ins->dreg = ins->sreg2;
14200 ins->sreg2 = tmp_reg;
14203 spec2 [MONO_INST_DEST] = ' ';
14204 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14205 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14206 spec2 [MONO_INST_SRC3] = ' ';
14208 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14209 // FIXME: The backends expect the base reg to be in inst_basereg
14210 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14212 ins->inst_basereg = var->inst_basereg;
14213 ins->inst_offset = var->inst_offset;
14214 spec = INS_INFO (ins->opcode);
14216 /* printf ("INS: "); mono_print_ins (ins); */
14217 /* Create a store instruction */
14218 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14220 /* Insert it after the instruction */
14221 mono_bblock_insert_after_ins (bb, ins, store_ins);
14223 def_ins = store_ins;
/*
14226 * We can't assign ins->dreg to var->dreg here, since the
14227 * sregs could use it. So set a flag, and do it after
 * the sregs have been processed.
 */
14230 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14231 dest_has_lvreg = TRUE;
14236 if (def_ins && !live_range_start [dreg]) {
14237 live_range_start [dreg] = def_ins;
14238 live_range_start_bb [dreg] = bb;
14241 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14244 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14245 tmp->inst_c1 = dreg;
14246 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* Process the source registers: insert reloads for stack variables */
14253 num_sregs = mono_inst_get_src_registers (ins, sregs);
14254 for (srcindex = 0; srcindex < 3; ++srcindex) {
14255 regtype = spec [MONO_INST_SRC1 + srcindex];
14256 sreg = sregs [srcindex];
14258 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14259 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14260 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14261 MonoInst *use_ins = ins;
14262 MonoInst *load_ins;
14263 guint32 load_opcode;
14265 if (var->opcode == OP_REGVAR) {
14266 sregs [srcindex] = var->dreg;
14267 //mono_inst_set_src_registers (ins, sregs);
14268 live_range_end [sreg] = use_ins;
14269 live_range_end_bb [sreg] = bb;
14271 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14274 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14275 /* var->dreg is a hreg */
14276 tmp->inst_c1 = sreg;
14277 mono_bblock_insert_after_ins (bb, ins, tmp);
14283 g_assert (var->opcode == OP_REGOFFSET);
14285 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14287 g_assert (load_opcode != OP_LOADV_MEMBASE);
14289 if (vreg_to_lvreg [sreg]) {
14290 g_assert (vreg_to_lvreg [sreg] != -1);
14292 /* The variable is already loaded to an lvreg */
14293 if (G_UNLIKELY (cfg->verbose_level > 2))
14294 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14295 sregs [srcindex] = vreg_to_lvreg [sreg];
14296 //mono_inst_set_src_registers (ins, sregs);
14300 /* Try to fuse the load into the instruction */
14301 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14302 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14303 sregs [0] = var->inst_basereg;
14304 //mono_inst_set_src_registers (ins, sregs);
14305 ins->inst_offset = var->inst_offset;
14306 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14307 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14308 sregs [1] = var->inst_basereg;
14309 //mono_inst_set_src_registers (ins, sregs);
14310 ins->inst_offset = var->inst_offset;
14312 if (MONO_IS_REAL_MOVE (ins)) {
14313 ins->opcode = OP_NOP;
14316 //printf ("%d ", srcindex); mono_print_ins (ins);
14318 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14320 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14321 if (var->dreg == prev_dreg) {
/*
14323 * sreg refers to the value loaded by the load
14324 * emitted below, but we need to use ins->dreg
14325 * since it refers to the store emitted earlier.
 */
14329 g_assert (sreg != -1);
/* Cache the lvreg so later uses in this bblock skip the reload */
14330 vreg_to_lvreg [var->dreg] = sreg;
14331 if (lvregs_len >= lvregs_size) {
14332 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14333 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14334 lvregs = new_lvregs;
14337 lvregs [lvregs_len ++] = var->dreg;
14341 sregs [srcindex] = sreg;
14342 //mono_inst_set_src_registers (ins, sregs);
14344 #if SIZEOF_REGISTER != 8
14345 if (regtype == 'l') {
14346 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14347 mono_bblock_insert_before_ins (bb, ins, load_ins);
14348 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14349 mono_bblock_insert_before_ins (bb, ins, load_ins);
14350 use_ins = load_ins;
14355 #if SIZEOF_REGISTER == 4
14356 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14358 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14359 mono_bblock_insert_before_ins (bb, ins, load_ins);
14360 use_ins = load_ins;
14364 if (var->dreg < orig_next_vreg) {
14365 live_range_end [var->dreg] = use_ins;
14366 live_range_end_bb [var->dreg] = bb;
14369 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14372 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14373 tmp->inst_c1 = var->dreg;
14374 mono_bblock_insert_after_ins (bb, ins, tmp);
14378 mono_inst_set_src_registers (ins, sregs);
/* Sregs processed: now it is safe to publish the dreg's cached lvreg */
14380 if (dest_has_lvreg) {
14381 g_assert (ins->dreg != -1);
14382 vreg_to_lvreg [prev_dreg] = ins->dreg;
14383 if (lvregs_len >= lvregs_size) {
14384 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14385 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14386 lvregs = new_lvregs;
14389 lvregs [lvregs_len ++] = prev_dreg;
14390 dest_has_lvreg = FALSE;
/* Restore the dreg/sreg2 swap done for store opcodes above */
14394 tmp_reg = ins->dreg;
14395 ins->dreg = ins->sreg2;
14396 ins->sreg2 = tmp_reg;
14399 if (MONO_IS_CALL (ins)) {
14400 /* Clear vreg_to_lvreg array */
14401 for (i = 0; i < lvregs_len; i++)
14402 vreg_to_lvreg [lvregs [i]] = 0;
14404 } else if (ins->opcode == OP_NOP) {
14406 MONO_INST_NULLIFY_SREGS (ins);
14409 if (cfg->verbose_level > 2)
14410 mono_print_ins_index (1, ins);
14413 /* Extend the live range based on the liveness info */
14414 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14415 for (i = 0; i < cfg->num_varinfo; i ++) {
14416 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14418 if (vreg_is_volatile (cfg, vi->vreg))
14419 /* The liveness info is incomplete */
14422 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14423 /* Live from at least the first ins of this bb */
14424 live_range_start [vi->vreg] = bb->code;
14425 live_range_start_bb [vi->vreg] = bb;
14428 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14429 /* Live at least until the last ins of this bb */
14430 live_range_end [vi->vreg] = bb->last_ins;
14431 live_range_end_bb [vi->vreg] = bb;
/*
14438 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14439 * by storing the current native offset into MonoMethodVar->live_range_start/end.
 */
14441 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14442 for (i = 0; i < cfg->num_varinfo; ++i) {
14443 int vreg = MONO_VARINFO (cfg, i)->vreg;
14446 if (live_range_start [vreg]) {
14447 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14449 ins->inst_c1 = vreg;
14450 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14452 if (live_range_end [vreg]) {
14453 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14455 ins->inst_c1 = vreg;
14456 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14457 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14459 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14464 if (cfg->gsharedvt_locals_var_ins) {
14465 /* Nullify if unused */
14466 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14467 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14470 g_free (live_range_start);
14471 g_free (live_range_end);
14472 g_free (live_range_start_bb);
14473 g_free (live_range_end_bb);
14479 * - use 'iadd' instead of 'int_add'
14480 * - handling ovf opcodes: decompose in method_to_ir.
14481 * - unify iregs/fregs
14482 * -> partly done, the missing parts are:
14483 * - a more complete unification would involve unifying the hregs as well, so
14484 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14485 * would no longer map to the machine hregs, so the code generators would need to
14486 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14487 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14488 * fp/non-fp branches speeds it up by about 15%.
14489 * - use sext/zext opcodes instead of shifts
14491 * - get rid of TEMPLOADs if possible and use vregs instead
14492 * - clean up usage of OP_P/OP_ opcodes
14493 * - cleanup usage of DUMMY_USE
14494 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14496 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14497 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14498 * - make sure handle_stack_args () is called before the branch is emitted
14499 * - when the new IR is done, get rid of all unused stuff
14500 * - COMPARE/BEQ as separate instructions or unify them ?
14501 * - keeping them separate allows specialized compare instructions like
14502 * compare_imm, compare_membase
14503 * - most back ends unify fp compare+branch, fp compare+ceq
14504 * - integrate mono_save_args into inline_method
14505 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14506 * - handle long shift opts on 32 bit platforms somehow: they require
14507 * 3 sregs (2 for arg1 and 1 for arg2)
14508 * - make byref a 'normal' type.
14509 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14510 * variable if needed.
14511 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14512 * like inline_method.
14513 * - remove inlining restrictions
14514 * - fix LNEG and enable cfold of INEG
14515 * - generalize x86 optimizations like ldelema as a peephole optimization
14516 * - add store_mem_imm for amd64
14517 * - optimize the loading of the interruption flag in the managed->native wrappers
14518 * - avoid special handling of OP_NOP in passes
14519 * - move code inserting instructions into one function/macro.
14520 * - try a coalescing phase after liveness analysis
14521 * - add float -> vreg conversion + local optimizations on !x86
14522 * - figure out how to handle decomposed branches during optimizations, ie.
14523 * compare+branch, op_jump_table+op_br etc.
14524 * - promote RuntimeXHandles to vregs
14525 * - vtype cleanups:
14526 * - add a NEW_VARLOADA_VREG macro
14527 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14528 * accessing vtype fields.
14529 * - get rid of I8CONST on 64 bit platforms
14530 * - dealing with the increase in code size due to branches created during opcode
14532 * - use extended basic blocks
14533 * - all parts of the JIT
14534 * - handle_global_vregs () && local regalloc
14535 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14536 * - sources of increase in code size:
14539 * - isinst and castclass
14540 * - lvregs not allocated to global registers even if used multiple times
14541 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14543 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14544 * - add all micro optimizations from the old JIT
14545 * - put tree optimizations into the deadce pass
14546 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14547 * specific function.
14548 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14549 * fcompare + branchCC.
14550 * - create a helper function for allocating a stack slot, taking into account
14551 * MONO_CFG_HAS_SPILLUP.
14553 * - merge the ia64 switch changes.
14554 * - optimize mono_regstate2_alloc_int/float.
14555 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14556 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14557 * parts of the tree could be separated by other instructions, killing the tree
14558 * arguments, or stores killing loads etc. Also, should we fold loads into other
14559 * instructions if the result of the load is used multiple times ?
14560 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14561 * - LAST MERGE: 108395.
14562 * - when returning vtypes in registers, generate IR and append it to the end of the
14563 * last bb instead of doing it in the epilog.
14564 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14572 - When to decompose opcodes:
14573 - earlier: this makes some optimizations hard to implement, since the low level IR
14574 no longer contains the necessary information. But it is easier to do.
14575 - later: harder to implement, enables more optimizations.
14576 - Branches inside bblocks:
14577 - created when decomposing complex opcodes.
14578 - branches to another bblock: harmless, but not tracked by the branch
14579 optimizations, so need to branch to a label at the start of the bblock.
14580 - branches to inside the same bblock: very problematic, trips up the local
14581 reg allocator. Can be fixed by splitting the current bblock, but that is a
14582 complex operation, since some local vregs can become global vregs etc.
14583 - Local/global vregs:
14584 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14585 local register allocator.
14586 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14587 structure, created by mono_create_var (). Assigned to hregs or the stack by
14588 the global register allocator.
14589 - When to do optimizations like alu->alu_imm:
14590 - earlier -> saves work later on since the IR will be smaller/simpler
14591 - later -> can work on more instructions
14592 - Handling of valuetypes:
14593 - When a vtype is pushed on the stack, a new temporary is created, an
14594 instruction computing its address (LDADDR) is emitted and pushed on
14595 the stack. Need to optimize cases when the vtype is used immediately as in
14596 argument passing, stloc etc.
14597 - Instead of the to_end stuff in the old JIT, simply call the function handling
14598 the values on the stack before emitting the last instruction of the bb.
14601 #else /* !DISABLE_JIT */
14604 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14608 #endif /* !DISABLE_JIT */