3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/*
 * NOTE(review): this region of the file is truncated — the `} while (0)`
 * closers of the macros below were lost, so several `#define`s run together.
 * Lines are kept byte-identical; only comments were added/improved, and only
 * at points not preceded by a `\` line continuation.
 */
/* Tuning knobs for the inliner's cost model. */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
/*
 * Bail-out helpers used throughout method-to-ir: each records a failure on
 * `cfg` and jumps to the function-local `exception_exit` label.
 */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whether 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
/* NOTE(review): the return-type line of this prototype was lost in truncation. */
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 inline static MonoInst*
152 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
154 /* helper methods signatures */
/* Cached icall signatures, populated once by mono_create_helper_signatures (). */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
158 static MonoMethodSignature *helper_sig_jit_thread_attach;
159 static MonoMethodSignature *helper_sig_get_tls_tramp;
160 static MonoMethodSignature *helper_sig_set_tls_tramp;
162 /* type loading helpers */
163 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
164 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
167 * Instruction metadata
/*
 * The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * mini-ops.h so the same opcode list expands into different per-opcode
 * tables (dreg/sreg kinds below, then sreg counts).
 */
175 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
176 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
182 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
187 /* keep in sync with the enum in mini.h */
190 #include "mini-ops.h"
195 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
196 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
198 * This should contain the index of the last sreg + 1. This is not the same
199 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
/* NOTE(review): the closing `};` of this table was lost in truncation. */
201 const gint8 ins_sreg_counts[] = {
202 #include "mini-ops.h"
208 mono_alloc_ireg (MonoCompile *cfg)
210 return alloc_ireg (cfg);
214 mono_alloc_lreg (MonoCompile *cfg)
216 return alloc_lreg (cfg);
220 mono_alloc_freg (MonoCompile *cfg)
222 return alloc_freg (cfg);
226 mono_alloc_preg (MonoCompile *cfg)
228 return alloc_preg (cfg);
232 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
234 return alloc_dreg (cfg, stack_type);
238 * mono_alloc_ireg_ref:
240 * Allocate an IREG, and mark it as holding a GC ref.
243 mono_alloc_ireg_ref (MonoCompile *cfg)
245 return alloc_ireg_ref (cfg);
249 * mono_alloc_ireg_mp:
251 * Allocate an IREG, and mark it as holding a managed pointer.
254 mono_alloc_ireg_mp (MonoCompile *cfg)
256 return alloc_ireg_mp (cfg);
260 * mono_alloc_ireg_copy:
262 * Allocate an IREG with the same GC type as VREG.
265 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
267 if (vreg_is_ref (cfg, vreg))
268 return alloc_ireg_ref (cfg);
269 else if (vreg_is_mp (cfg, vreg))
270 return alloc_ireg_mp (cfg);
272 return alloc_ireg (cfg);
/*
 * Map a MonoType to the move opcode used when copying a value of that type
 * between virtual registers (OP_MOVE/OP_LMOVE/OP_FMOVE/...).
 * NOTE(review): this definition is truncated — the return-type line, opening
 * brace, and many `case` labels/`return`s were lost; lines kept byte-identical.
 */
276 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 type = mini_get_underlying_type (type);
283 switch (type->type) {
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4fp means R4 values are kept in single-precision registers, not widened to R8. */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Generic instances defer to the container class's open type. */
328 type = &type->data.generic_class->container_class->byval_arg;
332 g_assert (cfg->gshared);
333 if (mini_type_var_is_vt (type))
336 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
338 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: print a basic block's in/out edges (with block numbers and
 * depth-first numbers) followed by its instruction list, prefixed by MSG.
 * NOTE(review): truncated — return type, opening brace, and the declarations
 * of `i`/`tree` were lost; lines kept byte-identical.
 */
344 mono_print_bb (MonoBasicBlock *bb, const char *msg)
348 GString *str = g_string_new ("");
350 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
353 g_string_append_printf (str, ", OUT: ");
354 for (i = 0; i < bb->out_count; ++i)
355 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
356 g_string_append_printf (str, " ]\n");
358 g_print ("%s", str->str);
/* TRUE frees the underlying character data as well as the GString. */
359 g_string_free (str, TRUE);
361 for (tree = bb->code; tree; tree = tree->next)
362 mono_print_ins_index (-1, tree);
366 mono_create_helper_signatures (void)
368 helper_sig_domain_get = mono_create_icall_signature ("ptr");
369 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
370 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
371 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
372 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
373 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
376 static MONO_NEVER_INLINE void
377 break_on_unverified (void)
379 if (mini_get_debug_options ()->break_on_unverified)
383 static MONO_NEVER_INLINE void
384 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
386 char *method_fname = mono_method_full_name (method, TRUE);
387 char *field_fname = mono_field_full_name (field);
388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
389 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
390 g_free (method_fname);
391 g_free (field_fname);
394 static MONO_NEVER_INLINE void
395 inline_failure (MonoCompile *cfg, const char *msg)
397 if (cfg->verbose_level >= 2)
398 printf ("inline failed: %s\n", msg);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
402 static MONO_NEVER_INLINE void
403 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 if (cfg->verbose_level > 2) \
406 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
407 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
410 static MONO_NEVER_INLINE void
411 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
413 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
414 if (cfg->verbose_level >= 2)
415 printf ("%s\n", cfg->exception_message);
416 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * NOTE(review): truncated region — the `/ *` opener of the comment below and
 * the macro closers were lost. Lines kept byte-identical; comments inserted
 * only where the preceding line has no `\` continuation.
 */
420 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
421 * foo<T> (int i) { ldarg.0; box T; }
423 #define UNVERIFIED do { \
424 if (cfg->gsharedvt) { \
425 if (cfg->verbose_level > 2) \
426 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
427 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
428 goto exception_exit; \
430 break_on_unverified (); \
434 #define GET_BBLOCK(cfg,tblock,ip) do { \
435 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
437 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
438 NEW_BBLOCK (cfg, (tblock)); \
439 (tblock)->cil_code = (ip); \
440 ADD_BBLOCK (cfg, (tblock)); \
444 #if defined(TARGET_X86) || defined(TARGET_AMD64)
445 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
446 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
447 (dest)->dreg = alloc_ireg_mp ((cfg)); \
448 (dest)->sreg1 = (sr1); \
449 (dest)->sreg2 = (sr2); \
450 (dest)->inst_imm = (imm); \
451 (dest)->backend.shift_amount = (shift); \
452 MONO_ADD_INS ((cfg)->cbb, (dest)); \
456 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * Widens the narrower operand: R4 is converted to R8 when mixed with R8
 * (allowed by the ECMA spec), and on 64-bit targets an I4 second operand is
 * sign-extended to pointer width when combined with a native int.
 * NOTE(review): truncated — return type, braces, the `conv`/`widen`
 * declarations and the *argN_ref write-backs were lost; lines kept
 * byte-identical.
 */
458 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
460 MonoInst *arg1 = *arg1_ref;
461 MonoInst *arg2 = *arg2_ref;
464 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
465 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
468 /* Mixing r4/r8 is allowed by the spec */
469 if (arg1->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
473 conv->type = STACK_R8;
477 if (arg2->type == STACK_R4) {
478 int dreg = alloc_freg (cfg);
480 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
481 conv->type = STACK_R8;
487 #if SIZEOF_REGISTER == 8
488 /* FIXME: Need to add many more cases */
489 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
492 int dr = alloc_preg (cfg);
493 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
494 (ins)->sreg2 = widen->dreg;
/*
 * Eval-stack helpers used by the big CIL dispatch loop: ADD_BINOP pops two
 * values and pushes the typed result, ADD_UNOP does the same for one value,
 * and ADD_BINCOND emits a compare + conditional branch, wiring the
 * true/false targets into the CFG. All rely on the loop's local variables
 * (sp, ins, tblock, target, ip, stack_start, ...).
 * NOTE(review): truncated — the `} while (0)` closers were lost; lines kept
 * byte-identical and no comments inserted mid-macro (every interior line is
 * a `\` continuation).
 */
499 #define ADD_BINOP(op) do { \
500 MONO_INST_NEW (cfg, ins, (op)); \
502 ins->sreg1 = sp [0]->dreg; \
503 ins->sreg2 = sp [1]->dreg; \
504 type_from_op (cfg, ins, sp [0], sp [1]); \
506 /* Have to insert a widening op */ \
507 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
508 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
509 MONO_ADD_INS ((cfg)->cbb, (ins)); \
510 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
513 #define ADD_UNOP(op) do { \
514 MONO_INST_NEW (cfg, ins, (op)); \
516 ins->sreg1 = sp [0]->dreg; \
517 type_from_op (cfg, ins, sp [0], NULL); \
519 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
520 MONO_ADD_INS ((cfg)->cbb, (ins)); \
521 *sp++ = mono_decompose_opcode (cfg, ins); \
524 #define ADD_BINCOND(next_block) do { \
527 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
528 cmp->sreg1 = sp [0]->dreg; \
529 cmp->sreg2 = sp [1]->dreg; \
530 type_from_op (cfg, cmp, sp [0], sp [1]); \
532 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
533 type_from_op (cfg, ins, sp [0], sp [1]); \
534 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
535 GET_BBLOCK (cfg, tblock, target); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_true_bb = tblock; \
538 if ((next_block)) { \
539 link_bblock (cfg, cfg->cbb, (next_block)); \
540 ins->inst_false_bb = (next_block); \
541 start_new_bblock = 1; \
543 GET_BBLOCK (cfg, tblock, ip); \
544 link_bblock (cfg, cfg->cbb, tblock); \
545 ins->inst_false_bb = tblock; \
546 start_new_bblock = 2; \
548 if (sp != stack_start) { \
549 handle_stack_args (cfg, stack_start, sp - stack_start); \
550 CHECK_UNVERIFIABLE (cfg); \
552 MONO_ADD_INS (cfg->cbb, cmp); \
553 MONO_ADD_INS (cfg->cbb, ins); \
/*
 * NOTE(review): truncated definition — the `/ *` opener of the doc comment,
 * the function's return type/braces, the duplicate-edge early returns, and
 * the lines appending the new edge arrays were lost; lines kept
 * byte-identical.
 */
557 * link_bblock: Links two basic blocks
559 * links two basic blocks in the control flow graph, the 'from'
560 * argument is the starting block and the 'to' argument is the block
561 * the control flow ends to after 'from'.
564 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
566 MonoBasicBlock **newa;
/* Verbose edge tracing: which CIL offsets (or entry/exit) are being connected. */
570 if (from->cil_code) {
572 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
574 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
577 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
579 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in from's out-list. */
584 for (i = 0; i < from->out_count; ++i) {
585 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool arrays are immutable, so copy). */
591 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
592 for (i = 0; i < from->out_count; ++i) {
593 newa [i] = from->out_bb [i];
/* Mirror the same growth on to->in_bb. */
601 for (i = 0; i < to->in_count; ++i) {
602 if (from == to->in_bb [i]) {
608 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
609 for (i = 0; i < to->in_count; ++i) {
610 newa [i] = to->in_bb [i];
619 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
621 link_bblock (cfg, from, to);
/*
 * NOTE(review): truncated — the `/ *` opener of the doc comment, return
 * type, braces, loop-variable declaration and the final `return -1;` were
 * lost; lines kept byte-identical.
 */
625 * mono_find_block_region:
627 * We mark each basic block with a region ID. We use that to avoid BB
628 * optimizations when blocks are in different regions.
631 * A region token that encodes where this region is, and information
632 * about the clause owner for this block.
634 * The region encodes the try/catch/filter clause that owns this block
635 * as well as the type. -1 is a special value that represents a block
636 * that is in none of try/catch/filter.
639 mono_find_block_region (MonoCompile *cfg, int offset)
641 MonoMethodHeader *header = cfg->header;
642 MonoExceptionClause *clause;
/* First pass: handler (filter/finally/fault/catch) regions take priority. */
645 for (i = 0; i < header->num_clauses; ++i) {
646 clause = &header->clauses [i];
647 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
648 (offset < (clause->handler_offset)))
/* Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
649 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
651 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
652 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
653 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
654 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
655 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
657 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) regions. */
660 for (i = 0; i < header->num_clauses; ++i) {
661 clause = &header->clauses [i];
663 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
664 return ((i + 1) << 8) | clause->flags;
/*
 * Return whether OFFSET lies inside the handler of a finally/fault clause.
 * NOTE(review): truncated — return type, braces, the `continue` for
 * non-finally clauses and the TRUE/FALSE returns were lost; lines kept
 * byte-identical.
 */
671 ip_in_finally_clause (MonoCompile *cfg, int offset)
673 MonoMethodHeader *header = cfg->header;
674 MonoExceptionClause *clause;
677 for (i = 0; i < header->num_clauses; ++i) {
678 clause = &header->clauses [i];
/* Only finally and fault handlers are of interest here. */
679 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
682 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * Collect (into a GList) the exception clauses of kind TYPE whose protected
 * range contains IP but not TARGET — i.e. the handlers a branch from IP to
 * TARGET would leave and therefore must execute.
 * NOTE(review): truncated — return type, braces, the `res` initialization
 * and the final `return res;` were lost; lines kept byte-identical.
 */
689 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
691 MonoMethodHeader *header = cfg->header;
692 MonoExceptionClause *clause;
696 for (i = 0; i < header->num_clauses; ++i) {
697 clause = &header->clauses [i];
698 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
699 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
700 if (clause->flags == type)
701 res = g_list_append (res, clause);
/*
 * Get or create the stack-pointer spill variable associated with an EH
 * REGION, cached in cfg->spvars. The variable is marked volatile so the
 * register allocator leaves it on the stack.
 * NOTE(review): truncated — return type, braces and the early `return var;`
 * on cache hit were lost; lines kept byte-identical.
 */
708 mono_create_spvar_for_region (MonoCompile *cfg, int region)
712 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
716 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
717 /* prevent it from being register allocated */
718 var->flags |= MONO_INST_VOLATILE;
720 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
724 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
726 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Get or create the exception-object variable for the handler at OFFSET,
 * cached in cfg->exvars. Volatile so it stays stack-allocated (the EH code
 * stores the exception object into it from outside normal data flow).
 * NOTE(review): truncated — return type, braces and the cache-hit/final
 * `return var;` were lost; lines kept byte-identical.
 */
730 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
734 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
738 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
739 /* prevent it from being register allocated */
740 var->flags |= MONO_INST_VOLATILE;
742 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * NOTE(review): truncated — the `/ *` opener, return type, braces, the
 * byref early-return branch and many `case` labels/`break`s were lost;
 * lines kept byte-identical.
 */
748 * Returns the type used in the eval stack when @type is loaded.
749 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
752 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
756 type = mini_get_underlying_type (type);
/* Sets both inst->type (eval stack kind) and inst->klass as a side effect. */
757 inst->klass = klass = mono_class_from_mono_type (type);
759 inst->type = STACK_MP;
764 switch (type->type) {
766 inst->type = STACK_INV;
774 inst->type = STACK_I4;
779 case MONO_TYPE_FNPTR:
780 inst->type = STACK_PTR;
782 case MONO_TYPE_CLASS:
783 case MONO_TYPE_STRING:
784 case MONO_TYPE_OBJECT:
785 case MONO_TYPE_SZARRAY:
786 case MONO_TYPE_ARRAY:
787 inst->type = STACK_OBJ;
791 inst->type = STACK_I8;
794 inst->type = cfg->r4_stack_type;
797 inst->type = STACK_R8;
799 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
800 if (type->data.klass->enumtype) {
801 type = mono_class_enum_basetype (type->data.klass);
805 inst->type = STACK_VTYPE;
808 case MONO_TYPE_TYPEDBYREF:
809 inst->klass = mono_defaults.typed_reference_class;
810 inst->type = STACK_VTYPE;
812 case MONO_TYPE_GENERICINST:
813 type = &type->data.generic_class->container_class->byval_arg;
817 g_assert (cfg->gshared);
/* Under gsharedvt, open vtypes live on the stack as VTYPE. */
818 if (mini_is_gsharedvt_type (type)) {
819 g_assert (cfg->gsharedvt);
820 inst->type = STACK_VTYPE;
822 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
826 g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * NOTE(review): truncated — the `/ *` openers, the element-type/storage
 * lines of several table declarations and their closing `};` were lost;
 * lines kept byte-identical. Rows are indexed by MonoStackType
 * (INV, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4); some rows carry a 9th entry
 * for the R4 column — presumably matching the original layout, confirm
 * against full tree.
 */
831 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of numeric binops (add/sub/mul/div) per operand-type pair. */
834 bin_num_table [STACK_MAX] [STACK_MAX] = {
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
840 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation per operand type. */
848 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
851 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/rem) per operand-type pair. */
853 bin_int_table [STACK_MAX] [STACK_MAX] = {
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability classes: 0 = invalid, non-zero values encode allowed comparisons. */
865 bin_comp_table [STACK_MAX] [STACK_MAX] = {
866 /* Inv i L p F & O vt r4 */
868 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
869 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
870 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
871 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
872 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
873 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
874 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
875 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
878 /* reduce the size of this table */
/* Result type of shift ops: left operand's type, shift amount must be I4/PTR. */
880 shift_table [STACK_MAX] [STACK_MAX] = {
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
892 * Tables to map from the non-specific opcode to the matching
893 * type-specific opcode.
/* Each entry is an opcode delta added to the generic CIL opcode. */
895 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
897 binops_op_map [STACK_MAX] = {
898 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
901 /* handles from CEE_NEG to CEE_CONV_U8 */
903 unops_op_map [STACK_MAX] = {
904 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
907 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
909 ovfops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
913 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
915 ovf2ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
919 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
921 ovf3ops_op_map [STACK_MAX] = {
922 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
925 /* handles from CEE_BEQ to CEE_BLT_UN */
927 beqops_op_map [STACK_MAX] = {
928 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
931 /* handles from CEE_CEQ to CEE_CLT_UN */
933 ceqops_op_map [STACK_MAX] = {
934 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
/*
 * NOTE(review): truncated — the `/ *` opener, return type, braces, many
 * `case` labels and `break`s were lost from this large dispatch; the logic
 * is too order-sensitive to reconstruct safely, so lines are kept
 * byte-identical with comments only.
 */
938 * Sets ins->type (the type on the eval stack) according to the
939 * type of the opcode and the arguments to it.
940 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
942 * FIXME: this function sets ins->type unconditionally in some cases, but
943 * it should set it to invalid for some types (a conv.x on an object)
946 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
948 switch (ins->opcode) {
/* Numeric binops: look up result type, then specialize the opcode by type. */
955 /* FIXME: check unverifiable args for STACK_MP */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/rem). */
964 ins->type = bin_int_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the left operand. */
970 ins->type = shift_table [src1->type] [src2->type];
971 ins->opcode += binops_op_map [ins->type];
/* Compares: choose L/R/F/I compare by the first operand's width. */
976 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
977 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
978 ins->opcode = OP_LCOMPARE;
979 else if (src1->type == STACK_R4)
980 ins->opcode = OP_RCOMPARE;
981 else if (src1->type == STACK_R8)
982 ins->opcode = OP_FCOMPARE;
984 ins->opcode = OP_ICOMPARE;
986 case OP_ICOMPARE_IMM:
/* Immediate compare has one stack operand, hence src1 indexed twice. */
987 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
988 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
989 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / ceq-family: specialize via the op maps. */
1001 ins->opcode += beqops_op_map [src1->type];
1004 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered variants: bit 1 of the comparability class gates validity. */
1011 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1012 ins->opcode += ceqops_op_map [src1->type];
/* neg */
1016 ins->type = neg_table [src1->type];
1017 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are valid. */
1020 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1021 ins->type = src1->type;
1023 ins->type = STACK_INV;
1024 ins->opcode += unops_op_map [ins->type];
/* conv to small ints produce I4 on the eval stack. */
1030 ins->type = STACK_I4;
1031 ins->opcode += unops_op_map [src1->type];
/* conv.r.un */
1034 ins->type = STACK_R8;
1035 switch (src1->type) {
1038 ins->opcode = OP_ICONV_TO_R_UN;
1041 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions. */
1045 case CEE_CONV_OVF_I1:
1046 case CEE_CONV_OVF_U1:
1047 case CEE_CONV_OVF_I2:
1048 case CEE_CONV_OVF_U2:
1049 case CEE_CONV_OVF_I4:
1050 case CEE_CONV_OVF_U4:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf3ops_op_map [src1->type];
1054 case CEE_CONV_OVF_I_UN:
1055 case CEE_CONV_OVF_U_UN:
1056 ins->type = STACK_PTR;
1057 ins->opcode += ovf2ops_op_map [src1->type];
1059 case CEE_CONV_OVF_I1_UN:
1060 case CEE_CONV_OVF_I2_UN:
1061 case CEE_CONV_OVF_I4_UN:
1062 case CEE_CONV_OVF_U1_UN:
1063 case CEE_CONV_OVF_U2_UN:
1064 case CEE_CONV_OVF_U4_UN:
1065 ins->type = STACK_I4;
1066 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result; opcode depends on source width and target word size. */
1069 ins->type = STACK_PTR;
1070 switch (src1->type) {
1072 ins->opcode = OP_ICONV_TO_U;
1076 #if SIZEOF_VOID_P == 8
1077 ins->opcode = OP_LCONV_TO_U;
1079 ins->opcode = OP_MOVE;
1083 ins->opcode = OP_LCONV_TO_U;
1086 ins->opcode = OP_FCONV_TO_U;
/* conv.i8/u8 */
1092 ins->type = STACK_I8;
1093 ins->opcode += unops_op_map [src1->type];
1095 case CEE_CONV_OVF_I8:
1096 case CEE_CONV_OVF_U8:
1097 ins->type = STACK_I8;
1098 ins->opcode += ovf3ops_op_map [src1->type];
1100 case CEE_CONV_OVF_U8_UN:
1101 case CEE_CONV_OVF_I8_UN:
1102 ins->type = STACK_I8;
1103 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4/r8 */
1106 ins->type = cfg->r4_stack_type;
1107 ins->opcode += unops_op_map [src1->type];
1110 ins->type = STACK_R8;
1111 ins->opcode += unops_op_map [src1->type];
1114 ins->type = STACK_R8;
1118 ins->type = STACK_I4;
1119 ins->opcode += ovfops_op_map [src1->type];
1122 case CEE_CONV_OVF_I:
1123 case CEE_CONV_OVF_U:
1124 ins->type = STACK_PTR;
1125 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are invalid operands. */
1128 case CEE_ADD_OVF_UN:
1130 case CEE_MUL_OVF_UN:
1132 case CEE_SUB_OVF_UN:
1133 ins->type = bin_num_table [src1->type] [src2->type];
1134 ins->opcode += ovfops_op_map [src1->type];
1135 if (ins->type == STACK_R8)
1136 ins->type = STACK_INV;
/* Loads: stack type determined by the load width. */
1138 case OP_LOAD_MEMBASE:
1139 ins->type = STACK_PTR;
1141 case OP_LOADI1_MEMBASE:
1142 case OP_LOADU1_MEMBASE:
1143 case OP_LOADI2_MEMBASE:
1144 case OP_LOADU2_MEMBASE:
1145 case OP_LOADI4_MEMBASE:
1146 case OP_LOADU4_MEMBASE:
1147 ins->type = STACK_PTR;
1149 case OP_LOADI8_MEMBASE:
1150 ins->type = STACK_I8;
1152 case OP_LOADR4_MEMBASE:
1153 ins->type = cfg->r4_stack_type;
1155 case OP_LOADR8_MEMBASE:
1156 ins->type = STACK_R8;
1159 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a generic klass for GC purposes. */
1163 if (ins->type == STACK_MP)
1164 ins->klass = mono_defaults.object_class;
/*
 * NOTE(review): truncated — the declaration line of the table below, the
 * body of param_table, and most of check_values_to_signature (return type,
 * braces, returns) were lost; lines kept byte-identical.
 */
1169 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1175 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Validate that the eval-stack types in ARGS are compatible with SIG's
 * parameter types (used when checking values passed to a call).
 */
1180 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1185 switch (args->type) {
1195 for (i = 0; i < sig->param_count; ++i) {
1196 switch (args [i].type) {
1200 if (!sig->params [i]->byref)
1204 if (sig->params [i]->byref)
1206 switch (sig->params [i]->type) {
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1218 if (sig->params [i]->byref)
1220 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1229 /*if (!param_table [args [i].type] [sig->params [i]->type])
1237 * When we need a pointer to the current domain many times in a method, we
1238 * call mono_domain_get() once and we store the result in a local variable.
1239 * This function returns the variable that represents the MonoDomain*.
1241 inline static MonoInst *
1242 mono_get_domainvar (MonoCompile *cfg)
1244 if (!cfg->domainvar)
1245 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 return cfg->domainvar;
1250 * The got_var contains the address of the Global Offset Table when AOT
1254 mono_get_got_var (MonoCompile *cfg)
1256 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1258 if (!cfg->got_var) {
1259 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 return cfg->got_var;
1265 mono_create_rgctx_var (MonoCompile *cfg)
1267 if (!cfg->rgctx_var) {
1268 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1269 /* force the var to be stack allocated */
1270 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/* Return the rgctx var for a gshared method, creating it if needed.
 * Only valid when generic sharing is active (asserted below). */
1275 mono_get_vtable_var (MonoCompile *cfg)
1277 g_assert (cfg->gshared);
1279 mono_create_rgctx_var (cfg);
1281 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType*.
 * STACK_MP (gapped view: its case label is elided) yields the this_arg of
 * ins->klass; STACK_VTYPE yields the klass' byval_arg.
 */
1285 type_from_stack_type (MonoInst *ins) {
1286 switch (ins->type) {
1287 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1288 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1289 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1290 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1291 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1293 return &ins->klass->this_arg;
1294 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1295 case STACK_VTYPE: return &ins->klass->byval_arg;
1297 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type: map a MonoType* to its STACK_* category.
 * Generic insts that are valuetypes fall through to the valuetype handling;
 * R4 uses cfg->r4_stack_type (STACK_R4 vs STACK_R8 depending on backend).
 * NOTE(review): gapped view — many case labels/returns elided; comments only.
 */
1302 static G_GNUC_UNUSED int
1303 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1305 t = mono_type_get_underlying_type (t);
1317 case MONO_TYPE_FNPTR:
1319 case MONO_TYPE_CLASS:
1320 case MONO_TYPE_STRING:
1321 case MONO_TYPE_OBJECT:
1322 case MONO_TYPE_SZARRAY:
1323 case MONO_TYPE_ARRAY:
1329 return cfg->r4_stack_type;
1332 case MONO_TYPE_VALUETYPE:
1333 case MONO_TYPE_TYPEDBYREF:
1335 case MONO_TYPE_GENERICINST:
1336 if (mono_type_generic_inst_is_valuetype (t))
1342 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element class it accesses.
 * NOTE(review): gapped view — the case labels for most opcodes are elided,
 * only the return statements are visible; code kept byte-identical.
 */
1349 array_access_to_klass (int opcode)
1353 return mono_defaults.byte_class;
1355 return mono_defaults.uint16_class;
1358 return mono_defaults.int_class;
1361 return mono_defaults.sbyte_class;
1364 return mono_defaults.int16_class;
1367 return mono_defaults.int32_class;
1369 return mono_defaults.uint32_class;
1372 return mono_defaults.int64_class;
1375 return mono_defaults.single_class;
1378 return mono_defaults.double_class;
1379 case CEE_LDELEM_REF:
1380 case CEE_STELEM_REF:
1381 return mono_defaults.object_class;
1383 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return a local variable to hold the value of stack slot SLOT with the
 * stack type of INS, reusing a previously created var for the same
 * (slot, stack-type) pair when possible via the cfg->intvars cache.
 */
1389 * We try to share variables when possible
1392 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1397 /* inlining can result in deeper stacks */
1398 if (slot >= cfg->header->max_stack)
1399 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* cache index: stack types are 1-based, hence the -1 */
1401 pos = ins->type - 1 + slot * STACK_MAX;
1403 switch (ins->type) {
1410 if ((vnum = cfg->intvars [pos]))
1411 return cfg->varinfo [vnum];
1412 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1413 cfg->intvars [pos] = res->inst_c0;
/* fall-back path (gapped view): uncached var creation */
1416 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * During AOT compilation, record the (image, token) pair used to create KEY
 * in cfg->token_info_hash so the AOT compiler can re-resolve it later.
 */
1422 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1425 * Don't use this if a generic_context is set, since that means AOT can't
1426 * look up the method using just the image+token.
1427 * table == 0 means this is a reference made from a wrapper.
1429 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1430 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1431 jump_info_token->image = image;
1432 jump_info_token->token = token;
1433 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/* NOTE(review): gapped view of this function (closing braces, 'continue'
 * statements and parts of the comments are elided by the extraction);
 * code kept byte-identical, comments only. */
1438 * This function is called to handle items that are left on the evaluation stack
1439 * at basic block boundaries. What happens is that we save the values to local variables
1440 * and we reload them later when first entering the target basic block (with the
1441 * handle_loaded_temps () function).
1442 * A single joint point will use the same variables (stored in the array bb->out_stack or
1443 * bb->in_stack, if the basic block is before or after the joint point).
1445 * This function needs to be called _before_ emitting the last instruction of
1446 * the bb (i.e. before emitting a branch).
1447 * If the stack merge fails at a join point, cfg->unverifiable is set.
1450 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1453 MonoBasicBlock *bb = cfg->cbb;
1454 MonoBasicBlock *outb;
1455 MonoInst *inst, **locals;
1460 if (cfg->verbose_level > 3)
1461 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables hold the stack. */
1462 if (!bb->out_scount) {
1463 bb->out_scount = count;
1464 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor bblock, if one exists. */
1466 for (i = 0; i < bb->out_count; ++i) {
1467 outb = bb->out_bb [i];
1468 /* exception handlers are linked, but they should not be considered for stack args */
1469 if (outb->flags & BB_EXCEPTION_HANDLER)
1471 //printf (" %d", outb->block_num);
1472 if (outb->in_stack) {
1474 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh temporaries. */
1480 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1481 for (i = 0; i < count; ++i) {
1483 * try to reuse temps already allocated for this purpouse, if they occupy the same
1484 * stack slot and if they are of the same type.
1485 * This won't cause conflicts since if 'local' is used to
1486 * store one of the values in the in_stack of a bblock, then
1487 * the same variable will be used for the same outgoing stack
1489 * This doesn't work when inlining methods, since the bblocks
1490 * in the inlined methods do not inherit their in_stack from
1491 * the bblock they are inlined to. See bug #58863 for an
1494 if (cfg->inlined_method)
1495 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1497 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor's in_stack; mismatched
 * depths at a join point make the method unverifiable. */
1502 for (i = 0; i < bb->out_count; ++i) {
1503 outb = bb->out_bb [i];
1504 /* exception handlers are linked, but they should not be considered for stack args */
1505 if (outb->flags & BB_EXCEPTION_HANDLER)
1507 if (outb->in_scount) {
1508 if (outb->in_scount != bb->out_scount) {
1509 cfg->unverifiable = TRUE;
1512 continue; /* check they are the same locals */
1514 outb->in_scount = count;
1515 outb->in_stack = bb->out_stack;
1518 locals = bb->out_stack;
/* Spill the current stack values into the chosen temporaries. */
1520 for (i = 0; i < count; ++i) {
1521 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1522 inst->cil_code = sp [i]->cil_code;
1523 sp [i] = locals [i];
1524 if (cfg->verbose_level > 3)
1525 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1529 * It is possible that the out bblocks already have in_stack assigned, and
1530 * the in_stacks differ. In this case, we will store to all the different
1537 /* Find a bblock which has a different in_stack */
1539 while (bindex < bb->out_count) {
1540 outb = bb->out_bb [bindex];
1541 /* exception handlers are linked, but they should not be considered for stack args */
1542 if (outb->flags & BB_EXCEPTION_HANDLER) {
1546 if (outb->in_stack != locals) {
1547 for (i = 0; i < count; ++i) {
1548 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1549 inst->cil_code = sp [i]->cil_code;
1550 sp [i] = locals [i];
1551 if (cfg->verbose_level > 3)
1552 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1554 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 * Emit IR loading a runtime constant described by (PATCH_TYPE, DATA).
 * Under AOT this becomes an AOTCONST patch; otherwise the patch target is
 * resolved immediately and emitted as a pointer constant.
 */
1564 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1568 if (cfg->compile_aot) {
1569 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1575 ji.type = patch_type;
1576 ji.data.target = data;
1577 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1578 mono_error_assert_ok (&error);
1580 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper around emit_runtime_constant (). */
1586 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1588 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 * Emit IR to set SIZE bytes at DESTREG+OFFSET to VAL (only VAL == 0 is
 * supported, see the assert).  Small aligned sizes use immediate stores;
 * larger regions loop with register stores of decreasing width.
 * NOTE(review): gapped view — loop headers and size bookkeeping elided.
 */
1592 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1596 g_assert (val == 0);
1601 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1604 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1607 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1610 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1612 #if SIZEOF_REGISTER == 8
1614 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1620 val_reg = alloc_preg (cfg);
1622 if (SIZEOF_REGISTER == 8)
1623 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1625 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned destination: fall back to byte stores */
1628 /* This could be optimized further if neccesary */
1630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1637 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * honoring ALIGN: unaligned copies go byte-by-byte, aligned copies use
 * the widest load/store pairs available, narrowing for the remainder.
 * NOTE(review): gapped view — loop headers and offset/size updates elided.
 */
1668 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1675 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1676 g_assert (size < 10000);
/* unaligned: byte-wise copy */
1679 /* This could be optimized further if neccesary */
1681 cur_reg = alloc_preg (cfg);
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks where the backend allows unaligned 64-bit access */
1690 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1692 cur_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* then 4-, 2- and 1-byte tails */
1702 cur_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1718 cur_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1720 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mono_create_fast_tls_getter:
 * Build (but do not emit) an OP_TLS_GET ins reading the TLS slot for KEY.
 * Returns nothing usable under AOT or when the arch lacks fast TLS
 * (gapped view: the NULL-return lines are elided).
 */
1728 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1730 int tls_offset = mono_tls_get_tls_offset (key);
1732 if (cfg->compile_aot)
1735 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1737 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1738 ins->dreg = mono_alloc_preg (cfg);
1739 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Build an OP_TLS_SET ins storing VALUE into the TLS slot for KEY.
 * Same AOT / fast-TLS availability constraints as the getter above.
 */
1746 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1748 int tls_offset = mono_tls_get_tls_offset (key);
1750 if (cfg->compile_aot)
1753 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1755 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1756 ins->sreg1 = value->dreg;
1757 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR reading TLS slot KEY.  Tries the fast inline path first, then
 * falls back to calling a tls-getter trampoline (AOT) or a JIT icall.
 */
1765 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1767 MonoInst *fast_tls = NULL;
1769 if (!mini_get_debug_options ()->use_fallback_tls)
1770 fast_tls = mono_create_fast_tls_getter (cfg, key);
1773 MONO_ADD_INS (cfg->cbb, fast_tls);
1777 if (cfg->compile_aot) {
1780 * tls getters are critical pieces of code and we don't want to resolve them
1781 * through the standard plt/tramp mechanism since we might expose ourselves
1782 * to crashes and infinite recursions.
1784 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1785 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1787 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1788 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR writing VALUE into TLS slot KEY; mirrors mono_create_tls_get
 * (fast inline path, then AOT trampoline or JIT icall fallback).
 */
1793 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1795 MonoInst *fast_tls = NULL;
1797 if (!mini_get_debug_options ()->use_fallback_tls)
1798 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1801 MONO_ADD_INS (cfg->cbb, fast_tls);
1805 if (cfg->compile_aot) {
1807 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1808 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1810 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1811 return mono_emit_jit_icall (cfg, setter, &value);
/* NOTE(review): gapped view (some branch/brace lines elided);
 * code kept byte-identical, comments only. */
1818 * Emit IR to push the current LMF onto the LMF stack.
1821 emit_push_lmf (MonoCompile *cfg)
1824 * Emit IR to push the LMF:
1825 * lmf_addr = <lmf_addr from tls>
1826 * lmf->lmf_addr = lmf_addr
1827 * lmf->prev_lmf = *lmf_addr
1830 MonoInst *ins, *lmf_ins;
1835 int lmf_reg, prev_lmf_reg;
1837 * Store lmf_addr in a variable, so it can be allocated to a global register.
1839 if (!cfg->lmf_addr_var)
1840 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from jit_tls + offset, the other reads it
 * directly from the TLS_KEY_LMF_ADDR slot (selection logic elided). */
1843 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1845 int jit_tls_dreg = ins->dreg;
1847 lmf_reg = alloc_preg (cfg);
1848 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1850 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1853 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1855 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1856 lmf_reg = ins->dreg;
1858 prev_lmf_reg = alloc_preg (cfg);
1859 /* Save previous_lmf */
1860 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1861 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link the new LMF in: *lmf_addr = lmf */
1863 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1869 * Emit IR to pop the current LMF from the LMF stack.
1872 emit_pop_lmf (MonoCompile *cfg)
1874 int lmf_reg, lmf_addr_reg;
1880 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1881 lmf_reg = ins->dreg;
1885 * Emit IR to pop the LMF:
1886 * *(lmf->lmf_addr) = lmf->prev_lmf
1888 /* This could be called before emit_push_lmf () */
1889 if (!cfg->lmf_addr_var)
1890 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1891 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Restore the previous LMF as the head of the LMF list. */
1893 prev_lmf_reg = alloc_preg (cfg);
1894 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1895 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as the
 * single argument, but only for the outermost method (not inlined bodies)
 * and only when enter/leave profiling is enabled.
 */
1899 emit_instrumentation_call (MonoCompile *cfg, void *func)
1901 MonoInst *iargs [1];
1904 * Avoid instrumenting inlined methods since it can
1905 * distort profiling results.
1907 if (cfg->method != cfg->current_method)
1910 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1911 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1912 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Pick the call opcode family for return type TYPE, then select the
 * REG (indirect), MEMBASE (virtual) or plain variant via CALLI/VIRT.
 * Enums and generic insts are lowered to their underlying type and the
 * switch re-entered (gapped view: the goto/retry lines are elided).
 */
1917 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1920 type = mini_get_underlying_type (type);
1921 switch (type->type) {
1922 case MONO_TYPE_VOID:
1923 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1930 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1934 case MONO_TYPE_FNPTR:
1935 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1936 case MONO_TYPE_CLASS:
1937 case MONO_TYPE_STRING:
1938 case MONO_TYPE_OBJECT:
1939 case MONO_TYPE_SZARRAY:
1940 case MONO_TYPE_ARRAY:
1941 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1944 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1947 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1949 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1951 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1952 case MONO_TYPE_VALUETYPE:
1953 if (type->data.klass->enumtype) {
1954 type = mono_class_enum_basetype (type->data.klass);
1957 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1958 case MONO_TYPE_TYPEDBYREF:
1959 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1960 case MONO_TYPE_GENERICINST:
1961 type = &type->data.generic_class->container_class->byval_arg;
1964 case MONO_TYPE_MVAR:
1966 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1968 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1973 //XXX this ignores if t is byref
1974 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1977 * target_type_is_incompatible:
1978 * @cfg: MonoCompile context
1980 * Check that the item @arg on the evaluation stack can be stored
1981 * in the target type (can be a local, or field, etc).
1982 * The cfg arg can be used to check if we need verification or just
1985 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): gapped view — several return statements and case labels
 * are elided; code kept byte-identical, comments only. */
1988 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1990 MonoType *simple_type;
1993 if (target->byref) {
1994 /* FIXME: check that the pointed to types match */
1995 if (arg->type == STACK_MP) {
1996 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1997 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1998 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2000 /* if the target is native int& or same type */
2001 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2004 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2005 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2006 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2010 if (arg->type == STACK_PTR)
/* Non-byref target: compare against the lowered simple type. */
2015 simple_type = mini_get_underlying_type (target);
2016 switch (simple_type->type) {
2017 case MONO_TYPE_VOID:
2025 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2029 /* STACK_MP is needed when setting pinned locals */
2030 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2035 case MONO_TYPE_FNPTR:
2037 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2038 * in native int. (#688008).
2040 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2043 case MONO_TYPE_CLASS:
2044 case MONO_TYPE_STRING:
2045 case MONO_TYPE_OBJECT:
2046 case MONO_TYPE_SZARRAY:
2047 case MONO_TYPE_ARRAY:
2048 if (arg->type != STACK_OBJ)
2050 /* FIXME: check type compatibility */
2054 if (arg->type != STACK_I8)
2058 if (arg->type != cfg->r4_stack_type)
2062 if (arg->type != STACK_R8)
2065 case MONO_TYPE_VALUETYPE:
2066 if (arg->type != STACK_VTYPE)
2068 klass = mono_class_from_mono_type (simple_type);
2069 if (klass != arg->klass)
2072 case MONO_TYPE_TYPEDBYREF:
2073 if (arg->type != STACK_VTYPE)
2075 klass = mono_class_from_mono_type (simple_type);
2076 if (klass != arg->klass)
2079 case MONO_TYPE_GENERICINST:
2080 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2081 MonoClass *target_class;
2082 if (arg->type != STACK_VTYPE)
2084 klass = mono_class_from_mono_type (simple_type);
2085 target_class = mono_class_from_mono_type (target);
2086 /* The second cases is needed when doing partial sharing */
2087 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2091 if (arg->type != STACK_OBJ)
2093 /* FIXME: check type compatibility */
2097 case MONO_TYPE_MVAR:
2098 g_assert (cfg->gshared);
2099 if (mini_type_var_is_vt (simple_type)) {
2100 if (arg->type != STACK_VTYPE)
2103 if (arg->type != STACK_OBJ)
2108 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/* NOTE(review): gapped view — return statements, some case labels and the
 * 'goto handle_enum'-style retry lines are elided; comments only. */
2114 * Prepare arguments for passing to a function call.
2115 * Return a non-zero value if the arguments can't be passed to the given
2117 * The type checks are not yet complete and some conversions may need
2118 * casts on 32 or 64 bit architectures.
2120 * FIXME: implement this using target_type_is_incompatible ()
2123 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2125 MonoType *simple_type;
/* 'this' must be an object, managed pointer or native pointer */
2129 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2133 for (i = 0; i < sig->param_count; ++i) {
2134 if (sig->params [i]->byref) {
2135 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2139 simple_type = mini_get_underlying_type (sig->params [i]);
2141 switch (simple_type->type) {
2142 case MONO_TYPE_VOID:
2151 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2157 case MONO_TYPE_FNPTR:
2158 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2161 case MONO_TYPE_CLASS:
2162 case MONO_TYPE_STRING:
2163 case MONO_TYPE_OBJECT:
2164 case MONO_TYPE_SZARRAY:
2165 case MONO_TYPE_ARRAY:
2166 if (args [i]->type != STACK_OBJ)
2171 if (args [i]->type != STACK_I8)
2175 if (args [i]->type != cfg->r4_stack_type)
2179 if (args [i]->type != STACK_R8)
2182 case MONO_TYPE_VALUETYPE:
2183 if (simple_type->data.klass->enumtype) {
2184 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2187 if (args [i]->type != STACK_VTYPE)
2190 case MONO_TYPE_TYPEDBYREF:
2191 if (args [i]->type != STACK_VTYPE)
2194 case MONO_TYPE_GENERICINST:
2195 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2198 case MONO_TYPE_MVAR:
2200 if (args [i]->type != STACK_VTYPE)
2204 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an *_MEMBASE (virtual) call opcode to the corresponding direct call
 * opcode (gapped view: the return statements are elided). */
2212 callvirt_to_call (int opcode)
2215 case OP_CALL_MEMBASE:
2217 case OP_VOIDCALL_MEMBASE:
2219 case OP_FCALL_MEMBASE:
2221 case OP_RCALL_MEMBASE:
2223 case OP_VCALL_MEMBASE:
2225 case OP_LCALL_MEMBASE:
2228 g_assert_not_reached ();
/* Map an *_MEMBASE (virtual) call opcode to the corresponding *_REG
 * (indirect-through-register) call opcode. */
2235 callvirt_to_call_reg (int opcode)
2238 case OP_CALL_MEMBASE:
2240 case OP_VOIDCALL_MEMBASE:
2241 return OP_VOIDCALL_REG;
2242 case OP_FCALL_MEMBASE:
2243 return OP_FCALL_REG;
2244 case OP_RCALL_MEMBASE:
2245 return OP_RCALL_REG;
2246 case OP_VCALL_MEMBASE:
2247 return OP_VCALL_REG;
2248 case OP_LCALL_MEMBASE:
2249 return OP_LCALL_REG;
2251 g_assert_not_reached ();
/*
 * emit_imt_argument:
 * Arrange for the IMT/method argument to be passed to CALL: either copy
 * IMT_ARG's vreg or materialize METHOD as a runtime constant, then route
 * it through call->imt_arg_reg (LLVM) or MONO_ARCH_IMT_REG (JIT).
 * NOTE(review): gapped view — branch conditions elided; comments only.
 */
2257 /* Either METHOD or IMT_ARG needs to be set */
2259 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2263 if (COMPILE_LLVM (cfg)) {
2265 method_reg = alloc_preg (cfg);
2266 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2268 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2269 method_reg = ins->dreg;
2273 call->imt_arg_reg = method_reg;
2275 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* non-LLVM path */
2280 method_reg = alloc_preg (cfg);
2281 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2283 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2284 method_reg = ins->dreg;
2287 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP and fill in type/target (gapped view:
 * the ip assignment and return are elided). */
2290 static MonoJumpInfo *
2291 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2293 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2297 ji->data.target = target;
/* cfg-aware wrapper for mono_class_check_context_used (guard elided). */
2303 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2306 return mono_class_check_context_used (klass);
/* cfg-aware wrapper for mono_method_check_context_used (guard elided). */
2312 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2315 return mono_method_check_context_used (method);
2321 * check_method_sharing:
2323 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2326 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2328 gboolean pass_vtable = FALSE;
2329 gboolean pass_mrgctx = FALSE;
/* vtable case: static or valuetype methods on generic classes */
2331 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2332 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2333 gboolean sharable = FALSE;
2335 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2339 * Pass vtable iff target method might
2340 * be shared, which means that sharing
2341 * is enabled for its class and its
2342 * context is sharable (and it's not a
2345 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx case: generic methods (a method_inst is present) */
2349 if (mini_method_get_context (cmethod) &&
2350 mini_method_get_context (cmethod)->method_inst) {
2351 g_assert (!pass_vtable);
2353 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2356 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2361 if (out_pass_vtable)
2362 *out_pass_vtable = pass_vtable;
2363 if (out_pass_mrgctx)
2364 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build (but do not add to a bblock) a MonoCallInst for SIG/ARGS.
 * Handles tail calls, valuetype returns (via vret_var or the
 * OP_OUTARG_VTRETADDR scheme), soft-float r8->r4 argument conversion,
 * and delegates outarg setup to the LLVM or arch backend.
 * NOTE(review): gapped view — several condition/brace lines elided;
 * code kept byte-identical, comments only.
 */
2367 inline static MonoCallInst *
2368 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2369 		     MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2373 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls must emit the leave-instrumentation before the call */
2381 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2383 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2385 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2388 call->signature = sig;
2389 call->rgctx_reg = rgctx;
2390 sig_ret = mini_get_underlying_type (sig->ret);
2392 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return, path 1: use the caller-provided vret_addr */
2395 if (mini_type_is_vtype (sig_ret)) {
2396 call->vret_var = cfg->vret_addr;
2397 //g_assert_not_reached ();
/* vtype return, path 2: allocate a temp and take its address lazily */
2399 } else if (mini_type_is_vtype (sig_ret)) {
2400 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2403 temp->backend.is_pinvoke = sig->pinvoke;
2406 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2407 * address of return value to increase optimization opportunities.
2408 * Before vtype decomposition, the dreg of the call ins itself represents the
2409 * fact the call modifies the return value. After decomposition, the call will
2410 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2411 * will be transformed into an LDADDR.
2413 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2414 loada->dreg = alloc_preg (cfg);
2415 loada->inst_p0 = temp;
2416 /* We reference the call too since call->dreg could change during optimization */
2417 loada->inst_p1 = call;
2418 MONO_ADD_INS (cfg->cbb, loada);
2420 call->inst.dreg = temp->dreg;
2422 call->vret_var = loada;
2423 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2424 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2426 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2427 if (COMPILE_SOFT_FLOAT (cfg)) {
2429 * If the call has a float argument, we would need to do an r8->r4 conversion using
2430 * an icall, but that cannot be done during the call sequence since it would clobber
2431 * the call registers + the stack. So we do it before emitting the call.
2433 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2435 MonoInst *in = call->args [i];
2437 if (i >= sig->hasthis)
2438 t = sig->params [i - sig->hasthis];
2440 t = &mono_defaults.int_class->byval_arg;
2441 t = mono_type_get_underlying_type (t);
2443 if (!t->byref && t->type == MONO_TYPE_R4) {
2444 MonoInst *iargs [1];
2448 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2450 /* The result will be in an int vreg */
2451 call->args [i] = conv;
2457 call->need_unbox_trampoline = unbox_trampoline;
/* let the backend lay out the out-args */
2460 if (COMPILE_LLVM (cfg))
2461 mono_llvm_emit_call (cfg, call);
2463 mono_arch_emit_call (cfg, call);
2465 mono_arch_emit_call (cfg, call);
2468 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2469 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument register to CALL and mark the cfg/call as
 * using the rgctx register. */
2475 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2477 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2478 cfg->uses_rgctx_reg = TRUE;
2479 call->rgctx_reg = TRUE;
2481 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG.  Optionally
 * passes an IMT argument and an rgctx argument, and — for pinvoke
 * wrappers with cfg->check_pinvoke_callconv — saves/compares the stack
 * pointer around the call to detect calling-convention mismatches.
 * NOTE(review): gapped view — some condition/brace lines elided;
 * code kept byte-identical, comments only.
 */
2485 inline static MonoInst*
2486 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2491 gboolean check_sp = FALSE;
2493 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2494 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2496 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value into a fresh vreg before arg marshalling */
2501 rgctx_reg = mono_alloc_preg (cfg);
2502 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* record SP before the call so it can be compared afterwards */
2506 if (!cfg->stack_inbalance_var)
2507 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2509 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2510 ins->dreg = cfg->stack_inbalance_var->dreg;
2511 MONO_ADD_INS (cfg->cbb, ins);
2514 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2516 call->inst.sreg1 = addr->dreg;
2519 emit_imt_argument (cfg, call, NULL, imt_arg);
2521 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* compare post-call SP with the saved one; mismatch => EEException */
2526 sp_reg = mono_alloc_preg (cfg);
2528 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2530 MONO_ADD_INS (cfg->cbb, ins);
2532 /* Restore the stack so we don't crash when throwing the exception */
2533 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2534 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2535 MONO_ADD_INS (cfg->cbb, ins);
2537 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2538 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2542 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2544 return (MonoInst*)call;
2548 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2551 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2554 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2555 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2557 #ifndef DISABLE_REMOTING
/*
 * Continuation of mono_emit_method_call_full () — its signature and the
 * first local declarations are above this chunk. Emits a managed call to
 * METHOD: handles remoting wrappers, the delegate Invoke fast path,
 * devirtualization of final/non-virtual methods, and vtable/IMT dispatch.
 */
2558 gboolean might_be_remote = FALSE;
2560 gboolean virtual_ = this_ins != NULL;
2561 gboolean enable_for_aot = TRUE;
2564 MonoInst *call_target = NULL;
2566 gboolean need_unbox_trampoline;
2569 sig = mono_method_signature (method);
/* llvm-only mode never reaches this path for interface methods */
2571 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2572 g_assert_not_reached ();
/* Keep the rgctx argument alive in its own vreg across call setup */
2575 rgctx_reg = mono_alloc_preg (cfg);
2576 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2579 if (method->string_ctor) {
2580 /* Create the real signature */
2581 /* FIXME: Cache these */
2582 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
/* String ctors are compiled as methods that return the new string */
2583 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2588 context_used = mini_method_check_context_used (cfg, method);
2590 #ifndef DISABLE_REMOTING
/* A receiver of a MarshalByRef (or object) class may be a transparent proxy */
2591 might_be_remote = this_ins && sig->hasthis &&
2592 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2593 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2595 if (might_be_remote && context_used) {
2598 g_assert (cfg->gshared);
/* Under gshared, dispatch through the rgctx remoting-invoke stub */
2600 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2602 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* llvm-only virtual calls go through a dedicated helper */
2606 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2607 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
/* Receiver might be a boxed valuetype for these cases */
2609 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2611 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2613 #ifndef DISABLE_REMOTING
2614 if (might_be_remote)
2615 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2618 call->method = method;
2619 call->inst.flags |= MONO_INST_HAS_METHOD;
2620 call->inst.inst_left = this_ins;
2621 call->tail_call = tail;
2624 int vtable_reg, slot_reg, this_reg;
2627 this_reg = this_ins->dreg;
/* Fast path for delegate Invoke: call through delegate->invoke_impl */
2629 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2630 MonoInst *dummy_use;
2632 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2634 /* Make a call to delegate->invoke_impl */
2635 call->inst.inst_basereg = this_reg;
2636 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2637 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2639 /* We must emit a dummy use here because the delegate trampoline will
2640 replace the 'this' argument with the delegate target making this activation
2641 no longer a root for the delegate.
2642 This is an issue for delegates that target collectible code such as dynamic
2643 methods of GC'able assemblies.
2645 For a test case look into #667921.
2647 FIXME: a dummy use is not the best way to do it as the local register allocator
2648 will put it on a caller save register and spil it around the call.
2649 Ideally, we would either put it on a callee save register or only do the store part.
2651 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2653 return (MonoInst*)call;
/* Devirtualize: non-virtual methods and final methods can be called directly */
2656 if ((!cfg->compile_aot || enable_for_aot) &&
2657 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2658 (MONO_METHOD_IS_FINAL (method) &&
2659 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2660 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2662 * the method is not virtual, we just need to ensure this is not null
2663 * and then we can call the method directly.
2665 #ifndef DISABLE_REMOTING
2666 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2668 * The check above ensures method is not gshared, this is needed since
2669 * gshared methods can't have wrappers.
2671 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take no receiver, so skip the null check for them */
2675 if (!method->string_ctor)
2676 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2678 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2679 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2681 * the method is virtual, but we can statically dispatch since either
2682 * it's class or the method itself are sealed.
2683 * But first we need to ensure it's not a null reference.
2685 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2687 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2688 } else if (call_target) {
2689 vtable_reg = alloc_preg (cfg);
/* The load also acts as the null check on the receiver */
2690 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2692 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2693 call->inst.sreg1 = call_target->dreg;
/* NOTE(review): '!MONO_INST_HAS_METHOD' is logical NOT of a non-zero flag,
 * i.e. 0, so this '&=' clears *all* instruction flags, not just
 * MONO_INST_HAS_METHOD. A bitwise '~MONO_INST_HAS_METHOD' looks intended —
 * confirm and fix in a dedicated change. */
2694 call->inst.flags &= !MONO_INST_HAS_METHOD;
2696 vtable_reg = alloc_preg (cfg);
2697 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2698 if (mono_class_is_interface (method->klass)) {
2699 guint32 imt_slot = mono_method_get_imt_slot (method);
2700 emit_imt_argument (cfg, call, call->method, imt_arg);
2701 slot_reg = vtable_reg;
/* IMT slots live at negative offsets from the vtable pointer */
2702 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2704 slot_reg = vtable_reg;
/* Regular virtual method: index into the vtable's method table */
2705 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2706 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2708 g_assert (mono_method_signature (method)->generic_param_count);
2709 emit_imt_argument (cfg, call, call->method, imt_arg);
2713 call->inst.sreg1 = slot_reg;
2714 call->inst.inst_offset = offset;
2715 call->is_virtual = TRUE;
2719 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2722 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2724 return (MonoInst*)call;
/*
 * Convenience wrapper: call METHOD with its own signature, no tail call
 * and no imt/rgctx arguments.
 */
2728 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2730 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * Emit a direct (non-virtual, non-tail) call to the native function FUNC
 * with signature SIG and append it to the current basic block.
 */
2734 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2741 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2744 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2746 return (MonoInst*)call;
/*
 * Emit a call to the JIT icall identified by its function address FUNC,
 * going through the icall's managed wrapper.
 */
2750 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2752 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2756 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2760 * mono_emit_abs_call:
2762 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2764 inline static MonoInst*
2765 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2766 MonoMethodSignature *sig, MonoInst **args)
2768 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2772 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2775 if (cfg->abs_patches == NULL)
2776 cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* Register the patch so the backend can resolve the fake address later */
2777 g_hash_table_insert (cfg->abs_patches, ji, ji);
2778 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call target as a patch, not a real function pointer */
2779 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2783 static MonoMethodSignature*
/*
 * Return a copy of SIG with one extra trailing pointer-sized parameter
 * (used to pass an rgctx/extra argument through a calli).
 * NOTE(review): the result is g_malloc'ed and apparently never freed by
 * callers visible here — see the existing FIXME.
 */
2784 sig_to_rgctx_sig (MonoMethodSignature *sig)
2786 // FIXME: memory allocation
2787 MonoMethodSignature *res;
2790 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2791 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2792 res->param_count = sig->param_count + 1;
2793 for (i = 0; i < sig->param_count; ++i)
2794 res->params [i] = sig->params [i];
/* The extra argument is typed as a native int byref-like slot */
2795 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2799 /* Make an indirect call to FSIG passing an additional argument */
2801 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2803 MonoMethodSignature *csig;
2804 MonoInst *args_buf [16];
2806 int i, pindex, tmp_reg;
2808 /* Make a call with an rgctx/extra arg */
/* Small arg lists use the stack buffer, larger ones the mempool */
2809 if (fsig->param_count + 2 < 16)
2812 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2815 args [pindex ++] = orig_args [0];
2816 for (i = 0; i < fsig->param_count; ++i)
2817 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument after the original ones */
2818 tmp_reg = alloc_preg (cfg);
2819 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2820 csig = sig_to_rgctx_sig (fsig);
2821 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2824 /* Emit an indirect call to the function descriptor ADDR */
2826 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2828 int addr_reg, arg_reg;
2829 MonoInst *call_target;
2831 g_assert (cfg->llvm_only);
2834 * addr points to a <addr, arg> pair, load both of them, and
2835 * make a call to addr, passing arg as an extra arg.
2837 addr_reg = alloc_preg (cfg);
2838 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2839 arg_reg = alloc_preg (cfg);
/* The second word of the descriptor holds the extra argument */
2840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2842 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * Whether icalls may be called directly instead of through a wrapper.
 * Disabled under LLVM (non llvm-only), when sdb sequence points are
 * generated, or when direct icalls are explicitly disabled.
 */
2846 direct_icalls_enabled (MonoCompile *cfg)
2850 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2852 if (cfg->compile_llvm && !cfg->llvm_only)
2855 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * Emit a call to the icall described by INFO, inlining its wrapper when
 * the icall cannot raise and direct icalls are enabled; otherwise go
 * through the regular wrapper call.
 */
2861 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2864 * Call the jit icall without a wrapper if possible.
2865 * The wrapper is needed for the following reasons:
2866 * - to handle exceptions thrown using mono_raise_exceptions () from the
2867 * icall function. The EH code needs the lmf frame pushed by the
2868 * wrapper to be able to unwind back to managed code.
2869 * - to be able to do stack walks for asynchronously suspended
2870 * threads when debugging.
2872 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and cache the wrapper method on the icall info */
2876 if (!info->wrapper_method) {
2877 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2878 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish the wrapper before other threads can observe it */
2880 mono_memory_barrier ();
2884 * Inline the wrapper method, which is basically a call to the C icall, and
2885 * an exception check.
2887 costs = inline_method (cfg, info->wrapper_method, NULL,
2888 args, NULL, il_offset, TRUE);
2889 g_assert (costs > 0);
2890 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2894 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * Widen the result INS of a call with signature FSIG to register size
 * when the callee may return sub-register-sized integers with
 * uninitialized upper bits (pinvoke or LLVM-compiled callees).
 */
2899 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2901 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2902 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2906 * Native code might return non register sized integers
2907 * without initializing the upper bits.
/* Pick a sign/zero extension matching the return type's load width */
2909 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2910 case OP_LOADI1_MEMBASE:
2911 widen_op = OP_ICONV_TO_I1;
2913 case OP_LOADU1_MEMBASE:
2914 widen_op = OP_ICONV_TO_U1;
2916 case OP_LOADI2_MEMBASE:
2917 widen_op = OP_ICONV_TO_I2;
2919 case OP_LOADU2_MEMBASE:
2920 widen_op = OP_ICONV_TO_U2;
2926 if (widen_op != -1) {
2927 int dreg = alloc_preg (cfg);
2930 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result */
2931 widen->type = ins->type;
/*
 * Emit code that throws a MethodAccessException for CALLER accessing
 * CALLEE, resolving both methods through the rgctx when shared.
 */
2942 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2944 MonoInst *args [16];
2946 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2947 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2949 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * Return the cached corlib String.memcpy helper (3 args), aborting if an
 * old corlib without it is installed. Lazy init; the lookup is
 * idempotent so a racy double-initialization is presumably harmless —
 * TODO confirm this matches the file's other lazy caches.
 */
2953 get_memcpy_method (void)
2955 static MonoMethod *memcpy_method = NULL;
2956 if (!memcpy_method) {
2957 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2959 g_error ("Old corlib found. Install a new one");
2961 return memcpy_method;
/*
 * Recursively build a bitmap (one bit per pointer-sized slot, relative to
 * OFFSET) of the reference fields of KLASS, for use by the value-copy
 * write barrier.
 */
2965 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2967 MonoClassField *field;
2968 gpointer iter = NULL;
2970 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields do not live in the instance layout */
2973 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) object header */
2975 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2976 if (mini_type_is_reference (mono_field_get_type (field))) {
2977 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2978 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2980 MonoClass *field_class = mono_class_from_mono_type (field->type);
/* Recurse into embedded structs that themselves contain references */
2981 if (field_class->has_references)
2982 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * Emit a GC write barrier for storing VALUE through PTR. Prefers, in
 * order: the backend's card-table opcode, inline card marking, and
 * finally a call to the runtime write barrier method.
 */
2988 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2990 int card_table_shift_bits;
2991 gpointer card_table_mask;
2993 MonoInst *dummy_use;
2994 int nursery_shift_bits;
2995 size_t nursery_size;
2997 if (!cfg->gen_write_barriers)
3000 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3002 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fast path: a single backend opcode performs the whole barrier */
3004 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3007 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3008 wbarrier->sreg1 = ptr->dreg;
3009 wbarrier->sreg2 = value->dreg;
3010 MONO_ADD_INS (cfg->cbb, wbarrier);
3011 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3012 int offset_reg = alloc_preg (cfg);
/* offset = ptr >> card_table_shift_bits (optionally masked) */
3016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3017 if (card_table_mask)
3018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3020 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3021 * IMM's larger than 32bits.
3023 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3024 card_reg = ins->dreg;
/* Mark the card: card_table [offset] = 1 */
3026 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3027 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC's managed write barrier */
3029 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3030 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive until after the barrier */
3033 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * Emit an inline, write-barrier-aware copy of SIZE bytes from
 * iargs[1] to iargs[0] for valuetype KLASS. Falls back (by the visible
 * early-out conditions) when alignment is too small or the type is too
 * large; large-but-bounded copies call the bitmap value-copy icall.
 */
3037 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3039 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3040 unsigned need_wb = 0;
3045 /*types with references can't have alignment smaller than sizeof(void*) */
3046 if (align < SIZEOF_VOID_P)
3049 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3050 if (size > 32 * SIZEOF_VOID_P)
3053 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3055 /* We don't unroll more than 5 stores to avoid code bloat. */
3056 if (size > 5 * SIZEOF_VOID_P) {
3057 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size */
3058 size += (SIZEOF_VOID_P - 1);
3059 size &= ~(SIZEOF_VOID_P - 1);
3061 EMIT_NEW_ICONST (cfg, iargs [2], size);
3062 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3063 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3067 destreg = iargs [0]->dreg;
3068 srcreg = iargs [1]->dreg;
3071 dest_ptr_reg = alloc_preg (cfg);
3072 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced as slots are copied */
3075 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop */
3077 while (size >= SIZEOF_VOID_P) {
3078 MonoInst *load_inst;
3079 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3080 load_inst->dreg = tmp_reg;
3081 load_inst->inst_basereg = srcreg;
3082 load_inst->inst_offset = offset;
3083 MONO_ADD_INS (cfg->cbb, load_inst);
3085 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier the store when the copied slot holds a reference */
3088 emit_write_barrier (cfg, iargs [0], load_inst);
3090 offset += SIZEOF_VOID_P;
3091 size -= SIZEOF_VOID_P;
3094 /*tmp += sizeof (void*)*/
3095 if (size >= SIZEOF_VOID_P) {
3096 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3097 MONO_ADD_INS (cfg->cbb, iargs [0]);
3101 /* Those cannot be references since size < sizeof (void*) */
3103 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3104 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3110 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3111 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3117 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3118 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3127 * Emit code to copy a valuetype of type @klass whose address is stored in
3128 * @src->dreg to memory whose address is stored at @dest->dreg.
3131 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3133 MonoInst *iargs [4];
3136 MonoMethod *memcpy_method;
3137 MonoInst *size_ins = NULL;
3138 MonoInst *memcpy_ins = NULL;
3142 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3145 * This check breaks with spilled vars... need to handle it during verification anyway.
3146 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* For gsharedvt types, size and memcpy helper come from the rgctx */
3149 if (mini_is_gsharedvt_klass (klass)) {
3151 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3152 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3156 n = mono_class_native_size (klass, &align);
3158 n = mono_class_value_size (klass, &align);
3160 /* if native is true there should be no references in the struct */
3161 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3162 /* Avoid barriers when storing to the stack */
3163 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3164 (dest->opcode == OP_LDADDR))) {
3170 context_used = mini_class_check_context_used (cfg, klass);
3172 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3173 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3175 } else if (context_used) {
3176 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3178 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3179 if (!cfg->compile_aot)
3180 mono_class_compute_gc_descriptor (klass);
3184 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3186 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: small copies are inlined, otherwise call memcpy */
3191 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3192 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3193 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3198 iargs [2] = size_ins;
3200 EMIT_NEW_ICONST (cfg, iargs [2], n);
3202 memcpy_method = get_memcpy_method ();
/* gsharedvt: indirect call through the rgctx-provided memcpy */
3204 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3206 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * Return the cached corlib String.memset helper (3 args); aborts on an
 * old corlib. Same lazy-init pattern as get_memcpy_method ().
 */
3211 get_memset_method (void)
3213 static MonoMethod *memset_method = NULL;
3214 if (!memset_method) {
3215 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3217 g_error ("Old corlib found. Install a new one");
3219 return memset_method;
/*
 * Emit code zero-initializing a valuetype of type KLASS at address
 * DEST->dreg. gsharedvt types use an rgctx-provided size and bzero
 * helper; small types are zeroed inline, larger ones call memset.
 */
3223 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3225 MonoInst *iargs [3];
3228 MonoMethod *memset_method;
3229 MonoInst *size_ins = NULL;
3230 MonoInst *bzero_ins = NULL;
3231 static MonoMethod *bzero_method;
3233 /* FIXME: Optimize this for the case when dest is an LDADDR */
3234 mono_class_init (klass);
3235 if (mini_is_gsharedvt_klass (klass)) {
3236 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3237 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* bzero_aligned_1 signature is reused for the indirect bzero call */
3239 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3240 g_assert (bzero_method);
3242 iargs [1] = size_ins;
3243 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3247 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3249 n = mono_class_value_size (klass, &align);
/* Small values: zero inline instead of calling memset */
3251 if (n <= sizeof (gpointer) * 8) {
3252 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3255 memset_method = get_memset_method ();
3257 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3258 EMIT_NEW_ICONST (cfg, iargs [2], n);
3259 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3266 * Emit IR to return either the this pointer for instance method,
3267 * or the mrgctx for static methods.
3270 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3272 MonoInst *this_ins = NULL;
3274 g_assert (cfg->gshared);
/* Non-static, non-valuetype methods without method inflation use 'this' */
3276 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3277 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3278 !method->klass->valuetype)
3279 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-inflated context: load the mrgctx from its variable */
3281 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3282 MonoInst *mrgctx_loc, *mrgctx_var;
3284 g_assert (!this_ins);
3285 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3287 mrgctx_loc = mono_get_vtable_var (cfg);
3288 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static or valuetype methods: the context comes from the vtable var */
3291 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3292 MonoInst *vtable_loc, *vtable_var;
3294 g_assert (!this_ins);
3296 vtable_loc = mono_get_vtable_var (cfg);
3297 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3299 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3300 MonoInst *mrgctx_var = vtable_var;
/* The var holds an mrgctx; load the class vtable out of it */
3303 vtable_reg = alloc_preg (cfg);
3304 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3305 vtable_var->type = STACK_PTR;
/* Instance method: fetch the vtable from 'this' */
3313 vtable_reg = alloc_preg (cfg);
3314 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing a lazy
 * rgctx slot lookup for METHOD with the given patch type/data/info type.
 */
3319 static MonoJumpInfoRgctxEntry *
3320 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3322 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3323 res->method = method;
3324 res->in_mrgctx = in_mrgctx;
3325 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3326 res->data->type = patch_type;
3327 res->data->data.target = patch_data;
3328 res->info_type = info_type;
/*
 * Emit an inline rgctx slot fetch for ENTRY from RGCTX: walk the chained
 * rgctx arrays looking for the slot, falling back to the
 * mono_fill_{method,class}_rgctx icalls when any link or the slot itself
 * is null.
 */
3333 static inline MonoInst*
3334 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3336 MonoInst *args [16];
3339 // FIXME: No fastpath since the slot is not a compile time constant
3341 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3342 if (entry->in_mrgctx)
3343 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3345 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3349 * FIXME: This can be called during decompose, which is a problem since it creates
3351 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3353 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3355 MonoBasicBlock *is_null_bb, *end_bb;
3356 MonoInst *res, *ins, *call;
3359 slot = mini_get_rgctx_entry_slot (entry);
/* Decode which array (mrgctx vs class rgctx) and index the slot lives in */
3361 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3362 index = MONO_RGCTX_SLOT_INDEX (slot);
3364 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many array hops are needed to reach the slot */
3365 for (depth = 0; ; ++depth) {
3366 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3368 if (index < size - 1)
3373 NEW_BBLOCK (cfg, end_bb);
3374 NEW_BBLOCK (cfg, is_null_bb);
3377 rgctx_reg = rgctx->dreg;
3379 rgctx_reg = alloc_preg (cfg);
3381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3382 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3383 NEW_BBLOCK (cfg, is_null_bb);
3385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Follow the chain of rgctx arrays down to the one holding the slot */
3389 for (i = 0; i < depth; ++i) {
3390 int array_reg = alloc_preg (cfg);
3392 /* load ptr to next array */
3393 if (mrgctx && i == 0)
3394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3397 rgctx_reg = array_reg;
3398 /* is the ptr null? */
3399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3400 /* if yes, jump to actual trampoline */
3401 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot (+1 skips the next-array link at index 0) */
3405 val_reg = alloc_preg (cfg);
3406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3407 /* is the slot null? */
3408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3409 /* if yes, jump to actual trampoline */
3410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path result */
3413 res_reg = alloc_preg (cfg);
3414 MONO_INST_NEW (cfg, ins, OP_MOVE);
3415 ins->dreg = res_reg;
3416 ins->sreg1 = val_reg;
3417 MONO_ADD_INS (cfg->cbb, ins);
3419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot through the runtime */
3422 MONO_START_BB (cfg, is_null_bb);
3424 EMIT_NEW_ICONST (cfg, args [1], index);
3426 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3428 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
/* Merge slow-path result into the same register as the fast path */
3429 MONO_INST_NEW (cfg, ins, OP_MOVE);
3430 ins->dreg = res_reg;
3431 ins->sreg1 = call->dreg;
3432 MONO_ADD_INS (cfg->cbb, ins);
3433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3435 MONO_START_BB (cfg, end_bb);
3444 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/* Either inline the fetch or go through the lazy-fetch trampoline */
3447 static inline MonoInst*
3448 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3451 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3453 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR loading the RGCTX_TYPE info for KLASS from the rgctx. */
3457 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3458 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3460 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3461 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3463 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE info for signature SIG from the rgctx. */
3467 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3468 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3470 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3471 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3473 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * Emit IR loading gsharedvt call info (SIG + CMETHOD) of kind RGCTX_TYPE
 * from the rgctx.
 */
3477 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3478 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3480 MonoJumpInfoGSharedVtCall *call_info;
3481 MonoJumpInfoRgctxEntry *entry;
3484 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3485 call_info->sig = sig;
3486 call_info->method = cmethod;
3488 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3489 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3491 return emit_rgctx_fetch (cfg, rgctx, entry);
3495 * emit_get_rgctx_virt_method:
3497 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3500 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3501 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3503 MonoJumpInfoVirtMethod *info;
3504 MonoJumpInfoRgctxEntry *entry;
3507 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3508 info->klass = klass;
3509 info->method = virt_method;
3511 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3512 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3514 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the gsharedvt method info for CMETHOD from the rgctx. */
3518 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3519 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3521 MonoJumpInfoRgctxEntry *entry;
3524 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3525 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3527 return emit_rgctx_fetch (cfg, rgctx, entry);
3531 * emit_get_rgctx_method:
3533 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3534 * normal constants, else emit a load from the rgctx.
3537 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3538 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3540 if (!context_used) {
/* Shared code not involved: the value is a compile-time constant */
3543 switch (rgctx_type) {
3544 case MONO_RGCTX_INFO_METHOD:
3545 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3547 case MONO_RGCTX_INFO_METHOD_RGCTX:
3548 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3551 g_assert_not_reached ();
3554 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3555 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3557 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR loading the RGCTX_TYPE info for FIELD from the rgctx. */
3562 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3563 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3565 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3566 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3568 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * Return the index of the entry (DATA, RGCTX_TYPE) in the method's
 * gsharedvt info template table, reusing an existing matching entry and
 * growing the table (doubling, min 16) when full.
 */
3572 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3574 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3575 MonoRuntimeGenericContextInfoTemplate *template_;
/* Reuse an identical entry; LOCAL_OFFSET entries are never shared */
3580 for (i = 0; i < info->num_entries; ++i) {
3581 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3583 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3587 if (info->num_entries == info->count_entries) {
3588 MonoRuntimeGenericContextInfoTemplate *new_entries;
3589 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3591 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3593 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3594 info->entries = new_entries;
3595 info->count_entries = new_count_entries;
/* Append the new template entry */
3598 idx = info->num_entries;
3599 template_ = &info->entries [idx];
3600 template_->info_type = rgctx_type;
3601 template_->data = data;
3603 info->num_entries ++;
3609 * emit_get_gsharedvt_info:
3611 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3614 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3619 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3620 /* Load info->entries [idx] */
3621 dreg = alloc_preg (cfg);
3622 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by KLASS's type. */
3628 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3630 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3634 * On return the caller must check @klass for load errors.
/* Emit IR running the class (cctor) initialization for KLASS if needed */
3637 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3639 MonoInst *vtable_arg;
3642 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: resolve the vtable through the rgctx */
3645 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3646 klass, MONO_RGCTX_INFO_VTABLE);
3648 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3652 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3655 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3659 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3660 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3662 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3663 ins->sreg1 = vtable_arg->dreg;
3664 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: test vtable->initialized and call the init icall if clear */
3667 MonoBasicBlock *inited_bb;
3668 MonoInst *args [16];
3670 inited_reg = alloc_ireg (cfg);
3672 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3674 NEW_BBLOCK (cfg, inited_bb);
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3679 args [0] = vtable_arg;
3680 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3682 MONO_START_BB (cfg, inited_bb);
/*
 * Emit a sequence point at IL offset IP when sequence points are enabled
 * and we are compiling METHOD itself (not an inlined callee).
 */
3687 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3691 if (cfg->gen_seq_points && cfg->method == method) {
3692 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3694 ins->flags |= MONO_INST_NONEMPTY_STACK;
3695 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When the --debug=casts option (better_cast_details) is active, emit IR
 * which records the source class (taken from the object's vtable) and the
 * target KLASS of a cast into per-thread MonoJitTlsData slots
 * (class_cast_from / class_cast_to), so a failing cast can produce a
 * detailed error message.  No-op when the debug option is off.
 *   NULL_CHECK presumably guards the emission of the obj_reg null test
 * below -- the controlling condition is not visible here, TODO confirm.
 */
3700 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3702 if (mini_get_debug_options ()->better_cast_details) {
3703 int vtable_reg = alloc_preg (cfg);
3704 int klass_reg = alloc_preg (cfg);
3705 MonoBasicBlock *is_null_bb = NULL;
3707 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely when the object is null. */
3710 NEW_BBLOCK (cfg, is_null_bb);
3712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* The cast details live in JIT TLS; bail out if TLS access is unavailable. */
3716 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3718 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* from-class: obj->vtable->klass */
3722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3725 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* to-class: via RGCTX when KLASS is a shared-generic open type, constant otherwise. */
3727 context_used = mini_class_check_context_used (cfg, klass);
3729 MonoInst *class_ins;
3731 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3732 to_klass_reg = class_ins->dreg;
3734 to_klass_reg = alloc_preg (cfg);
3735 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3740 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Counterpart of mini_save_cast_details (): clear the per-thread cast
 * bookkeeping once the cast has been decided.  As the comment below notes,
 * zeroing the class_cast_from slot alone is sufficient.  No-op unless
 * --debug=casts (better_cast_details) is enabled.
 */
3745 mini_reset_cast_details (MonoCompile *cfg)
3747 /* Reset the variables holding the cast details */
3748 if (mini_get_debug_options ()->better_cast_details) {
3749 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3750 /* It is enough to reset the from field */
3751 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3756 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an array whose exact type matches
 * ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise (used e.g. for
 * covariant array stores).  The comparison strategy depends on the
 * compilation mode:
 *   - MONO_OPT_SHARED: compare obj->vtable->klass against a runtime
 *     constant for ARRAY_CLASS;
 *   - shared generic context: compare obj->vtable against the vtable
 *     obtained from the RGCTX;
 *   - AOT: compare against a vtable loaded into a register;
 *   - plain JIT: compare against the vtable as an immediate.
 *   Per the comment above this function, on return the caller must check
 * @array_class for load errors (mono_class_vtable () may fail below).
 */
3759 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3761 int vtable_reg = alloc_preg (cfg);
3764 context_used = mini_class_check_context_used (cfg, array_class);
3766 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3768 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3770 if (cfg->opt & MONO_OPT_SHARED) {
3771 int class_reg = alloc_preg (cfg);
3774 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3775 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3776 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3777 } else if (context_used) {
3778 MonoInst *vtable_ins;
3780 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3781 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3783 if (cfg->compile_aot) {
3787 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3789 vt_reg = alloc_preg (cfg);
3790 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3791 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3794 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* All branches above leave the comparison flags set for this conditional throw. */
3800 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3802 mini_reset_cast_details (cfg);
3806 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3807 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Handle unbox of a Nullable<T> by calling the class's own
 * Nullable<T>:Unbox () method on VAL (per the comment above this function,
 * shared generic code is generated when CONTEXT_USED is non-zero).
 *   In the shared case the method address comes from the RGCTX and an
 * indirect call is emitted (calli via LLVM-only or regular path); otherwise
 * a direct call is made, passing the vtable as an extra argument when
 * method sharing requires it (pass_vtable).
 */
3810 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3812 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3815 MonoInst *rgctx, *addr;
3817 /* FIXME: What if the class is shared? We might not
3818 have to get the address of the method from the
3820 addr = emit_get_rgctx_method (cfg, context_used, method,
3821 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3822 if (cfg->llvm_only) {
/* Record the signature so the LLVM-only backend can emit the indirect-call thunk. */
3823 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3824 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3826 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3828 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, optionally passing the vtable. */
3831 gboolean pass_vtable, pass_mrgctx;
3832 MonoInst *rgctx_arg = NULL;
3834 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3835 g_assert (!pass_mrgctx);
3838 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3841 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3844 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR implementing the CIL `unbox` semantics for sp [0]: verify at
 * runtime that the boxed object's element class matches KLASS (throwing
 * InvalidCastException otherwise), then produce a managed pointer to the
 * value payload, i.e. obj + sizeof (MonoObject).
 *   CONTEXT_USED != 0 selects the shared-generics path where the expected
 * element class is fetched from the RGCTX instead of being a constant.
 */
3849 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3853 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3854 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3855 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3856 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3858 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3860 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3862 /* FIXME: generics */
3863 g_assert (klass->rank == 0);
/* The object must not be an array (rank 0), otherwise the cast fails. */
3866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3867 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3869 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3870 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generics: compare against the element class from the RGCTX. */
3873 MonoInst *element_class;
3875 /* This assertion is from the unboxcast insn */
3876 g_assert (klass->rank == 0);
3878 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3879 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3881 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3882 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: constant class check, with cast-failure details for --debug=casts. */
3884 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3885 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3886 mini_reset_cast_details (cfg);
/* Result: managed pointer just past the object header. */
3889 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3890 MONO_ADD_INS (cfg->cbb, add);
3891 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt (variable-size generic) type whose
 * concrete instantiation is only known at run time.  Branches on the
 * runtime MONO_RGCTX_INFO_CLASS_BOX_TYPE of KLASS:
 *   - vtype (fall-through): the value lives inline in the box, so the
 *     result address is obj + sizeof (MonoObject);
 *   - REF: the reference itself is spilled to a stack temporary and the
 *     address of that temporary is the result;
 *   - NULLABLE: call the class's Nullable unbox routine through a
 *     hand-built signature (object -> T) and take the address of its
 *     return value.
 *   All paths store an address into addr_reg; the final load produces the
 * unboxed value.  Returns the resulting MonoInst.
 */
3898 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3900 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3901 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3905 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime type check via icall (castclass for the unbox case). */
3911 args [1] = klass_inst;
3914 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3916 NEW_BBLOCK (cfg, is_ref_bb);
3917 NEW_BBLOCK (cfg, is_nullable_bb);
3918 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of KLASS. */
3919 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3924 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3926 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3927 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: value is stored inline after the object header. */
3931 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3932 MONO_ADD_INS (cfg->cbb, addr);
3934 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3937 MONO_START_BB (cfg, is_ref_bb);
3939 /* Save the ref to a temporary */
3940 dreg = alloc_ireg (cfg);
3941 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3942 addr->dreg = addr_reg;
3943 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3947 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable unbox through a synthesized object->T signature. */
3950 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3951 MonoInst *unbox_call;
3952 MonoMethodSignature *unbox_sig;
3954 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3955 unbox_sig->ret = &klass->byval_arg;
3956 unbox_sig->param_count = 1;
3957 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3960 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3962 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3964 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3965 addr->dreg = addr_reg;
3968 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Join: load the unboxed value from whichever address was produced. */
3971 MONO_START_BB (cfg, end_bb);
3974 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3980 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of KLASS, choosing among several
 * allocation strategies:
 *   - shared-generics (context_used): class/vtable fetched via RGCTX, then
 *     either a managed allocator (when available and not MONO_OPT_SHARED)
 *     or an allocation icall;
 *   - MONO_OPT_SHARED: ves_icall_object_new (domain, class);
 *   - AOT out-of-line corlib types: specialized mscorlib helper keyed by
 *     type token, to avoid relocations;
 *   - default: create the vtable now and call either a managed allocator
 *     or the allocation function chosen by mono_class_get_allocation_ftn ().
 *   FOR_BOX indicates the allocation is for a box operation (affects which
 * managed allocator is picked).  Per the comment above this function,
 * returns NULL and sets the cfg exception on error.
 */
3983 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3985 MonoInst *iargs [2];
3990 MonoRgctxInfoType rgctx_info;
3991 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-variable instance size. */
3992 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3994 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3996 if (cfg->opt & MONO_OPT_SHARED)
3997 rgctx_info = MONO_RGCTX_INFO_KLASS;
3999 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4000 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4002 if (cfg->opt & MONO_OPT_SHARED) {
4003 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4005 alloc_ftn = ves_icall_object_new;
4008 alloc_ftn = ves_icall_object_new_specific;
4011 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4012 if (known_instance_size) {
4013 int size = mono_class_instance_size (klass);
/* Sanity check: every object is at least as big as its header. */
4014 if (size < sizeof (MonoObject))
4015 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4017 EMIT_NEW_ICONST (cfg, iargs [1], size);
4019 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4022 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4025 if (cfg->opt & MONO_OPT_SHARED) {
4026 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4027 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4029 alloc_ftn = ves_icall_object_new;
4030 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4031 /* This happens often in argument checking code, eg. throw new FooException... */
4032 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4033 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4034 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4036 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4037 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
4041 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4042 cfg->exception_ptr = klass;
4046 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4048 if (managed_alloc) {
4049 int size = mono_class_instance_size (klass);
4050 if (size < sizeof (MonoObject))
4051 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4053 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4054 EMIT_NEW_ICONST (cfg, iargs [1], size);
4055 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4057 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the allocator wants the size in pointer-words as arg 0. */
4059 guint32 lw = vtable->klass->instance_size;
4060 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4061 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4062 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4065 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4069 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4073 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR implementing the CIL `box` operation for VAL of type KLASS.
 * Three major cases:
 *   - Nullable<T>: call the class's own Box () method (through the RGCTX
 *     for shared code, directly otherwise);
 *   - gsharedvt KLASS: branch at runtime on the class's box type --
 *     vtype (allocate + copy payload), ref (no allocation, the value IS
 *     the reference), or nullable (call the runtime-resolved Box through a
 *     synthesized T->object signature);
 *   - plain vtype (tail of the function): allocate and store the payload
 *     just past the object header.
 *   Per the comment above this function, returns NULL and sets the cfg
 * exception on error.
 */
4076 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4078 MonoInst *alloc, *ins;
4080 if (mono_class_is_nullable (klass)) {
4081 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4084 if (cfg->llvm_only && cfg->gsharedvt) {
4085 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4086 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4087 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4089 /* FIXME: What if the class is shared? We might not
4090 have to get the method address from the RGCTX. */
4091 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4092 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4093 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4095 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable case: direct call, optionally with the vtable argument. */
4098 gboolean pass_vtable, pass_mrgctx;
4099 MonoInst *rgctx_arg = NULL;
4101 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4102 g_assert (!pass_mrgctx);
4105 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4108 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4111 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4115 if (mini_is_gsharedvt_klass (klass)) {
4116 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4117 MonoInst *res, *is_ref, *src_var, *addr;
4120 dreg = alloc_ireg (cfg);
4122 NEW_BBLOCK (cfg, is_ref_bb);
4123 NEW_BBLOCK (cfg, is_nullable_bb);
4124 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of KLASS (mirrors handle_unbox_gsharedvt). */
4125 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4126 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4127 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4130 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate a box and copy the payload after the header. */
4133 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4136 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4137 ins->opcode = OP_STOREV_MEMBASE;
4139 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4140 res->type = STACK_OBJ;
4142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4145 MONO_START_BB (cfg, is_ref_bb);
4147 /* val is a vtype, so has to load the value manually */
4148 src_var = get_vreg_to_inst (cfg, val->dreg);
4150 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4151 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4156 MONO_START_BB (cfg, is_nullable_bb);
4159 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4160 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4162 MonoMethodSignature *box_sig;
4165 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4166 * construct that method at JIT time, so have to do things by hand.
4168 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4169 box_sig->ret = &mono_defaults.object_class->byval_arg;
4170 box_sig->param_count = 1;
4171 box_sig->params [0] = &klass->byval_arg;
4174 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4176 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4177 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4178 res->type = STACK_OBJ;
4182 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4184 MONO_START_BB (cfg, end_bb);
/* Default (non-gsharedvt, non-nullable) path: allocate + store payload. */
4188 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4192 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4197 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Decide whether CMETHOD (an icall) may be called directly, bypassing
 * the icall wrapper.  Requires direct icalls to be enabled for this cfg,
 * and restricts the set to a whitelist: System.Math plus a few corlib
 * classes known not to (transitively) raise exceptions via
 * mono_raise_exception () -- see the comment below.
 *   The whitelist hash is built lazily; the memory barrier before
 * publishing it makes the racy initialization safe, and lookups afterwards
 * need no locking (the table is never mutated again).
 */
4200 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4202 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4203 if (!direct_icalls_enabled (cfg))
4207 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4208 * Whitelist a few icalls for now.
4210 if (!direct_icall_type_hash) {
4211 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4213 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4214 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4215 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4216 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4217 mono_memory_barrier ();
4218 direct_icall_type_hash = h;
4221 if (cmethod->klass == mono_defaults.math_class)
4223 /* No locking needed */
4224 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Predicate used to identify methods whose implementation walks the
 * managed stack -- here System.Type:GetType () is matched.  The return
 * values are not visible in this excerpt; presumably returns TRUE for the
 * matched method and FALSE otherwise -- TODO confirm against the full file.
 */
4230 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4232 if (cmethod->klass == mono_defaults.systemtype_class) {
4233 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an intrinsic expansion of Enum.HasFlag () for enum class KLASS:
 *     result = ((*enum_this & enum_flag) == enum_flag)
 *   The enum's underlying type selects 32-bit vs 64-bit ALU opcodes
 * (is_i4); the final compare produces an I4 boolean.  The emitted
 * load/and/compare/ceq instructions are run through
 * mono_decompose_opcode () so backends without the composite opcodes can
 * handle them.
 */
4239 static G_GNUC_UNUSED MonoInst*
4240 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4242 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4243 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4246 switch (enum_type->type) {
4249 #if SIZEOF_REGISTER == 8
4261 MonoInst *load, *and_, *cmp, *ceq;
4262 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4263 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4264 int dest_reg = alloc_ireg (cfg);
4266 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4267 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4268 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4269 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4271 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that lack them. */
4274 load = mono_decompose_opcode (cfg, load);
4275 and_ = mono_decompose_opcode (cfg, and_);
4276 cmp = mono_decompose_opcode (cfg, cmp);
4277 ceq = mono_decompose_opcode (cfg, ceq);
4285 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate a delegate of KLASS
 * and initialize its fields (target, method, cached code slot, trampoline /
 * invoke_impl, method_is_virtual) directly in IR.  VIRTUAL_ selects the
 * virtual-delegate flavor.  Per the comment above this function, returns
 * NULL and sets the cfg exception on error.
 *   The remaining runtime checks which mono_delegate_ctor () performs are
 * deferred to the delegate trampoline (see the final comment below).
 */
4287 static G_GNUC_UNUSED MonoInst*
4288 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4292 gpointer trampoline;
4293 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke impl; bail if unavailable. */
4297 if (virtual_ && !cfg->llvm_only) {
4298 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4301 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4305 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4309 /* Inline the contents of mono_delegate_ctor */
4311 /* Set target field */
4312 /* Optimize away setting of NULL target */
4313 if (!MONO_INS_IS_PCONST_NULL (target)) {
4314 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object: notify the GC. */
4315 if (cfg->gen_write_barriers) {
4316 dreg = alloc_preg (cfg);
4317 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4318 emit_write_barrier (cfg, ptr, target);
4322 /* Set method field */
4323 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4324 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4327 * To avoid looking up the compiled code belonging to the target method
4328 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4329 * store it, and we fill it after the method has been compiled.
4331 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4332 MonoInst *code_slot_ins;
4335 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/look up the per-domain code slot under the domain lock. */
4337 domain = mono_domain_get ();
4338 mono_domain_lock (domain);
4339 if (!domain_jit_info (domain)->method_code_hash)
4340 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4341 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4343 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4344 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4346 mono_domain_unlock (domain);
4348 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4350 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* LLVM-only mode: delegate init is done via an icall instead of trampolines. */
4353 if (cfg->llvm_only) {
4354 MonoInst *args [16];
4359 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4360 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4363 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: reference the trampoline through a patch-info constant. */
4369 if (cfg->compile_aot) {
4370 MonoDelegateClassMethodPair *del_tramp;
4372 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4373 del_tramp->klass = klass;
4374 del_tramp->method = context_used ? NULL : method;
4375 del_tramp->is_virtual = virtual_;
4376 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4379 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4381 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4382 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4385 /* Set invoke_impl field */
4387 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual: invoke_impl and method_ptr come from the MonoDelegateTrampInfo. */
4389 dreg = alloc_preg (cfg);
4390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4391 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4393 dreg = alloc_preg (cfg);
4394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4398 dreg = alloc_preg (cfg);
4399 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4400 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4402 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK via the
 * vararg icall mono_array_new_va ().  The dimension/bound arguments come
 * from the evaluation stack SP.  Vararg calling conventions are not
 * supported by the LLVM backend, so LLVM is disabled for this method.
 */
4408 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4410 MonoJitICallInfo *info;
4412 /* Need to register the icall so it gets an icall wrapper */
4413 info = mono_get_array_new_va_icall (rank);
4415 cfg->flags |= MONO_CFG_HAS_VARARGS;
4417 /* mono_array_new_va () needs a vararg calling convention */
4418 cfg->exception_message = g_strdup ("array-new");
4419 cfg->disable_llvm = TRUE;
4421 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4422 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4426 * handle_constrained_gsharedvt_call:
4428 * Handle constrained calls where the receiver is a gsharedvt type.
4429 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle constrained calls where the receiver is a gsharedvt type (see
 * the comment block above this function).  Instead of emitting a runtime
 * ref-vs-vtype branch with two different call sequences, the supported
 * shapes are packed into arguments for the
 * mono_gsharedvt_constrained_call () icall, which does the dispatch at run
 * time.  Unsupported shapes hit GSHAREDVT_FAILURE.
 *   On success the icall's boxed result is unpacked back to the expected
 * return type.  REF_EMIT_WIDEN is an in/out flag; its value is written
 * back before returning.
 */
4432 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4433 gboolean *ref_emit_widen)
4435 MonoInst *ins = NULL;
4436 gboolean emit_widen = *ref_emit_widen;
4439 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4440 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4441 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Guard: only specific method/signature shapes are supported (see comment below). */
4443 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4444 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4445 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4446 MonoInst *args [16];
4449 * This case handles calls to
4450 * - object:ToString()/Equals()/GetHashCode(),
4451 * - System.IComparable<T>:CompareTo()
4452 * - System.IEquatable<T>:Equals ()
4453 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1]: the target method (via RGCTX when it uses generic context). */
4457 if (mono_method_check_context_used (cmethod))
4458 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4460 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4461 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4463 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4464 if (fsig->hasthis && fsig->param_count) {
4465 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4466 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4467 ins->dreg = alloc_preg (cfg);
4468 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4469 MONO_ADD_INS (cfg->cbb, ins);
4472 if (mini_is_gsharedvt_type (fsig->params [0])) {
4473 int addr_reg, deref_arg_reg;
4475 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4476 deref_arg_reg = alloc_preg (cfg);
4477 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4478 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4480 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4481 addr_reg = ins->dreg;
4482 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4484 EMIT_NEW_ICONST (cfg, args [3], 0);
4485 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No managed arguments: deref flag and args array are both 0/NULL. */
4488 EMIT_NEW_ICONST (cfg, args [3], 0);
4489 EMIT_NEW_ICONST (cfg, args [4], 0);
4491 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* Unpack the boxed return value into the shape the caller expects. */
4494 if (mini_is_gsharedvt_type (fsig->ret)) {
4495 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4496 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4500 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4501 MONO_ADD_INS (cfg->cbb, add);
4503 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4504 MONO_ADD_INS (cfg->cbb, ins);
4505 /* ins represents the call result */
4508 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4511 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR instruction at the very start of the entry basic block.
 * Idempotent: returns immediately when there is no got_var or it has
 * already been allocated.
 *   A dummy use in bb_exit keeps got_var live across the whole method,
 * since the real uses may only be introduced later by the backends (see
 * the comment below).
 */
4520 mono_emit_load_got_addr (MonoCompile *cfg)
4522 MonoInst *getaddr, *dummy_use;
4524 if (!cfg->got_var || cfg->got_var_allocated)
4527 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4528 getaddr->cil_code = cfg->header->code;
4529 getaddr->dreg = cfg->got_var->dreg;
4531 /* Add it to the start of the first bblock */
4532 if (cfg->bb_entry->code) {
4533 getaddr->next = cfg->bb_entry->code;
4534 cfg->bb_entry->code = getaddr;
4537 MONO_ADD_INS (cfg->bb_entry, getaddr);
4539 cfg->got_var_allocated = TRUE;
4542 * Add a dummy use to keep the got_var alive, since real uses might
4543 * only be generated by the back ends.
4544 * Add it to end_bblock, so the variable's lifetime covers the whole
4546 * It would be better to make the usage of the got var explicit in all
4547 * cases when the backend needs it (i.e. calls, throw etc.), so this
4548 * wouldn't be needed.
4550 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4551 MONO_ADD_INS (cfg->bb_exit, dummy_use);
4554 static int inline_limit;
4555 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled.  Rejection reasons visible here include: inlining disabled on
 * the cfg, inline depth > 10, header summary unavailable, NoInlining /
 * Synchronized flags, MarshalByRef classes, code size over the limit
 * (INLINE_LENGTH_LIMIT, overridable via the MONO_INLINELIMIT environment
 * variable) unless AggressiveInlining is set, pending class initialization
 * that cannot be performed or proven done here (see the cctor comment
 * below), soft-float R4 parameters/returns, and methods in
 * cfg->dont_inline.
 */
4558 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4560 MonoMethodHeaderSummary header;
4562 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4563 MonoMethodSignature *sig = mono_method_signature (method);
4567 if (cfg->disable_inline)
4572 if (cfg->inline_depth > 10)
4575 if (!mono_method_get_header_summary (method, &header))
4578 /*runtime, icall and pinvoke are checked by summary call*/
4579 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4580 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4581 (mono_class_is_marshalbyref (method->klass)) ||
4585 /* also consider num_locals? */
4586 /* Do the size check early to avoid creating vtables */
4587 if (!inline_limit_inited) {
4589 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4590 inline_limit = atoi (inlinelimit);
4591 g_free (inlinelimit);
4593 inline_limit = INLINE_LENGTH_LIMIT;
4594 inline_limit_inited = TRUE;
4596 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4600 * if we can initialize the class of the method right away, we do,
4601 * otherwise we don't allow inlining if the class needs initialization,
4602 * since it would mean inserting a call to mono_runtime_class_init()
4603 * inside the inlined code
4605 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4608 if (!(cfg->opt & MONO_OPT_SHARED)) {
4609 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4610 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4611 if (method->klass->has_cctor) {
4612 vtable = mono_class_vtable (cfg->domain, method->klass);
4615 if (!cfg->compile_aot) {
4617 if (!mono_runtime_class_init_full (vtable, &error)) {
4618 mono_error_cleanup (&error);
4623 } else if (mono_class_is_before_field_init (method->klass)) {
4624 if (cfg->run_cctors && method->klass->has_cctor) {
4625 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4626 if (!method->klass->runtime_info)
4627 /* No vtable created yet */
4629 vtable = mono_class_vtable (cfg->domain, method->klass);
4632 /* This makes so that inline cannot trigger */
4633 /* .cctors: too many apps depend on them */
4634 /* running with a specific order... */
4635 if (! vtable->initialized)
4638 if (!mono_runtime_class_init_full (vtable, &error)) {
4639 mono_error_cleanup (&error);
4643 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4644 if (!method->klass->runtime_info)
4645 /* No vtable created yet */
4647 vtable = mono_class_vtable (cfg->domain, method->klass);
4650 if (!vtable->initialized)
4655 * If we're compiling for shared code
4656 * the cctor will need to be run at aot method load time, for example,
4657 * or at the end of the compilation of the inlining method.
4659 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft-float targets cannot inline methods taking/returning R4. */
4663 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4664 if (mono_arch_is_soft_float ()) {
4666 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4668 for (i = 0; i < sig->param_count; ++i)
4669 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4674 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a (static) field access on KLASS from METHOD requires
 * emitting a class-initialization check.  Visible fast-outs: JIT mode with
 * the vtable already initialized; BeforeFieldInit classes accessed from
 * the outermost method; classes that simply don't need their cctor run;
 * and static-constructor-style self access (see the comment below) where
 * initialization has already happened before the method is entered.
 */
4681 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4683 if (!cfg->compile_aot) {
4685 if (vtable->initialized)
4689 if (mono_class_is_before_field_init (klass)) {
4690 if (cfg->method == method)
4694 if (!mono_class_needs_cctor_run (klass, method))
4697 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4698 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element
 * access (ldelema): result = &arr->vector [index * element_size].
 * BCHECK controls the bounds check against MonoArray.max_length.
 *   On 64-bit targets the 32-bit index is sign-extended first (or narrowed
 * from I8).  On x86/amd64, power-of-two element sizes use a single LEA.
 * For gsharedvt variable-size element classes the element size is fetched
 * from the RGCTX at run time instead of being an immediate multiplier.
 */
4705 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4709 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4712 if (mini_is_gsharedvt_variable_klass (klass)) {
4715 mono_class_init (klass);
4716 size = mono_class_array_element_size (klass);
4719 mult_reg = alloc_preg (cfg);
4720 array_reg = arr->dreg;
4721 index_reg = index->dreg;
4723 #if SIZEOF_REGISTER == 8
4724 /* The array reg is 64 bits but the index reg is only 32 */
4725 if (COMPILE_LLVM (cfg)) {
4727 index2_reg = index_reg;
4729 index2_reg = alloc_preg (cfg);
4730 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4733 if (index->type == STACK_I8) {
4734 index2_reg = alloc_preg (cfg);
4735 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4737 index2_reg = index_reg;
4742 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold scale and offset into a single LEA. */
4744 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4745 if (size == 1 || size == 2 || size == 4 || size == 8) {
4746 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4748 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4749 ins->klass = mono_class_get_element_class (klass);
4750 ins->type = STACK_MP;
/* Generic path: mult + add, element size from RGCTX when variable. */
4756 add_reg = alloc_ireg_mp (cfg);
4759 MonoInst *rgctx_ins;
4762 g_assert (cfg->gshared);
4763 context_used = mini_class_check_context_used (cfg, klass);
4764 g_assert (context_used);
4765 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4766 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4770 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4771 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4772 ins->klass = mono_class_get_element_class (klass);
4773 ins->type = STACK_MP;
4774 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with element class KLASS, including per-dimension
 * lower-bound adjustment and range checks against the MonoArrayBounds pairs.
 * Returns the address instruction (STACK_MP typed).
 */
4780 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4782 int bounds_reg = alloc_preg (cfg);
4783 int add_reg = alloc_ireg_mp (cfg);
4784 int mult_reg = alloc_preg (cfg);
4785 int mult2_reg = alloc_preg (cfg);
4786 int low1_reg = alloc_preg (cfg);
4787 int low2_reg = alloc_preg (cfg);
4788 int high1_reg = alloc_preg (cfg);
4789 int high2_reg = alloc_preg (cfg);
4790 int realidx1_reg = alloc_preg (cfg);
4791 int realidx2_reg = alloc_preg (cfg);
4792 int sum_reg = alloc_preg (cfg);
4793 int index1, index2, tmpreg;
4797 mono_class_init (klass);
4798 size = mono_class_array_element_size (klass);
4800 index1 = index_ins1->dreg;
4801 index2 = index_ins2->dreg;
4803 #if SIZEOF_REGISTER == 8
4804 /* The array reg is 64 bits but the index reg is only 32 */
4805 if (COMPILE_LLVM (cfg)) {
/* sign-extend both 32 bit indexes to pointer width */
4808 tmpreg = alloc_preg (cfg);
4809 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4811 tmpreg = alloc_preg (cfg);
4812 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4816 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4820 /* range checking */
4821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4822 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* dimension 0: realidx1 = index1 - lower_bound; throw unless realidx1 < length */
4824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4825 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4826 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4828 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4829 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4830 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* dimension 1: same check against the second MonoArrayBounds entry */
4832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4833 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4834 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4835 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4836 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4837 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4838 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim1_length + realidx2) * size) + vector offset */
4840 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4841 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4842 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4843 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4844 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4846 ins->type = STACK_MP;
4848 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address/Get/Set helper call
 * CMETHOD with the value-stack arguments SP.  Rank-1 and (optionally) rank-2
 * arrays get inline fast paths; everything else falls back to a call to a
 * marshalling wrapper returned by mono_marshal_get_array_address ().
 * IS_SET excludes the trailing value argument when computing the rank.
 */
4854 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4858 MonoMethod *addr_method;
4860 MonoClass *eclass = cmethod->klass->element_class;
4862 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* rank 1: inline address computation with a bounds check */
4865 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4867 /* emit_ldelema_2 depends on OP_LMUL */
4868 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4869 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt element classes take the generic wrapper path below */
4872 if (mini_is_gsharedvt_variable_klass (eclass))
/* generic fallback: call the per-rank/per-size Address wrapper */
4875 element_size = mono_class_array_element_size (eclass);
4876 addr_method = mono_marshal_get_array_address (rank, element_size);
4877 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every IL `break` / Debugger.Break () request. */
4882 static MonoBreakPolicy
4883 always_insert_breakpoint (MonoMethod *method)
4885 return MONO_BREAK_POLICY_ALWAYS;
/* Active policy callback; embedders override it via mono_set_break_policy () */
4888 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4891 * mono_set_break_policy:
4892 * \param policy_callback the new callback function
4894 * Allow embedders to decide whether to actually obey breakpoint instructions
4895 * (both break IL instructions and \c Debugger.Break method calls), for example
4896 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4897 * untrusted or semi-trusted code.
4899 * \p policy_callback will be called every time a break point instruction needs to
4900 * be inserted with the method argument being the method that calls \c Debugger.Break
4901 * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4902 * if it wants the breakpoint to not be effective in the given method.
4903 * \c MONO_BREAK_POLICY_ALWAYS is the default.
4906 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4908 if (policy_callback)
4909 break_policy_func = policy_callback;
/* passing NULL restores the default always-break policy */
4911 break_policy_func = always_insert_breakpoint;
/*
 * Ask the installed break policy whether a breakpoint should actually be
 * emitted for METHOD.  (The identifier's "brekpoint" misspelling is the
 * historical upstream name; kept to avoid churn at the call sites.)
 */
4915 should_insert_brekpoint (MonoMethod *method) {
4916 switch (break_policy_func (method)) {
4917 case MONO_BREAK_POLICY_ALWAYS:
4919 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG required the old mdb debugger backend, which was removed */
4921 case MONO_BREAK_POLICY_ON_DBG:
4922 g_warning ("mdb no longer supported");
/* any other value is a broken embedder callback */
4925 g_warning ("Incorrect value returned from break policy callback");
4930 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * Inline the array Get/SetGenericValueImpl icalls: compute the element
 * address, then copy the value between the element and the by-ref argument
 * (args [2]).  IS_SET selects store-into-array vs. load-from-array.
 */
4932 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4934 MonoInst *addr, *store, *load;
4935 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4937 /* the bounds check is already done by the callers */
4938 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *element = *args [2]; needs a write barrier for reference elements */
4940 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4941 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4942 if (mini_type_is_reference (&eklass->byval_arg))
4943 emit_write_barrier (cfg, addr, load);
/* get: *args [2] = *element */
4945 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4946 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS's by-value type is a reference type (after generic sharing) */
4953 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4955 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into element SP [1] of array SP [0] with
 * element class KLASS.  With SAFETY_CHECKS, non-null reference stores go
 * through the virtual stelemref helper (which performs the array covariance
 * check); otherwise the store is emitted inline, with a write barrier when
 * the element type is a reference type.
 */
4959 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4961 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4962 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4963 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4964 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4965 MonoInst *iargs [3];
4968 mono_class_setup_vtable (obj_array);
4969 g_assert (helper->slot);
4971 if (sp [0]->type != STACK_OBJ)
4973 if (sp [2]->type != STACK_OBJ)
/* virtual call so the helper specialized for the actual array type runs */
4980 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use a variable-size store */
4984 if (mini_is_gsharedvt_variable_klass (klass)) {
4987 // FIXME-VT: OP_ICONST optimization
4988 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4989 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4990 ins->opcode = OP_STOREV_MEMBASE;
/* constant index: fold the element offset into the store instruction */
4991 } else if (sp [1]->opcode == OP_ICONST) {
4992 int array_reg = sp [0]->dreg;
4993 int index_reg = sp [1]->dreg;
4994 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4996 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4997 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5000 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5001 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* general case: compute the element address, store through it */
5003 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5004 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5005 if (generic_class_is_reference_type (cfg, klass))
5006 emit_write_barrier (cfg, addr, sp [2]);
/*
 * Inline the Array.UnsafeStore/UnsafeLoad intrinsics: element access with no
 * covariance safety checks.  For stores, delegates to emit_array_store with
 * safety_checks == FALSE; for loads, emits an address + typed load.
 */
5013 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* element class comes from the value parameter (store) or return type (load) */
5018 eklass = mono_class_from_mono_type (fsig->params [2]);
5020 eklass = mono_class_from_mono_type (fsig->ret);
5023 return emit_array_store (cfg, eklass, args, FALSE);
5025 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5026 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic may be implemented as a
 * plain register move from PARAM_KLASS to RETURN_KLASS.  Allowed when both
 * are value types without managed references, neither is a float, and their
 * sizes match (or both fit in a 32 bit register for scalars).
 *
 * Fix: repaired mojibake — `¶m_klass` (U+00B6 pilcrow, a mangled `&para`)
 * restored to `&param_klass` in the four expressions below.
 */
5032 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5035 int param_size, return_size;
/* strip enum/generic-sharing wrappers down to the underlying types */
5037 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5038 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5040 if (cfg->verbose_level > 3)
5041 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5043 //Don't allow mixing reference types with value types
5044 if (param_klass->valuetype != return_klass->valuetype) {
5045 if (cfg->verbose_level > 3)
5046 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
/* two reference types: always register-compatible */
5050 if (!param_klass->valuetype) {
5051 if (cfg->verbose_level > 3)
5052 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* GC references inside a struct cannot be blitted */
5057 if (param_klass->has_references || return_klass->has_references)
5060 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5061 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5062 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5063 if (cfg->verbose_level > 3)
5064 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* floats live in different registers; a plain move does not apply */
5068 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5069 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5070 if (cfg->verbose_level > 3)
5071 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5075 param_size = mono_class_value_size (param_klass, &align);
5076 return_size = mono_class_value_size (return_klass, &align);
5078 //We can do it if sizes match
5079 if (param_size == return_size) {
5080 if (cfg->verbose_level > 3)
5081 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5085 //No simple way to handle struct if sizes don't match
5086 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5087 if (cfg->verbose_level > 3)
5088 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5093 * Same reg size category.
5094 * A quick note on why we don't require widening here.
5095 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5097 * Since the source value comes from a function argument, the JIT will already have
5098 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5100 if (param_size <= 4 && return_size <= 4) {
5101 if (cfg->verbose_level > 3)
5102 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * Inline Array.UnsafeMov<S,R>: reinterpret the argument as the return type
 * when is_unsafe_mov_compatible allows it, either directly or element-wise
 * for rank-1 arrays of compatible element types.  Bails for gsharedvt
 * returns, where the concrete type is not known at JIT time.
 */
5110 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5112 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5113 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5115 if (mini_is_gsharedvt_variable_type (fsig->ret))
5118 //Valuetypes that are semantically equivalent or numbers than can be widened to
5119 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5122 //Arrays of valuetypes that are semantically equivalent
5123 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR.  Attempts SIMD
 * intrinsics first (when MONO_OPT_SIMD is enabled on SIMD-capable builds),
 * then the native-types intrinsics; visible fall-through suggests NULL means
 * "no intrinsic, emit a normal call".
 */
5130 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5132 #ifdef MONO_ARCH_SIMD_INTRINSICS
5133 MonoInst *ins = NULL;
5135 if (cfg->opt & MONO_OPT_SIMD) {
5136 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5142 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * Emit an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) into the current basic block.
 */
5146 emit_memory_barrier (MonoCompile *cfg, int kind)
5148 MonoInst *ins = NULL;
5149 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5150 MONO_ADD_INS (cfg->cbb, ins);
5151 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics usable only with the LLVM backend: Math.Sin/Cos/Sqrt/Abs(double)
 * mapped to unary float opcodes, and (with MONO_OPT_CMOV) Math.Min/Max over
 * I4/U4/I8/U8 mapped to min/max opcodes.  Returns the emitted instruction,
 * or falls through when no intrinsic applies.
 */
5157 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5159 MonoInst *ins = NULL;
5162 /* The LLVM backend supports these intrinsics */
5163 if (cmethod->klass == mono_defaults.math_class) {
5164 if (strcmp (cmethod->name, "Sin") == 0) {
5166 } else if (strcmp (cmethod->name, "Cos") == 0) {
5168 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5170 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: one R8 argument, R8 result */
5174 if (opcode && fsig->param_count == 1) {
5175 MONO_INST_NEW (cfg, ins, opcode);
5176 ins->type = STACK_R8;
5177 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5178 ins->sreg1 = args [0]->dreg;
5179 MONO_ADD_INS (cfg->cbb, ins);
5183 if (cfg->opt & MONO_OPT_CMOV) {
5184 if (strcmp (cmethod->name, "Min") == 0) {
5185 if (fsig->params [0]->type == MONO_TYPE_I4)
5187 if (fsig->params [0]->type == MONO_TYPE_U4)
5188 opcode = OP_IMIN_UN;
5189 else if (fsig->params [0]->type == MONO_TYPE_I8)
5191 else if (fsig->params [0]->type == MONO_TYPE_U8)
5192 opcode = OP_LMIN_UN;
5193 } else if (strcmp (cmethod->name, "Max") == 0) {
5194 if (fsig->params [0]->type == MONO_TYPE_I4)
5196 if (fsig->params [0]->type == MONO_TYPE_U4)
5197 opcode = OP_IMAX_UN;
5198 else if (fsig->params [0]->type == MONO_TYPE_I8)
5200 else if (fsig->params [0]->type == MONO_TYPE_U8)
5201 opcode = OP_LMAX_UN;
/* binary min/max: two same-typed integer arguments */
5205 if (opcode && fsig->param_count == 2) {
5206 MONO_INST_NEW (cfg, ins, opcode);
/* NOTE(review): U4 falls into STACK_I8 here since only I4 maps to STACK_I4
   — looks suspicious for OP_IMIN_UN/OP_IMAX_UN; confirm against upstream */
5207 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5208 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5209 ins->sreg1 = args [0]->dreg;
5210 ins->sreg2 = args [1]->dreg;
5211 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array.UnsafeStore/
 * UnsafeLoad/UnsafeMov helpers.  Returns the emitted instruction or falls
 * through when CMETHOD is not one of them.
 */
5219 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5221 if (cmethod->klass == mono_defaults.array_class) {
5222 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5223 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5224 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5225 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5226 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5227 return emit_array_unsafe_mov (cfg, fsig, args);
5234 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5236 MonoInst *ins = NULL;
5237 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5239 if (cmethod->klass == mono_defaults.string_class) {
5240 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5241 int dreg = alloc_ireg (cfg);
5242 int index_reg = alloc_preg (cfg);
5243 int add_reg = alloc_preg (cfg);
5245 #if SIZEOF_REGISTER == 8
5246 if (COMPILE_LLVM (cfg)) {
5247 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5249 /* The array reg is 64 bits but the index reg is only 32 */
5250 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5253 index_reg = args [1]->dreg;
5255 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5257 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5258 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5259 add_reg = ins->dreg;
5260 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5263 int mult_reg = alloc_preg (cfg);
5264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5265 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5266 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5267 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5269 type_from_op (cfg, ins, NULL, NULL);
5271 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5272 int dreg = alloc_ireg (cfg);
5273 /* Decompose later to allow more optimizations */
5274 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5275 ins->type = STACK_I4;
5276 ins->flags |= MONO_INST_FAULT;
5277 cfg->cbb->has_array_access = TRUE;
5278 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5283 } else if (cmethod->klass == mono_defaults.object_class) {
5284 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5285 int dreg = alloc_ireg_ref (cfg);
5286 int vt_reg = alloc_preg (cfg);
5287 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5288 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5289 type_from_op (cfg, ins, NULL, NULL);
5292 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5293 int dreg = alloc_ireg (cfg);
5294 int t1 = alloc_ireg (cfg);
5296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5297 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5298 ins->type = STACK_I4;
5301 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5302 MONO_INST_NEW (cfg, ins, OP_NOP);
5303 MONO_ADD_INS (cfg->cbb, ins);
5307 } else if (cmethod->klass == mono_defaults.array_class) {
5308 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5309 return emit_array_generic_access (cfg, fsig, args, FALSE);
5310 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5311 return emit_array_generic_access (cfg, fsig, args, TRUE);
5313 #ifndef MONO_BIG_ARRAYS
5315 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5318 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5319 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5320 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5321 int dreg = alloc_ireg (cfg);
5322 int bounds_reg = alloc_ireg_mp (cfg);
5323 MonoBasicBlock *end_bb, *szarray_bb;
5324 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5326 NEW_BBLOCK (cfg, end_bb);
5327 NEW_BBLOCK (cfg, szarray_bb);
5329 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5330 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5331 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5332 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5333 /* Non-szarray case */
5335 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5336 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5338 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5339 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5341 MONO_START_BB (cfg, szarray_bb);
5344 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5345 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5347 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5348 MONO_START_BB (cfg, end_bb);
5350 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5351 ins->type = STACK_I4;
5357 if (cmethod->name [0] != 'g')
5360 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5361 int dreg = alloc_ireg (cfg);
5362 int vtable_reg = alloc_preg (cfg);
5363 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5364 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5365 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5366 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5367 type_from_op (cfg, ins, NULL, NULL);
5370 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5371 int dreg = alloc_ireg (cfg);
5373 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5374 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5375 type_from_op (cfg, ins, NULL, NULL);
5380 } else if (cmethod->klass == runtime_helpers_class) {
5381 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5382 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5384 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5385 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5387 g_assert (ctx->method_inst);
5388 g_assert (ctx->method_inst->type_argc == 1);
5389 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5390 MonoClass *klass = mono_class_from_mono_type (t);
5394 mono_class_init (klass);
5395 if (MONO_TYPE_IS_REFERENCE (t))
5396 EMIT_NEW_ICONST (cfg, ins, 1);
5397 else if (MONO_TYPE_IS_PRIMITIVE (t))
5398 EMIT_NEW_ICONST (cfg, ins, 0);
5399 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5400 EMIT_NEW_ICONST (cfg, ins, 1);
5401 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5402 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5404 g_assert (cfg->gshared);
5406 int context_used = mini_class_check_context_used (cfg, klass);
5408 /* This returns 1 or 2 */
5409 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5410 int dreg = alloc_ireg (cfg);
5411 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5417 } else if (cmethod->klass == mono_defaults.monitor_class) {
5418 gboolean is_enter = FALSE;
5419 gboolean is_v4 = FALSE;
5421 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5425 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5430 * To make async stack traces work, icalls which can block should have a wrapper.
5431 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5433 MonoBasicBlock *end_bb;
5435 NEW_BBLOCK (cfg, end_bb);
5437 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5439 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5440 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5441 MONO_START_BB (cfg, end_bb);
5444 } else if (cmethod->klass == mono_defaults.thread_class) {
5445 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5446 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5447 MONO_ADD_INS (cfg->cbb, ins);
5449 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5450 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5451 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5453 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5455 if (fsig->params [0]->type == MONO_TYPE_I1)
5456 opcode = OP_LOADI1_MEMBASE;
5457 else if (fsig->params [0]->type == MONO_TYPE_U1)
5458 opcode = OP_LOADU1_MEMBASE;
5459 else if (fsig->params [0]->type == MONO_TYPE_I2)
5460 opcode = OP_LOADI2_MEMBASE;
5461 else if (fsig->params [0]->type == MONO_TYPE_U2)
5462 opcode = OP_LOADU2_MEMBASE;
5463 else if (fsig->params [0]->type == MONO_TYPE_I4)
5464 opcode = OP_LOADI4_MEMBASE;
5465 else if (fsig->params [0]->type == MONO_TYPE_U4)
5466 opcode = OP_LOADU4_MEMBASE;
5467 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5468 opcode = OP_LOADI8_MEMBASE;
5469 else if (fsig->params [0]->type == MONO_TYPE_R4)
5470 opcode = OP_LOADR4_MEMBASE;
5471 else if (fsig->params [0]->type == MONO_TYPE_R8)
5472 opcode = OP_LOADR8_MEMBASE;
5473 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5474 opcode = OP_LOAD_MEMBASE;
5477 MONO_INST_NEW (cfg, ins, opcode);
5478 ins->inst_basereg = args [0]->dreg;
5479 ins->inst_offset = 0;
5480 MONO_ADD_INS (cfg->cbb, ins);
5482 switch (fsig->params [0]->type) {
5489 ins->dreg = mono_alloc_ireg (cfg);
5490 ins->type = STACK_I4;
5494 ins->dreg = mono_alloc_lreg (cfg);
5495 ins->type = STACK_I8;
5499 ins->dreg = mono_alloc_ireg (cfg);
5500 #if SIZEOF_REGISTER == 8
5501 ins->type = STACK_I8;
5503 ins->type = STACK_I4;
5508 ins->dreg = mono_alloc_freg (cfg);
5509 ins->type = STACK_R8;
5512 g_assert (mini_type_is_reference (fsig->params [0]));
5513 ins->dreg = mono_alloc_ireg_ref (cfg);
5514 ins->type = STACK_OBJ;
5518 if (opcode == OP_LOADI8_MEMBASE)
5519 ins = mono_decompose_opcode (cfg, ins);
5521 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5525 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5527 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5529 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5530 opcode = OP_STOREI1_MEMBASE_REG;
5531 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5532 opcode = OP_STOREI2_MEMBASE_REG;
5533 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5534 opcode = OP_STOREI4_MEMBASE_REG;
5535 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5536 opcode = OP_STOREI8_MEMBASE_REG;
5537 else if (fsig->params [0]->type == MONO_TYPE_R4)
5538 opcode = OP_STORER4_MEMBASE_REG;
5539 else if (fsig->params [0]->type == MONO_TYPE_R8)
5540 opcode = OP_STORER8_MEMBASE_REG;
5541 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5542 opcode = OP_STORE_MEMBASE_REG;
5545 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5547 MONO_INST_NEW (cfg, ins, opcode);
5548 ins->sreg1 = args [1]->dreg;
5549 ins->inst_destbasereg = args [0]->dreg;
5550 ins->inst_offset = 0;
5551 MONO_ADD_INS (cfg->cbb, ins);
5553 if (opcode == OP_STOREI8_MEMBASE_REG)
5554 ins = mono_decompose_opcode (cfg, ins);
5559 } else if (cmethod->klass->image == mono_defaults.corlib &&
5560 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5561 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5564 #if SIZEOF_REGISTER == 8
5565 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5566 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5567 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5568 ins->dreg = mono_alloc_preg (cfg);
5569 ins->sreg1 = args [0]->dreg;
5570 ins->type = STACK_I8;
5571 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5572 MONO_ADD_INS (cfg->cbb, ins);
5576 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5578 /* 64 bit reads are already atomic */
5579 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5580 load_ins->dreg = mono_alloc_preg (cfg);
5581 load_ins->inst_basereg = args [0]->dreg;
5582 load_ins->inst_offset = 0;
5583 load_ins->type = STACK_I8;
5584 MONO_ADD_INS (cfg->cbb, load_ins);
5586 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5593 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5594 MonoInst *ins_iconst;
5597 if (fsig->params [0]->type == MONO_TYPE_I4) {
5598 opcode = OP_ATOMIC_ADD_I4;
5599 cfg->has_atomic_add_i4 = TRUE;
5601 #if SIZEOF_REGISTER == 8
5602 else if (fsig->params [0]->type == MONO_TYPE_I8)
5603 opcode = OP_ATOMIC_ADD_I8;
5606 if (!mono_arch_opcode_supported (opcode))
5608 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5609 ins_iconst->inst_c0 = 1;
5610 ins_iconst->dreg = mono_alloc_ireg (cfg);
5611 MONO_ADD_INS (cfg->cbb, ins_iconst);
5613 MONO_INST_NEW (cfg, ins, opcode);
5614 ins->dreg = mono_alloc_ireg (cfg);
5615 ins->inst_basereg = args [0]->dreg;
5616 ins->inst_offset = 0;
5617 ins->sreg2 = ins_iconst->dreg;
5618 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5619 MONO_ADD_INS (cfg->cbb, ins);
5621 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5622 MonoInst *ins_iconst;
5625 if (fsig->params [0]->type == MONO_TYPE_I4) {
5626 opcode = OP_ATOMIC_ADD_I4;
5627 cfg->has_atomic_add_i4 = TRUE;
5629 #if SIZEOF_REGISTER == 8
5630 else if (fsig->params [0]->type == MONO_TYPE_I8)
5631 opcode = OP_ATOMIC_ADD_I8;
5634 if (!mono_arch_opcode_supported (opcode))
5636 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5637 ins_iconst->inst_c0 = -1;
5638 ins_iconst->dreg = mono_alloc_ireg (cfg);
5639 MONO_ADD_INS (cfg->cbb, ins_iconst);
5641 MONO_INST_NEW (cfg, ins, opcode);
5642 ins->dreg = mono_alloc_ireg (cfg);
5643 ins->inst_basereg = args [0]->dreg;
5644 ins->inst_offset = 0;
5645 ins->sreg2 = ins_iconst->dreg;
5646 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5647 MONO_ADD_INS (cfg->cbb, ins);
5649 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5652 if (fsig->params [0]->type == MONO_TYPE_I4) {
5653 opcode = OP_ATOMIC_ADD_I4;
5654 cfg->has_atomic_add_i4 = TRUE;
5656 #if SIZEOF_REGISTER == 8
5657 else if (fsig->params [0]->type == MONO_TYPE_I8)
5658 opcode = OP_ATOMIC_ADD_I8;
5661 if (!mono_arch_opcode_supported (opcode))
5663 MONO_INST_NEW (cfg, ins, opcode);
5664 ins->dreg = mono_alloc_ireg (cfg);
5665 ins->inst_basereg = args [0]->dreg;
5666 ins->inst_offset = 0;
5667 ins->sreg2 = args [1]->dreg;
5668 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5669 MONO_ADD_INS (cfg->cbb, ins);
5672 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5673 MonoInst *f2i = NULL, *i2f;
5674 guint32 opcode, f2i_opcode, i2f_opcode;
5675 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5676 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5678 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5679 fsig->params [0]->type == MONO_TYPE_R4) {
5680 opcode = OP_ATOMIC_EXCHANGE_I4;
5681 f2i_opcode = OP_MOVE_F_TO_I4;
5682 i2f_opcode = OP_MOVE_I4_TO_F;
5683 cfg->has_atomic_exchange_i4 = TRUE;
5685 #if SIZEOF_REGISTER == 8
5687 fsig->params [0]->type == MONO_TYPE_I8 ||
5688 fsig->params [0]->type == MONO_TYPE_R8 ||
5689 fsig->params [0]->type == MONO_TYPE_I) {
5690 opcode = OP_ATOMIC_EXCHANGE_I8;
5691 f2i_opcode = OP_MOVE_F_TO_I8;
5692 i2f_opcode = OP_MOVE_I8_TO_F;
5695 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5696 opcode = OP_ATOMIC_EXCHANGE_I4;
5697 cfg->has_atomic_exchange_i4 = TRUE;
5703 if (!mono_arch_opcode_supported (opcode))
5707 /* TODO: Decompose these opcodes instead of bailing here. */
5708 if (COMPILE_SOFT_FLOAT (cfg))
5711 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5712 f2i->dreg = mono_alloc_ireg (cfg);
5713 f2i->sreg1 = args [1]->dreg;
5714 if (f2i_opcode == OP_MOVE_F_TO_I4)
5715 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5716 MONO_ADD_INS (cfg->cbb, f2i);
5719 MONO_INST_NEW (cfg, ins, opcode);
5720 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5721 ins->inst_basereg = args [0]->dreg;
5722 ins->inst_offset = 0;
5723 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5724 MONO_ADD_INS (cfg->cbb, ins);
5726 switch (fsig->params [0]->type) {
5728 ins->type = STACK_I4;
5731 ins->type = STACK_I8;
5734 #if SIZEOF_REGISTER == 8
5735 ins->type = STACK_I8;
5737 ins->type = STACK_I4;
5742 ins->type = STACK_R8;
5745 g_assert (mini_type_is_reference (fsig->params [0]));
5746 ins->type = STACK_OBJ;
5751 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5752 i2f->dreg = mono_alloc_freg (cfg);
5753 i2f->sreg1 = ins->dreg;
5754 i2f->type = STACK_R8;
5755 if (i2f_opcode == OP_MOVE_I4_TO_F)
5756 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5757 MONO_ADD_INS (cfg->cbb, i2f);
5762 if (cfg->gen_write_barriers && is_ref)
5763 emit_write_barrier (cfg, args [0], args [1]);
5765 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5766 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5767 guint32 opcode, f2i_opcode, i2f_opcode;
5768 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5769 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5771 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5772 fsig->params [1]->type == MONO_TYPE_R4) {
5773 opcode = OP_ATOMIC_CAS_I4;
5774 f2i_opcode = OP_MOVE_F_TO_I4;
5775 i2f_opcode = OP_MOVE_I4_TO_F;
5776 cfg->has_atomic_cas_i4 = TRUE;
5778 #if SIZEOF_REGISTER == 8
5780 fsig->params [1]->type == MONO_TYPE_I8 ||
5781 fsig->params [1]->type == MONO_TYPE_R8 ||
5782 fsig->params [1]->type == MONO_TYPE_I) {
5783 opcode = OP_ATOMIC_CAS_I8;
5784 f2i_opcode = OP_MOVE_F_TO_I8;
5785 i2f_opcode = OP_MOVE_I8_TO_F;
5788 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5789 opcode = OP_ATOMIC_CAS_I4;
5790 cfg->has_atomic_cas_i4 = TRUE;
5796 if (!mono_arch_opcode_supported (opcode))
5800 /* TODO: Decompose these opcodes instead of bailing here. */
5801 if (COMPILE_SOFT_FLOAT (cfg))
5804 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5805 f2i_new->dreg = mono_alloc_ireg (cfg);
5806 f2i_new->sreg1 = args [1]->dreg;
5807 if (f2i_opcode == OP_MOVE_F_TO_I4)
5808 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5809 MONO_ADD_INS (cfg->cbb, f2i_new);
5811 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5812 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5813 f2i_cmp->sreg1 = args [2]->dreg;
5814 if (f2i_opcode == OP_MOVE_F_TO_I4)
5815 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5816 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5819 MONO_INST_NEW (cfg, ins, opcode);
5820 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5821 ins->sreg1 = args [0]->dreg;
5822 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5823 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5824 MONO_ADD_INS (cfg->cbb, ins);
5826 switch (fsig->params [1]->type) {
5828 ins->type = STACK_I4;
5831 ins->type = STACK_I8;
5834 #if SIZEOF_REGISTER == 8
5835 ins->type = STACK_I8;
5837 ins->type = STACK_I4;
5841 ins->type = cfg->r4_stack_type;
5844 ins->type = STACK_R8;
5847 g_assert (mini_type_is_reference (fsig->params [1]));
5848 ins->type = STACK_OBJ;
5853 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5854 i2f->dreg = mono_alloc_freg (cfg);
5855 i2f->sreg1 = ins->dreg;
5856 i2f->type = STACK_R8;
5857 if (i2f_opcode == OP_MOVE_I4_TO_F)
5858 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5859 MONO_ADD_INS (cfg->cbb, i2f);
5864 if (cfg->gen_write_barriers && is_ref)
5865 emit_write_barrier (cfg, args [0], args [1]);
5867 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5868 fsig->params [1]->type == MONO_TYPE_I4) {
5869 MonoInst *cmp, *ceq;
5871 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5874 /* int32 r = CAS (location, value, comparand); */
5875 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5876 ins->dreg = alloc_ireg (cfg);
5877 ins->sreg1 = args [0]->dreg;
5878 ins->sreg2 = args [1]->dreg;
5879 ins->sreg3 = args [2]->dreg;
5880 ins->type = STACK_I4;
5881 MONO_ADD_INS (cfg->cbb, ins);
5883 /* bool result = r == comparand; */
5884 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5885 cmp->sreg1 = ins->dreg;
5886 cmp->sreg2 = args [2]->dreg;
5887 cmp->type = STACK_I4;
5888 MONO_ADD_INS (cfg->cbb, cmp);
5890 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5891 ceq->dreg = alloc_ireg (cfg);
5892 ceq->type = STACK_I4;
5893 MONO_ADD_INS (cfg->cbb, ceq);
5895 /* *success = result; */
5896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5898 cfg->has_atomic_cas_i4 = TRUE;
5900 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5901 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5905 } else if (cmethod->klass->image == mono_defaults.corlib &&
5906 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5907 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5910 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5912 MonoType *t = fsig->params [0];
5914 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5916 g_assert (t->byref);
5917 /* t is a byref type, so the reference check is more complicated */
5918 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5919 if (t->type == MONO_TYPE_I1)
5920 opcode = OP_ATOMIC_LOAD_I1;
5921 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5922 opcode = OP_ATOMIC_LOAD_U1;
5923 else if (t->type == MONO_TYPE_I2)
5924 opcode = OP_ATOMIC_LOAD_I2;
5925 else if (t->type == MONO_TYPE_U2)
5926 opcode = OP_ATOMIC_LOAD_U2;
5927 else if (t->type == MONO_TYPE_I4)
5928 opcode = OP_ATOMIC_LOAD_I4;
5929 else if (t->type == MONO_TYPE_U4)
5930 opcode = OP_ATOMIC_LOAD_U4;
5931 else if (t->type == MONO_TYPE_R4)
5932 opcode = OP_ATOMIC_LOAD_R4;
5933 else if (t->type == MONO_TYPE_R8)
5934 opcode = OP_ATOMIC_LOAD_R8;
5935 #if SIZEOF_REGISTER == 8
5936 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5937 opcode = OP_ATOMIC_LOAD_I8;
5938 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5939 opcode = OP_ATOMIC_LOAD_U8;
5941 else if (t->type == MONO_TYPE_I)
5942 opcode = OP_ATOMIC_LOAD_I4;
5943 else if (is_ref || t->type == MONO_TYPE_U)
5944 opcode = OP_ATOMIC_LOAD_U4;
5948 if (!mono_arch_opcode_supported (opcode))
5951 MONO_INST_NEW (cfg, ins, opcode);
5952 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5953 ins->sreg1 = args [0]->dreg;
5954 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5955 MONO_ADD_INS (cfg->cbb, ins);
5958 case MONO_TYPE_BOOLEAN:
5965 ins->type = STACK_I4;
5969 ins->type = STACK_I8;
5973 #if SIZEOF_REGISTER == 8
5974 ins->type = STACK_I8;
5976 ins->type = STACK_I4;
5980 ins->type = cfg->r4_stack_type;
5983 ins->type = STACK_R8;
5987 ins->type = STACK_OBJ;
5993 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5995 MonoType *t = fsig->params [0];
5998 g_assert (t->byref);
5999 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6000 if (t->type == MONO_TYPE_I1)
6001 opcode = OP_ATOMIC_STORE_I1;
6002 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6003 opcode = OP_ATOMIC_STORE_U1;
6004 else if (t->type == MONO_TYPE_I2)
6005 opcode = OP_ATOMIC_STORE_I2;
6006 else if (t->type == MONO_TYPE_U2)
6007 opcode = OP_ATOMIC_STORE_U2;
6008 else if (t->type == MONO_TYPE_I4)
6009 opcode = OP_ATOMIC_STORE_I4;
6010 else if (t->type == MONO_TYPE_U4)
6011 opcode = OP_ATOMIC_STORE_U4;
6012 else if (t->type == MONO_TYPE_R4)
6013 opcode = OP_ATOMIC_STORE_R4;
6014 else if (t->type == MONO_TYPE_R8)
6015 opcode = OP_ATOMIC_STORE_R8;
6016 #if SIZEOF_REGISTER == 8
6017 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6018 opcode = OP_ATOMIC_STORE_I8;
6019 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6020 opcode = OP_ATOMIC_STORE_U8;
6022 else if (t->type == MONO_TYPE_I)
6023 opcode = OP_ATOMIC_STORE_I4;
6024 else if (is_ref || t->type == MONO_TYPE_U)
6025 opcode = OP_ATOMIC_STORE_U4;
6029 if (!mono_arch_opcode_supported (opcode))
6032 MONO_INST_NEW (cfg, ins, opcode);
6033 ins->dreg = args [0]->dreg;
6034 ins->sreg1 = args [1]->dreg;
6035 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6036 MONO_ADD_INS (cfg->cbb, ins);
6038 if (cfg->gen_write_barriers && is_ref)
6039 emit_write_barrier (cfg, args [0], args [1]);
6045 } else if (cmethod->klass->image == mono_defaults.corlib &&
6046 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6047 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6048 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6049 if (should_insert_brekpoint (cfg->method)) {
6050 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6052 MONO_INST_NEW (cfg, ins, OP_NOP);
6053 MONO_ADD_INS (cfg->cbb, ins);
6057 } else if (cmethod->klass->image == mono_defaults.corlib &&
6058 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6059 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6060 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6062 EMIT_NEW_ICONST (cfg, ins, 1);
6064 EMIT_NEW_ICONST (cfg, ins, 0);
6067 } else if (cmethod->klass->image == mono_defaults.corlib &&
6068 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6069 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6070 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6071 /* No stack walks are currently available, so implement this as an intrinsic */
6072 MonoInst *assembly_ins;
6074 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6075 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6078 } else if (cmethod->klass->image == mono_defaults.corlib &&
6079 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6080 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6081 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6082 /* No stack walks are currently available, so implement this as an intrinsic */
6083 MonoInst *method_ins;
6084 MonoMethod *declaring = cfg->method;
6086 /* This returns the declaring generic method */
6087 if (declaring->is_inflated)
6088 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6089 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6090 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6091 cfg->no_inline = TRUE;
6092 if (cfg->method != cfg->current_method)
6093 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6096 } else if (cmethod->klass == mono_defaults.math_class) {
6098 * There is general branchless code for Min/Max, but it does not work for
6100 * http://everything2.com/?node_id=1051618
6102 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6103 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6104 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6105 ins->dreg = alloc_preg (cfg);
6106 ins->type = STACK_I4;
6107 MONO_ADD_INS (cfg->cbb, ins);
6109 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6110 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6111 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6112 !strcmp (cmethod->klass->name, "Selector")) ||
6113 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6114 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6115 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6116 !strcmp (cmethod->klass->name, "Selector"))
6118 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6119 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6120 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6123 MonoJumpInfoToken *ji;
6126 if (args [0]->opcode == OP_GOT_ENTRY) {
6127 pi = (MonoInst *)args [0]->inst_p1;
6128 g_assert (pi->opcode == OP_PATCH_INFO);
6129 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6130 ji = (MonoJumpInfoToken *)pi->inst_p0;
6132 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6133 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6136 NULLIFY_INS (args [0]);
6138 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6139 return_val_if_nok (&cfg->error, NULL);
6141 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6142 ins->dreg = mono_alloc_ireg (cfg);
6145 MONO_ADD_INS (cfg->cbb, ins);
6150 #ifdef MONO_ARCH_SIMD_INTRINSICS
6151 if (cfg->opt & MONO_OPT_SIMD) {
6152 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6158 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6162 if (COMPILE_LLVM (cfg)) {
6163 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6168 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6172 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect a call to certain well-known methods to a specialized
 *   implementation. Currently this only rewrites System.String's
 *   InternalAllocateStr into a direct call to the GC's managed allocator
 *   (skipped when allocation profiling or shared-code mode is active).
 *   Returns the emitted call instruction, or NULL when no redirection
 *   applies (tail of the function not visible in this view).
 */
6175 inline static MonoInst*
6176 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6177 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6179 if (method->klass == mono_defaults.string_class) {
6180 /* managed string allocation support */
6181 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6182 MonoInst *iargs [2];
6183 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6184 MonoMethod *managed_alloc = NULL;
6186 g_assert (vtable); /* Should not fail since it is System.String */
6187 #ifndef MONO_CROSS_COMPILE
6188 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
6192 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6193 iargs [1] = args [0];
6194 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *   Store the call arguments on the evaluation stack SP into newly created
 *   local variables, populating cfg->args [] so the inlined body can read
 *   its arguments like a normal method. Handles an implicit 'this' at
 *   index 0 when sig->hasthis is set.
 */
6201 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6203 MonoInst *store, *temp;
6206 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the 'this' slot the static type comes from the stack entry itself. */
6207 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6210 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6211 * would be different than the MonoInst's used to represent arguments, and
6212 * the ldelema implementation can't deal with that.
6213 * Solution: When ldelema is used on an inline argument, create a var for
6214 * it, emit ldelema on that var, and emit the saving code below in
6215 * inline_method () if needed.
6217 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6218 cfg->args [i] = temp;
6219 /* This uses cfg->args [i] which is set by the preceding line */
6220 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6221 store->cil_code = sp [0]->cil_code;
/* Compile-time switches enabling env-var based filtering of which methods
 * may be inlined (callee and caller side respectively); debugging aids. */
6226 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6227 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6229 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Return whether CALLED_METHOD may be inlined, based on a name prefix
 *   read once from the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 *   variable (cached in a static; an empty/unset limit path is elided here).
 */
6231 check_inline_called_method_name_limit (MonoMethod *called_method)
6234 static const char *limit = NULL;
6236 if (limit == NULL) {
6237 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6239 if (limit_string != NULL)
6240 limit = limit_string;
6245 if (limit [0] != '\0') {
6246 char *called_method_name = mono_method_full_name (called_method, TRUE);
6248 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6249 g_free (called_method_name);
/* Only methods whose full name starts with the limit string pass. */
6251 //return (strncmp_result <= 0);
6252 return (strncmp_result == 0);
6259 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Caller-side counterpart of check_inline_called_method_name_limit ():
 *   only allow inlining when CALLER_METHOD's full name starts with the
 *   prefix from MONO_INLINE_CALLER_METHOD_NAME_LIMIT (cached in a static).
 */
6261 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6264 static const char *limit = NULL;
6266 if (limit == NULL) {
6267 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6268 if (limit_string != NULL) {
6269 limit = limit_string;
6275 if (limit [0] != '\0') {
6276 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6278 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6279 g_free (caller_method_name);
/* Prefix match required; the <= variant was an experiment, kept for reference. */
6281 //return (strncmp_result <= 0);
6282 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes the vreg DREG to the zero/default value for
 *   RTYPE: NULL for pointers/references, 0 for integer types, 0.0 for
 *   R4/R8 (loaded from static storage so the constant has an address),
 *   and VZERO for value types (including generic instances and type
 *   variables known to be value types).
 */
6290 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6292 static double r8_0 = 0.0;
6293 static float r4_0 = 0.0;
6297 rtype = mini_get_underlying_type (rtype);
6301 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6302 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6303 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6304 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6305 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6306 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 stays single precision; load the float zero directly. */
6307 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6308 ins->type = STACK_R4;
6309 ins->inst_p0 = (void*)&r4_0;
6311 MONO_ADD_INS (cfg->cbb, ins);
6312 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6313 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6314 ins->type = STACK_R8;
6315 ins->inst_p0 = (void*)&r8_0;
6317 MONO_ADD_INS (cfg->cbb, ins);
6318 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6319 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6320 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6321 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6322 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat anything else as a pointer-sized NULL. */
6324 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations
 *   that keep the IR/SSA form valid without generating real stores; used
 *   when locals are not required to be zero-initialized. Falls back to a
 *   real initialization for types with no dummy opcode.
 */
6329 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6333 rtype = mini_get_underlying_type (rtype);
6337 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6338 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6339 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6340 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6341 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6342 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6343 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6344 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6345 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6346 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6347 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6348 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6349 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6350 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy form for this type; emit a real zero init instead. */
6352 emit_init_rvar (cfg, dreg, rtype);
6356 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE. Under soft-float a
 *   temporary dreg is initialized and then stored to the local; otherwise
 *   the local's own dreg is initialized directly — for real (init=TRUE)
 *   or as a dummy init (init=FALSE, see comment above).
 */
6358 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6360 MonoInst *var = cfg->locals [local];
6361 if (COMPILE_SOFT_FLOAT (cfg)) {
6363 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6364 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6365 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6368 emit_init_rvar (cfg, var->dreg, type);
6370 emit_dummy_init_rvar (cfg, var->dreg, type);
/* mini_inline_method: public thin wrapper over the static inline_method () below. */
6375 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6377 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6383 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point. Saves the parts of
 *   CFG that mono_method_to_ir () mutates, compiles CMETHOD's body into a
 *   fresh start/end bblock pair, restores CFG, and either splices the new
 *   blocks in (success) or discards them (abort). Returns the inlining cost
 *   on success; on abort the cost is rejected by the threshold check below.
 *   INLINE_ALWAYS forces inlining regardless of cost (e.g. wrappers).
 */
6386 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6387 guchar *ip, guint real_offset, gboolean inline_always)
6390 MonoInst *ins, *rvar = NULL;
6391 MonoMethodHeader *cheader;
6392 MonoBasicBlock *ebblock, *sbblock;
6394 MonoMethod *prev_inlined_method;
6395 MonoInst **prev_locals, **prev_args;
6396 MonoType **prev_arg_types;
6397 guint prev_real_offset;
6398 GHashTable *prev_cbb_hash;
6399 MonoBasicBlock **prev_cil_offset_to_bb;
6400 MonoBasicBlock *prev_cbb;
6401 const unsigned char *prev_ip;
6402 unsigned char *prev_cil_start;
6403 guint32 prev_cil_offset_to_bb_len;
6404 MonoMethod *prev_current_method;
6405 MonoGenericContext *prev_generic_context;
6406 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6408 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters (debugging aids); forced inlines bypass them. */
6410 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6411 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6414 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6415 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6420 fsig = mono_method_signature (cmethod);
6422 if (cfg->verbose_level > 2)
6423 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6425 if (!cmethod->inline_info) {
6426 cfg->stat_inlineable_methods++;
6427 cmethod->inline_info = 1;
6430 /* allocate local variables */
6431 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: for forced inlines propagate the error, otherwise just drop it. */
6433 if (inline_always) {
6434 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6435 mono_error_move (&cfg->error, &error);
6437 mono_error_cleanup (&error);
6442 /*Must verify before creating locals as it can cause the JIT to assert.*/
6443 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6444 mono_metadata_free_mh (cheader);
6448 /* allocate space to store the return value */
6449 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6450 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in fresh locals for the inlined body; prev_* fields restored below. */
6453 prev_locals = cfg->locals;
6454 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6455 for (i = 0; i < cheader->num_locals; ++i)
6456 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6458 /* allocate start and end blocks */
6459 /* This is needed so if the inline is aborted, we can clean up */
6460 NEW_BBLOCK (cfg, sbblock);
6461 sbblock->real_offset = real_offset;
6463 NEW_BBLOCK (cfg, ebblock);
6464 ebblock->block_num = cfg->num_bblocks++;
6465 ebblock->real_offset = real_offset;
6467 prev_args = cfg->args;
6468 prev_arg_types = cfg->arg_types;
6469 prev_inlined_method = cfg->inlined_method;
6470 cfg->inlined_method = cmethod;
6471 cfg->ret_var_set = FALSE;
6472 cfg->inline_depth ++;
6473 prev_real_offset = cfg->real_offset;
6474 prev_cbb_hash = cfg->cbb_hash;
6475 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6476 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6477 prev_cil_start = cfg->cil_start;
6479 prev_cbb = cfg->cbb;
6480 prev_current_method = cfg->current_method;
6481 prev_generic_context = cfg->generic_context;
6482 prev_ret_var_set = cfg->ret_var_set;
6483 prev_disable_inline = cfg->disable_inline;
6485 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively compile the callee's IL into sbblock..ebblock. */
6488 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6490 ret_var_set = cfg->ret_var_set;
/* Restore all CFG state saved above, in the same order. */
6492 cfg->inlined_method = prev_inlined_method;
6493 cfg->real_offset = prev_real_offset;
6494 cfg->cbb_hash = prev_cbb_hash;
6495 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6496 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6497 cfg->cil_start = prev_cil_start;
6499 cfg->locals = prev_locals;
6500 cfg->args = prev_args;
6501 cfg->arg_types = prev_arg_types;
6502 cfg->current_method = prev_current_method;
6503 cfg->generic_context = prev_generic_context;
6504 cfg->ret_var_set = prev_ret_var_set;
6505 cfg->disable_inline = prev_disable_inline;
6506 cfg->inline_depth --;
/* Accept when cheap (< 60), forced, or marked AggressiveInlining; negative cost means abort. */
6508 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6509 if (cfg->verbose_level > 2)
6510 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6512 cfg->stat_inlined_methods++;
6514 /* always add some code to avoid block split failures */
6515 MONO_INST_NEW (cfg, ins, OP_NOP);
6516 MONO_ADD_INS (prev_cbb, ins);
6518 prev_cbb->next_bb = sbblock;
6519 link_bblock (cfg, prev_cbb, sbblock);
6522 * Get rid of the begin and end bblocks if possible to aid local
6525 if (prev_cbb->out_count == 1)
6526 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6528 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6529 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6531 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6532 MonoBasicBlock *prev = ebblock->in_bb [0];
6534 if (prev->next_bb == ebblock) {
6535 mono_merge_basic_blocks (cfg, prev, ebblock);
6537 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6538 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6539 cfg->cbb = prev_cbb;
6542 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6547 * It's possible that the rvar is set in some prev bblock, but not in others.
6553 for (i = 0; i < ebblock->in_count; ++i) {
6554 bb = ebblock->in_bb [i];
6556 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6559 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6569 * If the inlined method contains only a throw, then the ret var is not
6570 * set, so set it to a dummy value.
6573 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Load the return value back onto the evaluation stack. */
6575 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6578 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: report, clear any partial exception state, drop new bblocks. */
6581 if (cfg->verbose_level > 2)
6582 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6583 cfg->exception_type = MONO_EXCEPTION_NONE;
6585 /* This gets rid of the newly added bblocks */
6586 cfg->cbb = prev_cbb;
6588 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6593 * Some of these comments may well be out-of-date.
6594 * Design decisions: we do a single pass over the IL code (and we do bblock
6595 * splitting/merging in the few cases when it's required: a back jump to an IL
6596 * address that was not already seen as bblock starting point).
6597 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6598 * Complex operations are decomposed in simpler ones right away. We need to let the
6599 * arch-specific code peek and poke inside this process somehow (except when the
6600 * optimizations can take advantage of the full semantic info of coarse opcodes).
6601 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6602 * MonoInst->opcode initially is the IL opcode or some simplification of that
6603 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6604 * opcode with value bigger than OP_LAST.
6605 * At this point the IR can be handed over to an interpreter, a dumb code generator
6606 * or to the optimizing code generator that will translate it to SSA form.
6608 * Profiling directed optimizations.
6609 * We may compile by default with few or no optimizations and instrument the code
6610 * or the user may indicate what methods to optimize the most either in a config file
6611 * or through repeated runs where the compiler applies offline the optimizations to
6612 * each method and then decides if it was worth it.
/* Verification helpers used throughout mono_method_to_ir (): each one
 * bails out via UNVERIFIED / TYPE_LOAD_ERROR when the IL being decoded
 * violates a structural constraint (stack depth, arg/local index, opcode
 * size, loadable type). They assume 'sp', 'stack_start', 'header', 'ip',
 * 'end' and 'num_args' are in scope at the expansion site. */
6615 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6616 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6617 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6618 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6619 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6620 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6621 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6622 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6624 /* offset from br.s -> br like opcodes */
6625 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the CIL address IP still belongs to basic block BB,
 *   i.e. no other bblock starts at that offset in cil_offset_to_bb.
 */
6628 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6630 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6632 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): create a basic block (via
 *   GET_BBLOCK) at every branch target and at the instruction following
 *   each branch, switching on the operand kind of each opcode to find the
 *   targets. Also marks blocks that end in 'throw' as out-of-line so they
 *   can be moved to cold code.
 */
6636 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6638 unsigned char *ip = start;
6639 unsigned char *target;
6642 MonoBasicBlock *bblock;
6643 const MonoOpcode *opcode;
6646 cli_addr = ip - start;
6647 i = mono_opcode_value ((const guint8 **)&ip, end);
6650 opcode = &mono_opcodes [i];
/* Advance past the operand; only branch operands create bblocks. */
6651 switch (opcode->argument) {
6652 case MonoInlineNone:
6655 case MonoInlineString:
6656 case MonoInlineType:
6657 case MonoInlineField:
6658 case MonoInlineMethod:
6661 case MonoShortInlineR:
6668 case MonoShortInlineVar:
6669 case MonoShortInlineI:
6672 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction. */
6673 target = start + cli_addr + 2 + (signed char)ip [1];
6674 GET_BBLOCK (cfg, bblock, target);
6677 GET_BBLOCK (cfg, bblock, ip);
6679 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction. */
6680 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6681 GET_BBLOCK (cfg, bblock, target);
6684 GET_BBLOCK (cfg, bblock, ip);
6686 case MonoInlineSwitch: {
6687 guint32 n = read32 (ip + 1);
/* Targets are relative to the end of the whole switch instruction. */
6690 cli_addr += 5 + 4 * n;
6691 target = start + cli_addr;
6692 GET_BBLOCK (cfg, bblock, target);
6694 for (j = 0; j < n; ++j) {
6695 target = start + cli_addr + (gint32)read32 (ip);
6696 GET_BBLOCK (cfg, bblock, target);
6706 g_assert_not_reached ();
6709 if (i == CEE_THROW) {
6710 unsigned char *bb_start = ip - 1;
6712 /* Find the start of the bblock containing the throw */
6714 while ((bb_start >= start) && !bblock) {
6715 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold; let the backend move them out of line. */
6719 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN in method M to a MonoMethod, permitting open generic
 *   types in the result. For wrapper methods the token indexes the
 *   wrapper's data table (then inflated with CONTEXT when present);
 *   otherwise it is looked up in M's image. Errors go to ERROR.
 */
6729 static inline MonoMethod *
6730 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6736 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6737 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6739 method = mono_class_inflate_generic_method_checked (method, context, error);
6742 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *   Wrapper around mini_get_method_allow_open () that additionally rejects
 *   methods on open constructed types when not compiling gshared code
 *   (recording the failure in cfg->error). CFG may be NULL, in which case
 *   a local MonoError is used and any error is discarded.
 */
6748 static inline MonoMethod *
6749 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6752 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6754 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6755 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6759 if (!method && !cfg)
6760 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *   Resolve TOKEN in METHOD to a MonoClass, going through the wrapper data
 *   table for wrapper methods (with optional generic inflation), or through
 *   the image's typespec lookup otherwise. The class is initialized before
 *   being returned; resolution errors are currently swallowed (see FIXMEs).
 */
6765 static inline MonoClass*
6766 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6771 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6772 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6774 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6775 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6778 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6779 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6782 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in METHOD to a MonoMethodSignature and inflate it with
 * CONTEXT.  Wrapper methods read the signature from their wrapper data;
 * otherwise it is parsed from the image's metadata.  Returns NULL on
 * failure with the failure recorded in ERROR.
 */
6786 static inline MonoMethodSignature*
6787 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6789 MonoMethodSignature *fsig;
6792 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6793 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6795 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6796 return_val_if_nok (error, NULL);
6799 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return the SecurityManager.ThrowException (exception) managed method,
 * looking it up once and caching it in a function-local static.
 * NOTE(review): the cache-check and return lines are not visible in this
 * chunk.
 */
6805 throw_exception (void)
6807 static MonoMethod *method = NULL;
6810 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6811 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-created exception object EX at runtime by
 * calling SecurityManager.ThrowException with EX as its only argument.
 */
6818 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6820 MonoMethod *thrower = throw_exception ();
6823 EMIT_NEW_PCONST (cfg, args [0], ex);
6824 mono_emit_method_call (cfg, thrower, args, NULL);
6828 * Return the original method if a wrapper is specified. We can only access
6829 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the managed method it wraps so its custom
 * attributes (e.g. CoreCLR security levels) can be inspected.  Non-wrapper
 * methods are returned unchanged; native-to-managed wrappers are treated
 * like Critical code (see XXX below).  NOTE(review): the return statements
 * of the first two cases are not visible in this chunk.
 */
6832 get_original_method (MonoMethod *method)
6834 if (method->wrapper_type == MONO_WRAPPER_NONE)
6837 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6838 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6841 /* in other cases we need to find the original method */
6842 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER is not allowed to access FIELD, emit
 * IR that throws the resulting security exception at runtime.  The check is
 * done against the original (unwrapped) caller, since wrappers carry no
 * attributes.
 */
6846 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6848 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6849 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6851 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER is not allowed to call CALLEE, emit
 * IR that throws the resulting security exception at runtime.  As above,
 * the check uses the original (unwrapped) caller.
 */
6855 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6857 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6858 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6860 emit_throw_exception (cfg, ex);
6864 * Check that the IL instructions at ip are the array initialization
6865 * sequence and return the pointer to the data and the size.
/*
 * Pattern matched (after a newarr): dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray.  On success the field's RVA data
 * pointer is returned (or the raw RVA when compiling AOT, see below) and
 * *out_size / *out_field_token are filled in.  NOTE(review): several
 * interior lines (size computation, the endianness switch arms, failure
 * returns) are missing from this chunk.
 */
6868 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6871 * newarr[System.Int32]
6873 * ldtoken field valuetype ...
6874 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the high byte of the ldtoken operand (a Field token). */
6876 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6878 guint32 token = read32 (ip + 7);
6879 guint32 field_token = read32 (ip + 2);
6880 guint32 field_index = field_token & 0xffffff;
6882 const char *data_ptr;
6884 MonoMethod *cmethod;
6885 MonoClass *dummy_class;
6886 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6890 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6894 *out_field_token = field_token;
6896 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize calls to the real corlib RuntimeHelpers.InitializeArray. */
6899 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6901 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6905 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6906 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Sanity check: the blob must be at least as large as the array data. */
6923 if (size > mono_type_size (field->type, &dummy_align))
6926 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6927 if (!image_is_dynamic (method->klass->image)) {
6928 field_index = read32 (ip + 2) & 0xffffff;
6929 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6930 data_ptr = mono_image_rva_map (method->klass->image, rva);
6931 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6932 /* for aot code we do the lookup on load */
6933 if (aot && data_ptr)
6934 return (const char *)GUINT_TO_POINTER (rva);
6936 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: read the data directly from the field. */
6938 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP inside METHOD.  The message includes either a header-parse error, a
 * note that the body is empty, or a disassembly of the offending opcode.
 * The header is queued on cfg->headers_to_free rather than freed here.
 */
6946 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6949 char *method_fname = mono_method_full_name (method, TRUE);
6951 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6954 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6955 mono_error_cleanup (&error);
6956 } else if (header->code_size == 0)
6957 method_code = g_strdup ("method body is empty.");
6959 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6960 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6961 g_free (method_fname);
6962 g_free (method_code);
6963 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit IR for storing the stack value *SP into local N.  When the store
 * would be a plain reg-reg move of an immediately preceding constant
 * instruction, the move is elided by redirecting the constant's dreg to
 * the local's register; otherwise a normal LOCSTORE is emitted.
 */
6967 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6970 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6971 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6972 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6973 /* Optimize reg-reg moves away */
6975 * Can't optimize other opcodes, since sp[0] might point to
6976 * the last ins of a decomposed opcode.
6978 sp [0]->dreg = (cfg)->locals [n]->dreg;
6980 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6985 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * cases.  Recognizes 'ldloca N; initobj T' within the current bblock and
 * replaces it with a direct initialization of local N, returning the IP
 * past the consumed instructions.  NOTE(review): the non-matched fallback
 * path and the return statements are not visible in this chunk.
 */
6988 static inline unsigned char *
6989 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6999 local = read16 (ip + 2);
7003 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7004 /* From the INITOBJ case */
7005 token = read32 (ip + 2);
7006 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7007 CHECK_TYPELOAD (klass);
7008 type = mini_get_underlying_type (&klass->byval_arg);
7009 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode,
 * where vtable and IMT slots hold function descriptors (addr + arg pairs)
 * instead of code addresses.  Four strategies, chosen by the kind of call:
 *   1. plain virtual call: load the ftndesc from the vtable slot, lazily
 *      initializing it through an icall when NULL;
 *   2. simple (non-variant, non-generic) interface call: call through the
 *      IMT thunk stored in the IMT slot;
 *   3. generic-virtual / variant-interface / special-array-interface call:
 *      like 2 but with a slow path for uninitialized slots and thunk
 *      misses;
 *   4. everything else (incl. gsharedvt): resolve via a runtime icall.
 */
7017 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7019 MonoInst *icall_args [16];
7020 MonoInst *call_target, *ins, *vtable_ins;
7021 int arg_reg, this_reg, vtable_reg;
7022 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7023 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7024 gboolean variant_iface = FALSE;
7027 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7030 * In llvm-only mode, vtables contain function descriptors instead of
7031 * method addresses/trampolines.
7033 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface calls use an IMT slot; class virtual calls use a vtable index. */
7036 slot = mono_method_get_imt_slot (cmethod);
7038 slot = mono_method_get_vtable_index (cmethod);
7040 this_reg = sp [0]->dreg;
7042 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7043 variant_iface = TRUE;
7045 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7047 * The simplest case, a normal virtual call.
7049 int slot_reg = alloc_preg (cfg);
7050 int addr_reg = alloc_preg (cfg);
7051 int arg_reg = alloc_preg (cfg);
7052 MonoBasicBlock *non_null_bb;
7054 vtable_reg = alloc_preg (cfg);
7055 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7056 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7058 /* Load the vtable slot, which contains a function descriptor. */
7059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7061 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (expected common case). */
7063 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7064 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7068 // FIXME: Make the wrapper use the preserveall cconv
7069 // FIXME: Use one icall per slot for small slot numbers ?
7070 icall_args [0] = vtable_ins;
7071 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7072 /* Make the icall return the vtable slot value to save some code space */
7073 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7074 ins->dreg = slot_reg;
7075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7078 MONO_START_BB (cfg, non_null_bb);
7079 /* Load the address + arg from the vtable slot */
7080 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7083 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7086 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7088 * A simple interface call
7090 * We make a call through an imt slot to obtain the function descriptor we need to call.
7091 * The imt slot contains a function descriptor for a runtime function + arg.
7093 int slot_reg = alloc_preg (cfg);
7094 int addr_reg = alloc_preg (cfg);
7095 int arg_reg = alloc_preg (cfg);
7096 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7098 vtable_reg = alloc_preg (cfg);
7099 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7100 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7103 * The slot is already initialized when the vtable is created so there is no need
7107 /* Load the imt slot, which contains a function descriptor. */
7108 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7110 /* Load the address + arg of the imt thunk from the imt slot */
7111 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7112 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7114 * IMT thunks in llvm-only mode are C functions which take an info argument
7115 * plus the imt method and return the ftndesc to call.
7117 icall_args [0] = thunk_arg_ins;
7118 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7119 cmethod, MONO_RGCTX_INFO_METHOD);
7120 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7122 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7125 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7127 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7128 * dynamically extended as more instantiations are discovered.
7129 * This handles generic virtual methods both on classes and interfaces.
7131 int slot_reg = alloc_preg (cfg);
7132 int addr_reg = alloc_preg (cfg);
7133 int arg_reg = alloc_preg (cfg);
7134 int ftndesc_reg = alloc_preg (cfg);
7135 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7136 MonoBasicBlock *slowpath_bb, *end_bb;
7138 NEW_BBLOCK (cfg, slowpath_bb);
7139 NEW_BBLOCK (cfg, end_bb);
7141 vtable_reg = alloc_preg (cfg);
7142 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7144 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7146 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7148 /* Load the slot, which contains a function descriptor. */
7149 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7151 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7152 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7157 /* Same as with iface calls */
7158 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7159 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7160 icall_args [0] = thunk_arg_ins;
7161 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7162 cmethod, MONO_RGCTX_INFO_METHOD);
7163 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7164 ftndesc_ins->dreg = ftndesc_reg;
7166 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7167 * they don't know about yet. Fall back to the slowpath in that case.
7169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7170 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the ftndesc through a runtime icall. */
7175 MONO_START_BB (cfg, slowpath_bb);
7176 icall_args [0] = vtable_ins;
7177 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7178 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7179 cmethod, MONO_RGCTX_INFO_METHOD);
7181 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7183 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7184 ftndesc_ins->dreg = ftndesc_reg;
7185 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7188 MONO_START_BB (cfg, end_bb);
7189 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7193 * Non-optimized cases
7195 icall_args [0] = sp [0];
7196 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7198 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7199 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out parameter for the resolver icall to store the extra call argument. */
7201 arg_reg = alloc_preg (cfg);
7202 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7203 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7205 g_assert (is_gsharedvt);
7207 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7209 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7212 * Pass the extra argument even if the callee doesn't receive it, most
7213 * calling conventions allow this.
7215 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 * walking the parent chain.  NOTE(review): the loop construct and return
 * statements are not visible in this chunk.
 */
7219 is_exception_class (MonoClass *klass)
7222 if (klass == mono_defaults.exception_class)
7224 klass = klass->parent;
7230 * is_jit_optimizer_disabled:
7232 * Determine whenever M's assembly has a DebuggableAttribute with the
7233 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per-assembly (jit_optimizer_disabled /
 * jit_optimizer_disabled_inited, published with a memory barrier).  When
 * the corlib DebuggableAttribute class cannot be found, FALSE is cached.
 */
7236 is_jit_optimizer_disabled (MonoMethod *m)
7239 MonoAssembly *ass = m->klass->image->assembly;
7240 MonoCustomAttrInfo* attrs;
7243 gboolean val = FALSE;
7246 if (ass->jit_optimizer_disabled_inited)
7247 return ass->jit_optimizer_disabled;
7249 klass = mono_class_try_get_debuggable_attribute_class ();
7253 ass->jit_optimizer_disabled = FALSE;
7254 mono_memory_barrier ();
7255 ass->jit_optimizer_disabled_inited = TRUE;
7259 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7260 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7262 for (i = 0; i < attrs->num_attrs; ++i) {
7263 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7265 MonoMethodSignature *sig;
7267 if (!attr->ctor || attr->ctor->klass != klass)
7269 /* Decode the attribute. See reflection.c */
7270 p = (const char*)attr->data;
7271 g_assert (read16 (p) == 0x0001);
7274 // FIXME: Support named parameters
7275 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute (bool, bool) constructor is understood. */
7276 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7278 /* Two boolean arguments */
7282 mono_custom_attrs_free (attrs);
7285 ass->jit_optimizer_disabled = val;
7286 mono_memory_barrier ();
7287 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether the call to CMETHOD made with CALL_OPCODE from METHOD
 * can be compiled as a tail call.  Starts from the architecture's verdict
 * on the two signatures, then vetoes the tail call when arguments could
 * point into the caller's stack frame (byref/PTR/FNPTR params, valuetype
 * 'this'), for pinvokes, when the caller needs an LMF, for most wrappers,
 * and for any opcode other than plain CEE_CALL.
 */
7293 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7295 gboolean supported_tail_call;
7298 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7300 for (i = 0; i < fsig->param_count; ++i) {
7301 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7302 /* These can point to the current method's stack */
7303 supported_tail_call = FALSE;
7305 if (fsig->hasthis && cmethod->klass->valuetype)
7306 /* this might point to the current method's stack */
7307 supported_tail_call = FALSE;
7308 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7309 supported_tail_call = FALSE;
7310 if (cfg->method->save_lmf)
7311 supported_tail_call = FALSE;
7312 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7313 supported_tail_call = FALSE;
7314 if (call_opcode != CEE_CALL)
7315 supported_tail_call = FALSE;
7317 /* Debugging support */
7319 if (supported_tail_call) {
7320 if (!mono_debug_count ())
7321 supported_tail_call = FALSE;
7325 return supported_tail_call;
7331 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * Picks the cheapest available strategy: an intrinsic, inlining the ctor,
 * a gsharedvt out-trampoline calli, an rgctx-resolved indirect call for
 * unpatchable generic code, or a plain direct call.  A vtable/mrgctx
 * argument is materialized first for shared generic valuetype ctors.
 * NOTE(review): interior lines are missing throughout this chunk.
 */
7334 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7335 MonoInst **sp, guint8 *ip, int *inline_costs)
7337 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7339 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7340 mono_method_is_generic_sharable (cmethod, TRUE)) {
7341 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7342 mono_class_vtable (cfg->domain, cmethod->klass);
7343 CHECK_TYPELOAD (cmethod->klass);
7345 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7346 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7349 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7350 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7352 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7354 CHECK_TYPELOAD (cmethod->klass);
7355 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7360 /* Avoid virtual calls to ctors if possible */
7361 if (mono_class_is_marshalbyref (cmethod->klass))
7362 callvirt_this_arg = sp [0];
7364 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7365 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7366 CHECK_CFG_EXCEPTION;
7367 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7368 mono_method_check_inlining (cfg, cmethod) &&
7369 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7372 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7373 cfg->real_offset += 5;
7375 *inline_costs += costs - 5;
7377 INLINE_FAILURE ("inline failure");
7378 // FIXME-VT: Clean this up
7379 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7380 GSHAREDVT_FAILURE(*ip);
7381 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7383 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7386 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7388 if (cfg->llvm_only) {
7389 // FIXME: Avoid initializing vtable_arg
7390 emit_llvmonly_calli (cfg, fsig, sp, addr);
7392 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7394 } else if (context_used &&
7395 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7396 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7397 MonoInst *cmethod_addr;
7399 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7401 if (cfg->llvm_only) {
7402 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7403 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7404 emit_llvmonly_calli (cfg, fsig, sp, addr);
7406 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7407 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7409 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7412 INLINE_FAILURE ("ctor call");
7413 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7414 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR for returning VAL from the method being compiled.  Valuetype
 * returns (STOBJ) are stored either into the return variable or through
 * the hidden vret address; on soft-float targets an R4 return is first
 * converted through an icall; otherwise the value is handed to the
 * architecture backend directly.
 */
7421 emit_setret (MonoCompile *cfg, MonoInst *val)
7423 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7426 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7429 if (!cfg->vret_addr) {
7430 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7432 EMIT_NEW_RETLOADA (cfg, ret_addr);
7434 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7435 ins->klass = mono_class_from_mono_type (ret_type);
7438 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value via an icall before the arch setret. */
7439 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7440 MonoInst *iargs [1];
7444 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7445 mono_arch_emit_setret (cfg, cfg->method, conv);
7447 mono_arch_emit_setret (cfg, cfg->method, val);
7450 mono_arch_emit_setret (cfg, cfg->method, val);
7456 * mono_method_to_ir:
7458 * Translate the .net IL into linear IR.
7460 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7461 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7462 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7463 * @inline_args: if not NULL, contains the arguments to the inline call
7464 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7465 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7467 * This method is used to turn ECMA IL into Mono's internal Linear IR
7468 * representation. It is used both for entire methods, as well as
7469 * inlining existing methods. In the former case, the @start_bblock,
7470 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7471 * inline_offset is set to zero.
7473 * Returns: the inline cost, or -1 if there was an error processing this method.
7476 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7477 MonoInst *return_var, MonoInst **inline_args,
7478 guint inline_offset, gboolean is_virtual_call)
7481 MonoInst *ins, **sp, **stack_start;
7482 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7483 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7484 MonoMethod *cmethod, *method_definition;
7485 MonoInst **arg_array;
7486 MonoMethodHeader *header;
7488 guint32 token, ins_flag;
7490 MonoClass *constrained_class = NULL;
7491 unsigned char *ip, *end, *target, *err_pos;
7492 MonoMethodSignature *sig;
7493 MonoGenericContext *generic_context = NULL;
7494 MonoGenericContainer *generic_container = NULL;
7495 MonoType **param_types;
7496 int i, n, start_new_bblock, dreg;
7497 int num_calls = 0, inline_costs = 0;
7498 int breakpoint_id = 0;
7500 GSList *class_inits = NULL;
7501 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7503 gboolean init_locals, seq_points, skip_dead_blocks;
7504 gboolean sym_seq_points = FALSE;
7505 MonoDebugMethodInfo *minfo;
7506 MonoBitSet *seq_point_locs = NULL;
7507 MonoBitSet *seq_point_set_locs = NULL;
7509 cfg->disable_inline = is_jit_optimizer_disabled (method);
7511 /* serialization and xdomain stuff may need access to private fields and methods */
7512 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7513 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7514 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7515 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7516 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7517 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7519 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7520 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7521 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7522 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7523 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7525 image = method->klass->image;
7526 header = mono_method_get_header_checked (method, &cfg->error);
7528 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7529 goto exception_exit;
7531 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7534 generic_container = mono_method_get_generic_container (method);
7535 sig = mono_method_signature (method);
7536 num_args = sig->hasthis + sig->param_count;
7537 ip = (unsigned char*)header->code;
7538 cfg->cil_start = ip;
7539 end = ip + header->code_size;
7540 cfg->stat_cil_code_size += header->code_size;
7542 seq_points = cfg->gen_seq_points && cfg->method == method;
7544 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7545 /* We could hit a seq point before attaching to the JIT (#8338) */
7549 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7550 minfo = mono_debug_lookup_method (method);
7552 MonoSymSeqPoint *sps;
7553 int i, n_il_offsets;
7555 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7556 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7557 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7558 sym_seq_points = TRUE;
7559 for (i = 0; i < n_il_offsets; ++i) {
7560 if (sps [i].il_offset < header->code_size)
7561 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7565 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7567 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7569 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7570 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7572 mono_debug_free_method_async_debug_info (asyncMethod);
7574 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7575 /* Methods without line number info like auto-generated property accessors */
7576 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7577 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7578 sym_seq_points = TRUE;
7583 * Methods without init_locals set could cause asserts in various passes
7584 * (#497220). To work around this, we emit dummy initialization opcodes
7585 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7586 * on some platforms.
7588 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7589 init_locals = header->init_locals;
7593 method_definition = method;
7594 while (method_definition->is_inflated) {
7595 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7596 method_definition = imethod->declaring;
7599 /* SkipVerification is not allowed if core-clr is enabled */
7600 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7602 dont_verify_stloc = TRUE;
7605 if (sig->is_inflated)
7606 generic_context = mono_method_get_context (method);
7607 else if (generic_container)
7608 generic_context = &generic_container->context;
7609 cfg->generic_context = generic_context;
7612 g_assert (!sig->has_type_parameters);
7614 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7615 g_assert (method->is_inflated);
7616 g_assert (mono_method_get_context (method)->method_inst);
7618 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7619 g_assert (sig->generic_param_count);
7621 if (cfg->method == method) {
7622 cfg->real_offset = 0;
7624 cfg->real_offset = inline_offset;
7627 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7628 cfg->cil_offset_to_bb_len = header->code_size;
7630 cfg->current_method = method;
7632 if (cfg->verbose_level > 2)
7633 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7635 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7637 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7638 for (n = 0; n < sig->param_count; ++n)
7639 param_types [n + sig->hasthis] = sig->params [n];
7640 cfg->arg_types = param_types;
7642 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7643 if (cfg->method == method) {
7645 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7646 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7649 NEW_BBLOCK (cfg, start_bblock);
7650 cfg->bb_entry = start_bblock;
7651 start_bblock->cil_code = NULL;
7652 start_bblock->cil_length = 0;
7655 NEW_BBLOCK (cfg, end_bblock);
7656 cfg->bb_exit = end_bblock;
7657 end_bblock->cil_code = NULL;
7658 end_bblock->cil_length = 0;
7659 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7660 g_assert (cfg->num_bblocks == 2);
7662 arg_array = cfg->args;
7664 if (header->num_clauses) {
7665 cfg->spvars = g_hash_table_new (NULL, NULL);
7666 cfg->exvars = g_hash_table_new (NULL, NULL);
7668 /* handle exception clauses */
7669 for (i = 0; i < header->num_clauses; ++i) {
7670 MonoBasicBlock *try_bb;
7671 MonoExceptionClause *clause = &header->clauses [i];
7672 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7674 try_bb->real_offset = clause->try_offset;
7675 try_bb->try_start = TRUE;
7676 try_bb->region = ((i + 1) << 8) | clause->flags;
7677 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7678 tblock->real_offset = clause->handler_offset;
7679 tblock->flags |= BB_EXCEPTION_HANDLER;
7682 * Linking the try block with the EH block hinders inlining as we won't be able to
7683 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7685 if (COMPILE_LLVM (cfg))
7686 link_bblock (cfg, try_bb, tblock);
7688 if (*(ip + clause->handler_offset) == CEE_POP)
7689 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7691 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7692 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7693 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7694 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7695 MONO_ADD_INS (tblock, ins);
7697 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7698 /* finally clauses already have a seq point */
7699 /* seq points for filter clauses are emitted below */
7700 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7701 MONO_ADD_INS (tblock, ins);
7704 /* todo: is a fault block unsafe to optimize? */
7705 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7706 tblock->flags |= BB_EXCEPTION_UNSAFE;
7709 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7711 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7713 /* catch and filter blocks get the exception object on the stack */
7714 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7715 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7717 /* mostly like handle_stack_args (), but just sets the input args */
7718 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7719 tblock->in_scount = 1;
7720 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7721 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7725 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7726 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7727 if (!cfg->compile_llvm) {
7728 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7729 ins->dreg = tblock->in_stack [0]->dreg;
7730 MONO_ADD_INS (tblock, ins);
7733 MonoInst *dummy_use;
7736 * Add a dummy use for the exvar so its liveness info will be
7739 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7742 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7743 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7744 MONO_ADD_INS (tblock, ins);
7747 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7748 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7749 tblock->flags |= BB_EXCEPTION_HANDLER;
7750 tblock->real_offset = clause->data.filter_offset;
7751 tblock->in_scount = 1;
7752 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7753 /* The filter block shares the exvar with the handler block */
7754 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7755 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7756 MONO_ADD_INS (tblock, ins);
7760 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7761 clause->data.catch_class &&
7763 mono_class_check_context_used (clause->data.catch_class)) {
7765 * In shared generic code with catch
7766 * clauses containing type variables
7767 * the exception handling code has to
7768 * be able to get to the rgctx.
7769 * Therefore we have to make sure that
7770 * the vtable/mrgctx argument (for
7771 * static or generic methods) or the
7772 * "this" argument (for non-static
7773 * methods) are live.
7775 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7776 mini_method_get_context (method)->method_inst ||
7777 method->klass->valuetype) {
7778 mono_get_vtable_var (cfg);
7780 MonoInst *dummy_use;
7782 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7787 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7788 cfg->cbb = start_bblock;
7789 cfg->args = arg_array;
7790 mono_save_args (cfg, sig, inline_args);
7793 /* FIRST CODE BLOCK */
7794 NEW_BBLOCK (cfg, tblock);
7795 tblock->cil_code = ip;
7799 ADD_BBLOCK (cfg, tblock);
7801 if (cfg->method == method) {
7802 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7803 if (breakpoint_id) {
7804 MONO_INST_NEW (cfg, ins, OP_BREAK);
7805 MONO_ADD_INS (cfg->cbb, ins);
7809 /* we use a separate basic block for the initialization code */
7810 NEW_BBLOCK (cfg, init_localsbb);
7811 if (cfg->method == method)
7812 cfg->bb_init = init_localsbb;
7813 init_localsbb->real_offset = cfg->real_offset;
7814 start_bblock->next_bb = init_localsbb;
7815 init_localsbb->next_bb = cfg->cbb;
7816 link_bblock (cfg, start_bblock, init_localsbb);
7817 link_bblock (cfg, init_localsbb, cfg->cbb);
7819 cfg->cbb = init_localsbb;
7821 if (cfg->gsharedvt && cfg->method == method) {
7822 MonoGSharedVtMethodInfo *info;
7823 MonoInst *var, *locals_var;
7826 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7827 info->method = cfg->method;
7828 info->count_entries = 16;
7829 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7830 cfg->gsharedvt_info = info;
7832 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7833 /* prevent it from being register allocated */
7834 //var->flags |= MONO_INST_VOLATILE;
7835 cfg->gsharedvt_info_var = var;
7837 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7838 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7840 /* Allocate locals */
7841 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7842 /* prevent it from being register allocated */
7843 //locals_var->flags |= MONO_INST_VOLATILE;
7844 cfg->gsharedvt_locals_var = locals_var;
7846 dreg = alloc_ireg (cfg);
7847 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7849 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7850 ins->dreg = locals_var->dreg;
7852 MONO_ADD_INS (cfg->cbb, ins);
7853 cfg->gsharedvt_locals_var_ins = ins;
7855 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7858 ins->flags |= MONO_INST_INIT;
7862 if (mono_security_core_clr_enabled ()) {
7863 /* check if this is native code, e.g. an icall or a p/invoke */
7864 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7865 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7867 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7868 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7870 /* if this is a native call then it can only be JITted from platform code */
7871 if ((icall || pinvk) && method->klass && method->klass->image) {
7872 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7873 MonoException *ex = icall ? mono_get_exception_security () :
7874 mono_get_exception_method_access ();
7875 emit_throw_exception (cfg, ex);
7882 CHECK_CFG_EXCEPTION;
7884 if (header->code_size == 0)
7887 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7892 if (cfg->method == method)
7893 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7895 for (n = 0; n < header->num_locals; ++n) {
7896 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7901 /* We force the vtable variable here for all shared methods
7902 for the possibility that they might show up in a stack
7903 trace where their exact instantiation is needed. */
7904 if (cfg->gshared && method == cfg->method) {
7905 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7906 mini_method_get_context (method)->method_inst ||
7907 method->klass->valuetype) {
7908 mono_get_vtable_var (cfg);
7910 /* FIXME: Is there a better way to do this?
7911 We need the variable live for the duration
7912 of the whole method. */
7913 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7917 /* add a check for this != NULL to inlined methods */
7918 if (is_virtual_call) {
7921 NEW_ARGLOAD (cfg, arg_ins, 0);
7922 MONO_ADD_INS (cfg->cbb, arg_ins);
7923 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7926 skip_dead_blocks = !dont_verify;
7927 if (skip_dead_blocks) {
7928 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7933 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7934 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7937 start_new_bblock = 0;
7939 if (cfg->method == method)
7940 cfg->real_offset = ip - header->code;
7942 cfg->real_offset = inline_offset;
7947 if (start_new_bblock) {
7948 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7949 if (start_new_bblock == 2) {
7950 g_assert (ip == tblock->cil_code);
7952 GET_BBLOCK (cfg, tblock, ip);
7954 cfg->cbb->next_bb = tblock;
7956 start_new_bblock = 0;
7957 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7958 if (cfg->verbose_level > 3)
7959 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7960 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7964 g_slist_free (class_inits);
7967 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7968 link_bblock (cfg, cfg->cbb, tblock);
7969 if (sp != stack_start) {
7970 handle_stack_args (cfg, stack_start, sp - stack_start);
7972 CHECK_UNVERIFIABLE (cfg);
7974 cfg->cbb->next_bb = tblock;
7976 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7977 if (cfg->verbose_level > 3)
7978 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7979 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7982 g_slist_free (class_inits);
7987 if (skip_dead_blocks) {
7988 int ip_offset = ip - header->code;
7990 if (ip_offset == bb->end)
7994 int op_size = mono_opcode_size (ip, end);
7995 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7997 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7999 if (ip_offset + op_size == bb->end) {
8000 MONO_INST_NEW (cfg, ins, OP_NOP);
8001 MONO_ADD_INS (cfg->cbb, ins);
8002 start_new_bblock = 1;
8010 * Sequence points are points where the debugger can place a breakpoint.
8011 * Currently, we generate these automatically at points where the IL
8014 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8016 * Make methods interruptable at the beginning, and at the targets of
8017 * backward branches.
8018 * Also, do this at the start of every bblock in methods with clauses too,
8019 * to be able to handle instructions with imprecise control flow like
8021 * Backward branches are handled at the end of method-to-ir ().
8023 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8024 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8026 /* Avoid sequence points on empty IL like .volatile */
8027 // FIXME: Enable this
8028 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8029 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8030 if ((sp != stack_start) && !sym_seq_point)
8031 ins->flags |= MONO_INST_NONEMPTY_STACK;
8032 MONO_ADD_INS (cfg->cbb, ins);
8035 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8038 cfg->cbb->real_offset = cfg->real_offset;
8040 if ((cfg->method == method) && cfg->coverage_info) {
8041 guint32 cil_offset = ip - header->code;
8042 cfg->coverage_info->data [cil_offset].cil_code = ip;
8044 /* TODO: Use an increment here */
8045 #if defined(TARGET_X86)
8046 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8047 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8049 MONO_ADD_INS (cfg->cbb, ins);
8051 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8052 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8056 if (cfg->verbose_level > 3)
8057 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8061 if (seq_points && !sym_seq_points && sp != stack_start) {
8063 * The C# compiler uses these nops to notify the JIT that it should
8064 * insert seq points.
8066 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8067 MONO_ADD_INS (cfg->cbb, ins);
8069 if (cfg->keep_cil_nops)
8070 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8072 MONO_INST_NEW (cfg, ins, OP_NOP);
8074 MONO_ADD_INS (cfg->cbb, ins);
8077 if (should_insert_brekpoint (cfg->method)) {
8078 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8080 MONO_INST_NEW (cfg, ins, OP_NOP);
8083 MONO_ADD_INS (cfg->cbb, ins);
8089 CHECK_STACK_OVF (1);
8090 n = (*ip)-CEE_LDARG_0;
8092 EMIT_NEW_ARGLOAD (cfg, ins, n);
8100 CHECK_STACK_OVF (1);
8101 n = (*ip)-CEE_LDLOC_0;
8103 EMIT_NEW_LOCLOAD (cfg, ins, n);
8112 n = (*ip)-CEE_STLOC_0;
8115 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8117 emit_stloc_ir (cfg, sp, header, n);
8124 CHECK_STACK_OVF (1);
8127 EMIT_NEW_ARGLOAD (cfg, ins, n);
8133 CHECK_STACK_OVF (1);
8136 NEW_ARGLOADA (cfg, ins, n);
8137 MONO_ADD_INS (cfg->cbb, ins);
8147 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8149 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8154 CHECK_STACK_OVF (1);
8157 EMIT_NEW_LOCLOAD (cfg, ins, n);
8161 case CEE_LDLOCA_S: {
8162 unsigned char *tmp_ip;
8164 CHECK_STACK_OVF (1);
8165 CHECK_LOCAL (ip [1]);
8167 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8173 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8182 CHECK_LOCAL (ip [1]);
8183 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8185 emit_stloc_ir (cfg, sp, header, ip [1]);
8190 CHECK_STACK_OVF (1);
8191 EMIT_NEW_PCONST (cfg, ins, NULL);
8192 ins->type = STACK_OBJ;
8197 CHECK_STACK_OVF (1);
8198 EMIT_NEW_ICONST (cfg, ins, -1);
8211 CHECK_STACK_OVF (1);
8212 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8218 CHECK_STACK_OVF (1);
8220 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8226 CHECK_STACK_OVF (1);
8227 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8233 CHECK_STACK_OVF (1);
8234 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8235 ins->type = STACK_I8;
8236 ins->dreg = alloc_dreg (cfg, STACK_I8);
8238 ins->inst_l = (gint64)read64 (ip);
8239 MONO_ADD_INS (cfg->cbb, ins);
8245 gboolean use_aotconst = FALSE;
8247 #ifdef TARGET_POWERPC
8248 /* FIXME: Clean this up */
8249 if (cfg->compile_aot)
8250 use_aotconst = TRUE;
8253 /* FIXME: we should really allocate this only late in the compilation process */
8254 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8256 CHECK_STACK_OVF (1);
8262 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8264 dreg = alloc_freg (cfg);
8265 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8266 ins->type = cfg->r4_stack_type;
8268 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8269 ins->type = cfg->r4_stack_type;
8270 ins->dreg = alloc_dreg (cfg, STACK_R8);
8272 MONO_ADD_INS (cfg->cbb, ins);
8282 gboolean use_aotconst = FALSE;
8284 #ifdef TARGET_POWERPC
8285 /* FIXME: Clean this up */
8286 if (cfg->compile_aot)
8287 use_aotconst = TRUE;
8290 /* FIXME: we should really allocate this only late in the compilation process */
8291 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8293 CHECK_STACK_OVF (1);
8299 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8301 dreg = alloc_freg (cfg);
8302 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8303 ins->type = STACK_R8;
8305 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8306 ins->type = STACK_R8;
8307 ins->dreg = alloc_dreg (cfg, STACK_R8);
8309 MONO_ADD_INS (cfg->cbb, ins);
8318 MonoInst *temp, *store;
8320 CHECK_STACK_OVF (1);
8324 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8325 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8327 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8330 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8343 if (sp [0]->type == STACK_R8)
8344 /* we need to pop the value from the x86 FP stack */
8345 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8350 MonoMethodSignature *fsig;
8353 INLINE_FAILURE ("jmp");
8354 GSHAREDVT_FAILURE (*ip);
8357 if (stack_start != sp)
8359 token = read32 (ip + 1);
8360 /* FIXME: check the signature matches */
8361 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8364 if (cfg->gshared && mono_method_check_context_used (cmethod))
8365 GENERIC_SHARING_FAILURE (CEE_JMP);
8367 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8369 fsig = mono_method_signature (cmethod);
8370 n = fsig->param_count + fsig->hasthis;
8371 if (cfg->llvm_only) {
8374 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8375 for (i = 0; i < n; ++i)
8376 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8377 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8379 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8380 * have to emit a normal return since llvm expects it.
8383 emit_setret (cfg, ins);
8384 MONO_INST_NEW (cfg, ins, OP_BR);
8385 ins->inst_target_bb = end_bblock;
8386 MONO_ADD_INS (cfg->cbb, ins);
8387 link_bblock (cfg, cfg->cbb, end_bblock);
8390 } else if (cfg->backend->have_op_tail_call) {
8391 /* Handle tail calls similarly to calls */
8394 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8395 call->method = cmethod;
8396 call->tail_call = TRUE;
8397 call->signature = mono_method_signature (cmethod);
8398 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8399 call->inst.inst_p0 = cmethod;
8400 for (i = 0; i < n; ++i)
8401 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8403 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8404 call->vret_var = cfg->vret_addr;
8406 mono_arch_emit_call (cfg, call);
8407 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8410 for (i = 0; i < num_args; ++i)
8411 /* Prevent arguments from being optimized away */
8412 arg_array [i]->flags |= MONO_INST_VOLATILE;
8414 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8415 ins = (MonoInst*)call;
8416 ins->inst_p0 = cmethod;
8417 MONO_ADD_INS (cfg->cbb, ins);
8421 start_new_bblock = 1;
8426 MonoMethodSignature *fsig;
8429 token = read32 (ip + 1);
8433 //GSHAREDVT_FAILURE (*ip);
8438 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8441 if (method->dynamic && fsig->pinvoke) {
8445 * This is a call through a function pointer using a pinvoke
8446 * signature. Have to create a wrapper and call that instead.
8447 * FIXME: This is very slow, need to create a wrapper at JIT time
8448 * instead based on the signature.
8450 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8451 EMIT_NEW_PCONST (cfg, args [1], fsig);
8453 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8456 n = fsig->param_count + fsig->hasthis;
8460 //g_assert (!virtual_ || fsig->hasthis);
8464 inline_costs += 10 * num_calls++;
8467 * Making generic calls out of gsharedvt methods.
8468 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8469 * patching gshared method addresses into a gsharedvt method.
8471 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8473 * We pass the address to the gsharedvt trampoline in the rgctx reg
8475 MonoInst *callee = addr;
8477 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8479 GSHAREDVT_FAILURE (*ip);
8483 GSHAREDVT_FAILURE (*ip);
8485 addr = emit_get_rgctx_sig (cfg, context_used,
8486 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8487 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8491 /* Prevent inlining of methods with indirect calls */
8492 INLINE_FAILURE ("indirect call");
8494 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8495 MonoJumpInfoType info_type;
8499 * Instead of emitting an indirect call, emit a direct call
8500 * with the contents of the aotconst as the patch info.
8502 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8503 info_type = (MonoJumpInfoType)addr->inst_c1;
8504 info_data = addr->inst_p0;
8506 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8507 info_data = addr->inst_right->inst_left;
8510 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8511 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8514 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8515 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8520 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8524 /* End of call, INS should contain the result of the call, if any */
8526 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8528 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8531 CHECK_CFG_EXCEPTION;
8535 constrained_class = NULL;
8539 case CEE_CALLVIRT: {
8540 MonoInst *addr = NULL;
8541 MonoMethodSignature *fsig = NULL;
8543 int virtual_ = *ip == CEE_CALLVIRT;
8544 gboolean pass_imt_from_rgctx = FALSE;
8545 MonoInst *imt_arg = NULL;
8546 MonoInst *keep_this_alive = NULL;
8547 gboolean pass_vtable = FALSE;
8548 gboolean pass_mrgctx = FALSE;
8549 MonoInst *vtable_arg = NULL;
8550 gboolean check_this = FALSE;
8551 gboolean supported_tail_call = FALSE;
8552 gboolean tail_call = FALSE;
8553 gboolean need_seq_point = FALSE;
8554 guint32 call_opcode = *ip;
8555 gboolean emit_widen = TRUE;
8556 gboolean push_res = TRUE;
8557 gboolean skip_ret = FALSE;
8558 gboolean delegate_invoke = FALSE;
8559 gboolean direct_icall = FALSE;
8560 gboolean constrained_partial_call = FALSE;
8561 MonoMethod *cil_method;
8564 token = read32 (ip + 1);
8568 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8571 cil_method = cmethod;
8573 if (constrained_class) {
8574 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8575 if (!mini_is_gsharedvt_klass (constrained_class)) {
8576 g_assert (!cmethod->klass->valuetype);
8577 if (!mini_type_is_reference (&constrained_class->byval_arg))
8578 constrained_partial_call = TRUE;
8582 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8583 if (cfg->verbose_level > 2)
8584 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8585 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8586 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8588 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8592 if (cfg->verbose_level > 2)
8593 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8595 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8597 * This is needed since get_method_constrained can't find
8598 * the method in klass representing a type var.
8599 * The type var is guaranteed to be a reference type in this
8602 if (!mini_is_gsharedvt_klass (constrained_class))
8603 g_assert (!cmethod->klass->valuetype);
8605 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8610 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8611 /* Use the corresponding method from the base type to avoid boxing */
8612 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8613 g_assert (base_type);
8614 constrained_class = mono_class_from_mono_type (base_type);
8615 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8620 if (!dont_verify && !cfg->skip_visibility) {
8621 MonoMethod *target_method = cil_method;
8622 if (method->is_inflated) {
8623 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8626 if (!mono_method_can_access_method (method_definition, target_method) &&
8627 !mono_method_can_access_method (method, cil_method))
8628 emit_method_access_failure (cfg, method, cil_method);
8631 if (mono_security_core_clr_enabled ())
8632 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8634 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8635 /* MS.NET seems to silently convert this to a callvirt */
8640 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8641 * converts to a callvirt.
8643 * tests/bug-515884.il is an example of this behavior
8645 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8646 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8647 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8651 if (!cmethod->klass->inited)
8652 if (!mono_class_init (cmethod->klass))
8653 TYPE_LOAD_ERROR (cmethod->klass);
8655 fsig = mono_method_signature (cmethod);
8658 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8659 mini_class_is_system_array (cmethod->klass)) {
8660 array_rank = cmethod->klass->rank;
8661 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8662 direct_icall = TRUE;
8663 } else if (fsig->pinvoke) {
8664 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8665 fsig = mono_method_signature (wrapper);
8666 } else if (constrained_class) {
8668 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8672 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8673 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8675 /* See code below */
8676 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8677 MonoBasicBlock *tbb;
8679 GET_BBLOCK (cfg, tbb, ip + 5);
8680 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8682 * We want to extend the try block to cover the call, but we can't do it if the
8683 * call is made directly since its followed by an exception check.
8685 direct_icall = FALSE;
8689 mono_save_token_info (cfg, image, token, cil_method);
8691 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8692 need_seq_point = TRUE;
8694 /* Don't support calls made using type arguments for now */
8696 if (cfg->gsharedvt) {
8697 if (mini_is_gsharedvt_signature (fsig))
8698 GSHAREDVT_FAILURE (*ip);
8702 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8703 g_assert_not_reached ();
8705 n = fsig->param_count + fsig->hasthis;
8707 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8711 g_assert (!mono_method_check_context_used (cmethod));
8715 //g_assert (!virtual_ || fsig->hasthis);
8720 * We have the `constrained.' prefix opcode.
8722 if (constrained_class) {
8723 if (mini_is_gsharedvt_klass (constrained_class)) {
8724 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8725 /* The 'Own method' case below */
8726 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8727 /* 'The type parameter is instantiated as a reference type' case below. */
8729 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8730 CHECK_CFG_EXCEPTION;
8736 if (constrained_partial_call) {
8737 gboolean need_box = TRUE;
8740 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8741 * called method is not known at compile time either. The called method could end up being
8742 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8743 * to box the receiver.
8744 * A simple solution would be to box always and make a normal virtual call, but that would
8745 * be bad performance wise.
8747 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8749 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8754 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8755 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8756 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8757 ins->klass = constrained_class;
8758 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8759 CHECK_CFG_EXCEPTION;
8760 } else if (need_box) {
8762 MonoBasicBlock *is_ref_bb, *end_bb;
8763 MonoInst *nonbox_call;
8766 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8768 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8769 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8771 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8773 NEW_BBLOCK (cfg, is_ref_bb);
8774 NEW_BBLOCK (cfg, end_bb);
8776 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8781 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8783 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8786 MONO_START_BB (cfg, is_ref_bb);
8787 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8788 ins->klass = constrained_class;
8789 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8790 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8792 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8794 MONO_START_BB (cfg, end_bb);
8797 nonbox_call->dreg = ins->dreg;
8800 g_assert (mono_class_is_interface (cmethod->klass));
8801 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8802 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8805 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8807 * The type parameter is instantiated as a valuetype,
8808 * but that type doesn't override the method we're
8809 * calling, so we need to box `this'.
8811 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8812 ins->klass = constrained_class;
8813 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8814 CHECK_CFG_EXCEPTION;
8815 } else if (!constrained_class->valuetype) {
8816 int dreg = alloc_ireg_ref (cfg);
8819 * The type parameter is instantiated as a reference
8820 * type. We have a managed pointer on the stack, so
8821 * we need to dereference it here.
8823 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8824 ins->type = STACK_OBJ;
8827 if (cmethod->klass->valuetype) {
8830 /* Interface method */
8833 mono_class_setup_vtable (constrained_class);
8834 CHECK_TYPELOAD (constrained_class);
8835 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8837 TYPE_LOAD_ERROR (constrained_class);
8838 slot = mono_method_get_vtable_slot (cmethod);
8840 TYPE_LOAD_ERROR (cmethod->klass);
8841 cmethod = constrained_class->vtable [ioffset + slot];
8843 if (cmethod->klass == mono_defaults.enum_class) {
8844 /* Enum implements some interfaces, so treat this as the first case */
8845 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8846 ins->klass = constrained_class;
8847 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8848 CHECK_CFG_EXCEPTION;
8853 constrained_class = NULL;
8856 if (check_call_signature (cfg, fsig, sp))
8859 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8860 delegate_invoke = TRUE;
8862 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8863 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8864 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8872 * If the callee is a shared method, then its static cctor
8873 * might not get called after the call was patched.
8875 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8876 emit_class_init (cfg, cmethod->klass);
8877 CHECK_TYPELOAD (cmethod->klass);
8880 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8883 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8885 context_used = mini_method_check_context_used (cfg, cmethod);
8887 if (context_used && mono_class_is_interface (cmethod->klass)) {
8888 /* Generic method interface
8889 calls are resolved via a
8890 helper function and don't
8892 if (!cmethod_context || !cmethod_context->method_inst)
8893 pass_imt_from_rgctx = TRUE;
8897 * If a shared method calls another
8898 * shared method then the caller must
8899 * have a generic sharing context
8900 * because the magic trampoline
8901 * requires it. FIXME: We shouldn't
8902 * have to force the vtable/mrgctx
8903 * variable here. Instead there
8904 * should be a flag in the cfg to
8905 * request a generic sharing context.
8908 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8909 mono_get_vtable_var (cfg);
8914 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8916 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8918 CHECK_TYPELOAD (cmethod->klass);
8919 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8924 g_assert (!vtable_arg);
8926 if (!cfg->compile_aot) {
8928 * emit_get_rgctx_method () calls mono_class_vtable () so check
8929 * for type load errors before.
8931 mono_class_setup_vtable (cmethod->klass);
8932 CHECK_TYPELOAD (cmethod->klass);
8935 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8937 /* !marshalbyref is needed to properly handle generic methods + remoting */
8938 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8939 MONO_METHOD_IS_FINAL (cmethod)) &&
8940 !mono_class_is_marshalbyref (cmethod->klass)) {
8947 if (pass_imt_from_rgctx) {
8948 g_assert (!pass_vtable);
8950 imt_arg = emit_get_rgctx_method (cfg, context_used,
8951 cmethod, MONO_RGCTX_INFO_METHOD);
8955 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8957 /* Calling virtual generic methods */
8958 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8959 !(MONO_METHOD_IS_FINAL (cmethod) &&
8960 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8961 fsig->generic_param_count &&
8962 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8964 MonoInst *this_temp, *this_arg_temp, *store;
8965 MonoInst *iargs [4];
8967 g_assert (fsig->is_inflated);
8969 /* Prevent inlining of methods that contain indirect calls */
8970 INLINE_FAILURE ("virtual generic call");
8972 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8973 GSHAREDVT_FAILURE (*ip);
8975 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8976 g_assert (!imt_arg);
8978 g_assert (cmethod->is_inflated);
8979 imt_arg = emit_get_rgctx_method (cfg, context_used,
8980 cmethod, MONO_RGCTX_INFO_METHOD);
8981 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8983 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8984 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8985 MONO_ADD_INS (cfg->cbb, store);
8987 /* FIXME: This should be a managed pointer */
8988 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8990 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8991 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8992 cmethod, MONO_RGCTX_INFO_METHOD);
8993 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8994 addr = mono_emit_jit_icall (cfg,
8995 mono_helper_compile_generic_method, iargs);
8997 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8999 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9006 * Implement a workaround for the inherent races involved in locking:
9012 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9013 * try block, the Exit () won't be executed, see:
9014 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9015 * To work around this, we extend such try blocks to include the last x bytes
9016 * of the Monitor.Enter () call.
9018 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9019 MonoBasicBlock *tbb;
9021 GET_BBLOCK (cfg, tbb, ip + 5);
9023 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9024 * from Monitor.Enter like ArgumentNullException.
9026 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9027 /* Mark this bblock as needing to be extended */
9028 tbb->extend_try_block = TRUE;
9032 /* Conversion to a JIT intrinsic */
9033 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9034 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9035 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9043 if ((cfg->opt & MONO_OPT_INLINE) &&
9044 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9045 mono_method_check_inlining (cfg, cmethod)) {
9047 gboolean always = FALSE;
9049 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9050 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9051 /* Prevent inlining of methods that call wrappers */
9052 INLINE_FAILURE ("wrapper call");
9053 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9057 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9059 cfg->real_offset += 5;
9061 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9062 /* *sp is already set by inline_method */
9067 inline_costs += costs;
9073 /* Tail recursion elimination */
9074 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9075 gboolean has_vtargs = FALSE;
9078 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9079 INLINE_FAILURE ("tail call");
9081 /* keep it simple */
9082 for (i = fsig->param_count - 1; i >= 0; i--) {
9083 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9088 if (need_seq_point) {
9089 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9090 need_seq_point = FALSE;
9092 for (i = 0; i < n; ++i)
9093 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9094 MONO_INST_NEW (cfg, ins, OP_BR);
9095 MONO_ADD_INS (cfg->cbb, ins);
9096 tblock = start_bblock->out_bb [0];
9097 link_bblock (cfg, cfg->cbb, tblock);
9098 ins->inst_target_bb = tblock;
9099 start_new_bblock = 1;
9101 /* skip the CEE_RET, too */
9102 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9109 inline_costs += 10 * num_calls++;
9112 * Synchronized wrappers.
9113 * It's hard to determine where to replace a method with its synchronized
9114 * wrapper without causing an infinite recursion. The current solution is
9115 * to add the synchronized wrapper in the trampolines, and to
9116 * change the called method to a dummy wrapper, and resolve that wrapper
9117 * to the real method in mono_jit_compile_method ().
9119 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9120 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9121 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9122 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9126 * Making generic calls out of gsharedvt methods.
9127 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9128 * patching gshared method addresses into a gsharedvt method.
9130 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9131 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9132 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9133 MonoRgctxInfoType info_type;
9136 //if (mono_class_is_interface (cmethod->klass))
9137 //GSHAREDVT_FAILURE (*ip);
9138 // disable for possible remoting calls
9139 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9140 GSHAREDVT_FAILURE (*ip);
9141 if (fsig->generic_param_count) {
9142 /* virtual generic call */
9143 g_assert (!imt_arg);
9144 /* Same as the virtual generic case above */
9145 imt_arg = emit_get_rgctx_method (cfg, context_used,
9146 cmethod, MONO_RGCTX_INFO_METHOD);
9147 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9149 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9150 /* This can happen when we call a fully instantiated iface method */
9151 imt_arg = emit_get_rgctx_method (cfg, context_used,
9152 cmethod, MONO_RGCTX_INFO_METHOD);
9157 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9158 keep_this_alive = sp [0];
9160 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9161 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9163 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9164 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9166 if (cfg->llvm_only) {
9167 // FIXME: Avoid initializing vtable_arg
9168 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9170 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9175 /* Generic sharing */
9178 * Use this if the callee is gsharedvt sharable too, since
9179 * at runtime we might find an instantiation so the call cannot
9180 * be patched (the 'no_patch' code path in mini-trampolines.c).
9182 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9183 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9184 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9185 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9186 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9187 INLINE_FAILURE ("gshared");
9189 g_assert (cfg->gshared && cmethod);
9193 * We are compiling a call to a
9194 * generic method from shared code,
9195 * which means that we have to look up
9196 * the method in the rgctx and do an
9200 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9202 if (cfg->llvm_only) {
9203 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9204 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9206 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9207 // FIXME: Avoid initializing imt_arg/vtable_arg
9208 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9210 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9211 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9216 /* Direct calls to icalls */
9218 MonoMethod *wrapper;
9221 /* Inline the wrapper */
9222 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9224 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9225 g_assert (costs > 0);
9226 cfg->real_offset += 5;
9228 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9229 /* *sp is already set by inline_method */
9234 inline_costs += costs;
9243 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9244 MonoInst *val = sp [fsig->param_count];
9246 if (val->type == STACK_OBJ) {
9247 MonoInst *iargs [2];
9252 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9255 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9256 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9257 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9258 emit_write_barrier (cfg, addr, val);
9259 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9260 GSHAREDVT_FAILURE (*ip);
9261 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9262 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9264 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9265 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9266 if (!cmethod->klass->element_class->valuetype && !readonly)
9267 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9268 CHECK_TYPELOAD (cmethod->klass);
9271 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9274 g_assert_not_reached ();
9281 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9285 /* Tail prefix / tail call optimization */
9287 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9288 /* FIXME: runtime generic context pointer for jumps? */
9289 /* FIXME: handle this for generic sharing eventually */
9290 if ((ins_flag & MONO_INST_TAILCALL) &&
9291 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9292 supported_tail_call = TRUE;
9294 if (supported_tail_call) {
9297 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9298 INLINE_FAILURE ("tail call");
9300 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9302 if (cfg->backend->have_op_tail_call) {
9303 /* Handle tail calls similarly to normal calls */
9306 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9308 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9309 call->tail_call = TRUE;
9310 call->method = cmethod;
9311 call->signature = mono_method_signature (cmethod);
9314 * We implement tail calls by storing the actual arguments into the
9315 * argument variables, then emitting a CEE_JMP.
9317 for (i = 0; i < n; ++i) {
9318 /* Prevent argument from being register allocated */
9319 arg_array [i]->flags |= MONO_INST_VOLATILE;
9320 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9322 ins = (MonoInst*)call;
9323 ins->inst_p0 = cmethod;
9324 ins->inst_p1 = arg_array [0];
9325 MONO_ADD_INS (cfg->cbb, ins);
9326 link_bblock (cfg, cfg->cbb, end_bblock);
9327 start_new_bblock = 1;
9329 // FIXME: Eliminate unreachable epilogs
9332 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9333 * only reachable from this call.
9335 GET_BBLOCK (cfg, tblock, ip + 5);
9336 if (tblock == cfg->cbb || tblock->in_count == 0)
9345 * Virtual calls in llvm-only mode.
9347 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9348 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9353 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9354 INLINE_FAILURE ("call");
9355 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9356 imt_arg, vtable_arg);
9358 if (tail_call && !cfg->llvm_only) {
9359 link_bblock (cfg, cfg->cbb, end_bblock);
9360 start_new_bblock = 1;
9362 // FIXME: Eliminate unreachable epilogs
9365 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9366 * only reachable from this call.
9368 GET_BBLOCK (cfg, tblock, ip + 5);
9369 if (tblock == cfg->cbb || tblock->in_count == 0)
9376 /* End of call, INS should contain the result of the call, if any */
9378 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9381 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9386 if (keep_this_alive) {
9387 MonoInst *dummy_use;
9389 /* See mono_emit_method_call_full () */
9390 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9393 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9395 * Clang can convert these calls to tail calls which screw up the stack
9396 * walk. This happens even when the -fno-optimize-sibling-calls
9397 * option is passed to clang.
9398 * Work around this by emitting a dummy call.
9400 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9403 CHECK_CFG_EXCEPTION;
9407 g_assert (*ip == CEE_RET);
9411 constrained_class = NULL;
9413 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9417 if (cfg->method != method) {
9418 /* return from inlined method */
9420 * If in_count == 0, that means the ret is unreachable due to
9421 * being preceded by a throw. In that case, inline_method () will
9422 * handle setting the return value
9423 * (test case: test_0_inline_throw ()).
9425 if (return_var && cfg->cbb->in_count) {
9426 MonoType *ret_type = mono_method_signature (method)->ret;
9432 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9435 //g_assert (returnvar != -1);
9436 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9437 cfg->ret_var_set = TRUE;
9440 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9442 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9446 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9448 if (seq_points && !sym_seq_points) {
9450 * Place a seq point here too even though the IL stack is not
9451 * empty, so a step over on
9454 * will work correctly.
9456 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9457 MONO_ADD_INS (cfg->cbb, ins);
9460 g_assert (!return_var);
9464 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9467 emit_setret (cfg, *sp);
9470 if (sp != stack_start)
9472 MONO_INST_NEW (cfg, ins, OP_BR);
9474 ins->inst_target_bb = end_bblock;
9475 MONO_ADD_INS (cfg->cbb, ins);
9476 link_bblock (cfg, cfg->cbb, end_bblock);
9477 start_new_bblock = 1;
9481 MONO_INST_NEW (cfg, ins, OP_BR);
9483 target = ip + 1 + (signed char)(*ip);
9485 GET_BBLOCK (cfg, tblock, target);
9486 link_bblock (cfg, cfg->cbb, tblock);
9487 ins->inst_target_bb = tblock;
9488 if (sp != stack_start) {
9489 handle_stack_args (cfg, stack_start, sp - stack_start);
9491 CHECK_UNVERIFIABLE (cfg);
9493 MONO_ADD_INS (cfg->cbb, ins);
9494 start_new_bblock = 1;
9495 inline_costs += BRANCH_COST;
9509 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9511 target = ip + 1 + *(signed char*)ip;
9517 inline_costs += BRANCH_COST;
9521 MONO_INST_NEW (cfg, ins, OP_BR);
9524 target = ip + 4 + (gint32)read32(ip);
9526 GET_BBLOCK (cfg, tblock, target);
9527 link_bblock (cfg, cfg->cbb, tblock);
9528 ins->inst_target_bb = tblock;
9529 if (sp != stack_start) {
9530 handle_stack_args (cfg, stack_start, sp - stack_start);
9532 CHECK_UNVERIFIABLE (cfg);
9535 MONO_ADD_INS (cfg->cbb, ins);
9537 start_new_bblock = 1;
9538 inline_costs += BRANCH_COST;
9545 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9546 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9547 guint32 opsize = is_short ? 1 : 4;
9549 CHECK_OPSIZE (opsize);
9551 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9554 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9559 GET_BBLOCK (cfg, tblock, target);
9560 link_bblock (cfg, cfg->cbb, tblock);
9561 GET_BBLOCK (cfg, tblock, ip);
9562 link_bblock (cfg, cfg->cbb, tblock);
9564 if (sp != stack_start) {
9565 handle_stack_args (cfg, stack_start, sp - stack_start);
9566 CHECK_UNVERIFIABLE (cfg);
9569 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9570 cmp->sreg1 = sp [0]->dreg;
9571 type_from_op (cfg, cmp, sp [0], NULL);
9574 #if SIZEOF_REGISTER == 4
9575 if (cmp->opcode == OP_LCOMPARE_IMM) {
9576 /* Convert it to OP_LCOMPARE */
9577 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9578 ins->type = STACK_I8;
9579 ins->dreg = alloc_dreg (cfg, STACK_I8);
9581 MONO_ADD_INS (cfg->cbb, ins);
9582 cmp->opcode = OP_LCOMPARE;
9583 cmp->sreg2 = ins->dreg;
9586 MONO_ADD_INS (cfg->cbb, cmp);
9588 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9589 type_from_op (cfg, ins, sp [0], NULL);
9590 MONO_ADD_INS (cfg->cbb, ins);
9591 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9592 GET_BBLOCK (cfg, tblock, target);
9593 ins->inst_true_bb = tblock;
9594 GET_BBLOCK (cfg, tblock, ip);
9595 ins->inst_false_bb = tblock;
9596 start_new_bblock = 2;
9599 inline_costs += BRANCH_COST;
9614 MONO_INST_NEW (cfg, ins, *ip);
9616 target = ip + 4 + (gint32)read32(ip);
9622 inline_costs += BRANCH_COST;
9626 MonoBasicBlock **targets;
9627 MonoBasicBlock *default_bblock;
9628 MonoJumpInfoBBTable *table;
9629 int offset_reg = alloc_preg (cfg);
9630 int target_reg = alloc_preg (cfg);
9631 int table_reg = alloc_preg (cfg);
9632 int sum_reg = alloc_preg (cfg);
9633 gboolean use_op_switch;
9637 n = read32 (ip + 1);
9640 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9644 CHECK_OPSIZE (n * sizeof (guint32));
9645 target = ip + n * sizeof (guint32);
9647 GET_BBLOCK (cfg, default_bblock, target);
9648 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9650 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9651 for (i = 0; i < n; ++i) {
9652 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9653 targets [i] = tblock;
9654 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9658 if (sp != stack_start) {
9660 * Link the current bb with the targets as well, so handle_stack_args
9661 * will set their in_stack correctly.
9663 link_bblock (cfg, cfg->cbb, default_bblock);
9664 for (i = 0; i < n; ++i)
9665 link_bblock (cfg, cfg->cbb, targets [i]);
9667 handle_stack_args (cfg, stack_start, sp - stack_start);
9669 CHECK_UNVERIFIABLE (cfg);
9671 /* Undo the links */
9672 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9673 for (i = 0; i < n; ++i)
9674 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9680 for (i = 0; i < n; ++i)
9681 link_bblock (cfg, cfg->cbb, targets [i]);
9683 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9684 table->table = targets;
9685 table->table_size = n;
9687 use_op_switch = FALSE;
9689 /* ARM implements SWITCH statements differently */
9690 /* FIXME: Make it use the generic implementation */
9691 if (!cfg->compile_aot)
9692 use_op_switch = TRUE;
9695 if (COMPILE_LLVM (cfg))
9696 use_op_switch = TRUE;
9698 cfg->cbb->has_jump_table = 1;
9700 if (use_op_switch) {
9701 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9702 ins->sreg1 = src1->dreg;
9703 ins->inst_p0 = table;
9704 ins->inst_many_bb = targets;
9705 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9706 MONO_ADD_INS (cfg->cbb, ins);
9708 if (sizeof (gpointer) == 8)
9709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9713 #if SIZEOF_REGISTER == 8
9714 /* The upper word might not be zero, and we add it to a 64 bit address later */
9715 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9718 if (cfg->compile_aot) {
9719 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9721 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9722 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9723 ins->inst_p0 = table;
9724 ins->dreg = table_reg;
9725 MONO_ADD_INS (cfg->cbb, ins);
9728 /* FIXME: Use load_memindex */
9729 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9731 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9733 start_new_bblock = 1;
9734 inline_costs += (BRANCH_COST * 2);
9754 dreg = alloc_freg (cfg);
9757 dreg = alloc_lreg (cfg);
9760 dreg = alloc_ireg_ref (cfg);
9763 dreg = alloc_preg (cfg);
9766 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9767 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9768 if (*ip == CEE_LDIND_R4)
9769 ins->type = cfg->r4_stack_type;
9770 ins->flags |= ins_flag;
9771 MONO_ADD_INS (cfg->cbb, ins);
9773 if (ins_flag & MONO_INST_VOLATILE) {
9774 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9775 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9791 if (ins_flag & MONO_INST_VOLATILE) {
9792 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9793 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9796 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9797 ins->flags |= ins_flag;
9800 MONO_ADD_INS (cfg->cbb, ins);
9802 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9803 emit_write_barrier (cfg, sp [0], sp [1]);
9812 MONO_INST_NEW (cfg, ins, (*ip));
9814 ins->sreg1 = sp [0]->dreg;
9815 ins->sreg2 = sp [1]->dreg;
9816 type_from_op (cfg, ins, sp [0], sp [1]);
9818 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9820 /* Use the immediate opcodes if possible */
9821 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9822 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9823 if (imm_opcode != -1) {
9824 ins->opcode = imm_opcode;
9825 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9828 NULLIFY_INS (sp [1]);
9832 MONO_ADD_INS ((cfg)->cbb, (ins));
9834 *sp++ = mono_decompose_opcode (cfg, ins);
9851 MONO_INST_NEW (cfg, ins, (*ip));
9853 ins->sreg1 = sp [0]->dreg;
9854 ins->sreg2 = sp [1]->dreg;
9855 type_from_op (cfg, ins, sp [0], sp [1]);
9857 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9858 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9860 /* FIXME: Pass opcode to is_inst_imm */
9862 /* Use the immediate opcodes if possible */
9863 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9864 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9865 if (imm_opcode != -1) {
9866 ins->opcode = imm_opcode;
9867 if (sp [1]->opcode == OP_I8CONST) {
9868 #if SIZEOF_REGISTER == 8
9869 ins->inst_imm = sp [1]->inst_l;
9871 ins->inst_ls_word = sp [1]->inst_ls_word;
9872 ins->inst_ms_word = sp [1]->inst_ms_word;
9876 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9879 /* Might be followed by an instruction added by add_widen_op */
9880 if (sp [1]->next == NULL)
9881 NULLIFY_INS (sp [1]);
9884 MONO_ADD_INS ((cfg)->cbb, (ins));
9886 *sp++ = mono_decompose_opcode (cfg, ins);
9899 case CEE_CONV_OVF_I8:
9900 case CEE_CONV_OVF_U8:
9904 /* Special case this earlier so we have long constants in the IR */
9905 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9906 int data = sp [-1]->inst_c0;
9907 sp [-1]->opcode = OP_I8CONST;
9908 sp [-1]->type = STACK_I8;
9909 #if SIZEOF_REGISTER == 8
9910 if ((*ip) == CEE_CONV_U8)
9911 sp [-1]->inst_c0 = (guint32)data;
9913 sp [-1]->inst_c0 = data;
9915 sp [-1]->inst_ls_word = data;
9916 if ((*ip) == CEE_CONV_U8)
9917 sp [-1]->inst_ms_word = 0;
9919 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9921 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9928 case CEE_CONV_OVF_I4:
9929 case CEE_CONV_OVF_I1:
9930 case CEE_CONV_OVF_I2:
9931 case CEE_CONV_OVF_I:
9932 case CEE_CONV_OVF_U:
9935 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9936 ADD_UNOP (CEE_CONV_OVF_I8);
9943 case CEE_CONV_OVF_U1:
9944 case CEE_CONV_OVF_U2:
9945 case CEE_CONV_OVF_U4:
9948 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9949 ADD_UNOP (CEE_CONV_OVF_U8);
9956 case CEE_CONV_OVF_I1_UN:
9957 case CEE_CONV_OVF_I2_UN:
9958 case CEE_CONV_OVF_I4_UN:
9959 case CEE_CONV_OVF_I8_UN:
9960 case CEE_CONV_OVF_U1_UN:
9961 case CEE_CONV_OVF_U2_UN:
9962 case CEE_CONV_OVF_U4_UN:
9963 case CEE_CONV_OVF_U8_UN:
9964 case CEE_CONV_OVF_I_UN:
9965 case CEE_CONV_OVF_U_UN:
9972 CHECK_CFG_EXCEPTION;
9976 case CEE_ADD_OVF_UN:
9978 case CEE_MUL_OVF_UN:
9980 case CEE_SUB_OVF_UN:
9986 GSHAREDVT_FAILURE (*ip);
9989 token = read32 (ip + 1);
9990 klass = mini_get_class (method, token, generic_context);
9991 CHECK_TYPELOAD (klass);
9993 if (generic_class_is_reference_type (cfg, klass)) {
9994 MonoInst *store, *load;
9995 int dreg = alloc_ireg_ref (cfg);
9997 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9998 load->flags |= ins_flag;
9999 MONO_ADD_INS (cfg->cbb, load);
10001 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10002 store->flags |= ins_flag;
10003 MONO_ADD_INS (cfg->cbb, store);
10005 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10006 emit_write_barrier (cfg, sp [0], sp [1]);
10008 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10014 int loc_index = -1;
10020 token = read32 (ip + 1);
10021 klass = mini_get_class (method, token, generic_context);
10022 CHECK_TYPELOAD (klass);
10024 /* Optimize the common ldobj+stloc combination */
10027 loc_index = ip [6];
10034 loc_index = ip [5] - CEE_STLOC_0;
10041 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10042 CHECK_LOCAL (loc_index);
10044 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10045 ins->dreg = cfg->locals [loc_index]->dreg;
10046 ins->flags |= ins_flag;
10049 if (ins_flag & MONO_INST_VOLATILE) {
10050 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10051 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10057 /* Optimize the ldobj+stobj combination */
10058 /* The reference case ends up being a load+store anyway */
10059 /* Skip this if the operation is volatile. */
10060 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10065 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10072 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10073 ins->flags |= ins_flag;
10076 if (ins_flag & MONO_INST_VOLATILE) {
10077 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10078 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10087 CHECK_STACK_OVF (1);
10089 n = read32 (ip + 1);
10091 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10092 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10093 ins->type = STACK_OBJ;
10096 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10097 MonoInst *iargs [1];
10098 char *str = (char *)mono_method_get_wrapper_data (method, n);
10100 if (cfg->compile_aot)
10101 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10103 EMIT_NEW_PCONST (cfg, iargs [0], str);
10104 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10106 if (cfg->opt & MONO_OPT_SHARED) {
10107 MonoInst *iargs [3];
10109 if (cfg->compile_aot) {
10110 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10112 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10113 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10114 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10115 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10116 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10119 if (cfg->cbb->out_of_line) {
10120 MonoInst *iargs [2];
10122 if (image == mono_defaults.corlib) {
10124 * Avoid relocations in AOT and save some space by using a
10125 * version of helper_ldstr specialized to mscorlib.
10127 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10128 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10130 /* Avoid creating the string object */
10131 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10132 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10133 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10137 if (cfg->compile_aot) {
10138 NEW_LDSTRCONST (cfg, ins, image, n);
10140 MONO_ADD_INS (cfg->cbb, ins);
10143 NEW_PCONST (cfg, ins, NULL);
10144 ins->type = STACK_OBJ;
10145 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10149 OUT_OF_MEMORY_FAILURE;
10152 MONO_ADD_INS (cfg->cbb, ins);
10161 MonoInst *iargs [2];
10162 MonoMethodSignature *fsig;
10165 MonoInst *vtable_arg = NULL;
10168 token = read32 (ip + 1);
10169 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10172 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10175 mono_save_token_info (cfg, image, token, cmethod);
10177 if (!mono_class_init (cmethod->klass))
10178 TYPE_LOAD_ERROR (cmethod->klass);
10180 context_used = mini_method_check_context_used (cfg, cmethod);
10182 if (mono_security_core_clr_enabled ())
10183 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10185 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10186 emit_class_init (cfg, cmethod->klass);
10187 CHECK_TYPELOAD (cmethod->klass);
10191 if (cfg->gsharedvt) {
10192 if (mini_is_gsharedvt_variable_signature (sig))
10193 GSHAREDVT_FAILURE (*ip);
10197 n = fsig->param_count;
10201 * Generate smaller code for the common newobj <exception> instruction in
10202 * argument checking code.
10204 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10205 is_exception_class (cmethod->klass) && n <= 2 &&
10206 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10207 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10208 MonoInst *iargs [3];
10212 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10215 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10218 iargs [1] = sp [0];
10219 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10222 iargs [1] = sp [0];
10223 iargs [2] = sp [1];
10224 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10227 g_assert_not_reached ();
10235 /* move the args to allow room for 'this' in the first position */
10241 /* check_call_signature () requires sp[0] to be set */
10242 this_ins.type = STACK_OBJ;
10243 sp [0] = &this_ins;
10244 if (check_call_signature (cfg, fsig, sp))
10249 if (mini_class_is_system_array (cmethod->klass)) {
10250 *sp = emit_get_rgctx_method (cfg, context_used,
10251 cmethod, MONO_RGCTX_INFO_METHOD);
10253 /* Avoid varargs in the common case */
10254 if (fsig->param_count == 1)
10255 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10256 else if (fsig->param_count == 2)
10257 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10258 else if (fsig->param_count == 3)
10259 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10260 else if (fsig->param_count == 4)
10261 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10263 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10264 } else if (cmethod->string_ctor) {
10265 g_assert (!context_used);
10266 g_assert (!vtable_arg);
10267 /* we simply pass a null pointer */
10268 EMIT_NEW_PCONST (cfg, *sp, NULL);
10269 /* now call the string ctor */
10270 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10272 if (cmethod->klass->valuetype) {
10273 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10274 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10275 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10280 * The code generated by mini_emit_virtual_call () expects
10281 * iargs [0] to be a boxed instance, but luckily the vcall
10282 * will be transformed into a normal call there.
10284 } else if (context_used) {
10285 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10288 MonoVTable *vtable = NULL;
10290 if (!cfg->compile_aot)
10291 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10292 CHECK_TYPELOAD (cmethod->klass);
10295 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10296 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10297 * As a workaround, we call class cctors before allocating objects.
10299 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10300 emit_class_init (cfg, cmethod->klass);
10301 if (cfg->verbose_level > 2)
10302 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10303 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10306 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10309 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10312 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10314 /* Now call the actual ctor */
10315 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10316 CHECK_CFG_EXCEPTION;
10319 if (alloc == NULL) {
10321 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10322 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10330 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10331 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10334 case CEE_CASTCLASS:
10339 token = read32 (ip + 1);
10340 klass = mini_get_class (method, token, generic_context);
10341 CHECK_TYPELOAD (klass);
10342 if (sp [0]->type != STACK_OBJ)
10345 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10346 ins->dreg = alloc_preg (cfg);
10347 ins->sreg1 = (*sp)->dreg;
10348 ins->klass = klass;
10349 ins->type = STACK_OBJ;
10350 MONO_ADD_INS (cfg->cbb, ins);
10352 CHECK_CFG_EXCEPTION;
10356 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10359 case CEE_UNBOX_ANY: {
10360 MonoInst *res, *addr;
10365 token = read32 (ip + 1);
10366 klass = mini_get_class (method, token, generic_context);
10367 CHECK_TYPELOAD (klass);
10369 mono_save_token_info (cfg, image, token, klass);
10371 context_used = mini_class_check_context_used (cfg, klass);
10373 if (mini_is_gsharedvt_klass (klass)) {
10374 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10376 } else if (generic_class_is_reference_type (cfg, klass)) {
10377 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10378 EMIT_NEW_PCONST (cfg, res, NULL);
10379 res->type = STACK_OBJ;
10381 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10382 res->dreg = alloc_preg (cfg);
10383 res->sreg1 = (*sp)->dreg;
10384 res->klass = klass;
10385 res->type = STACK_OBJ;
10386 MONO_ADD_INS (cfg->cbb, res);
10387 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10389 } else if (mono_class_is_nullable (klass)) {
10390 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10392 addr = handle_unbox (cfg, klass, sp, context_used);
10394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10405 MonoClass *enum_class;
10406 MonoMethod *has_flag;
10412 token = read32 (ip + 1);
10413 klass = mini_get_class (method, token, generic_context);
10414 CHECK_TYPELOAD (klass);
10416 mono_save_token_info (cfg, image, token, klass);
10418 context_used = mini_class_check_context_used (cfg, klass);
10420 if (generic_class_is_reference_type (cfg, klass)) {
10426 if (klass == mono_defaults.void_class)
10428 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10430 /* frequent check in generic code: box (struct), brtrue */
10435 * <push int/long ptr>
10438 * constrained. MyFlags
10439 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10441 * If we find this sequence and the operand types on box and constrained
10442 * are equal, we can emit a specialized instruction sequence instead of
10443 * the very slow HasFlag () call.
10445 if ((cfg->opt & MONO_OPT_INTRINS) &&
10446 /* Cheap checks first. */
10447 ip + 5 + 6 + 5 < end &&
10448 ip [5] == CEE_PREFIX1 &&
10449 ip [6] == CEE_CONSTRAINED_ &&
10450 ip [11] == CEE_CALLVIRT &&
10451 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10452 mono_class_is_enum (klass) &&
10453 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10454 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10455 has_flag->klass == mono_defaults.enum_class &&
10456 !strcmp (has_flag->name, "HasFlag") &&
10457 has_flag->signature->hasthis &&
10458 has_flag->signature->param_count == 1) {
10459 CHECK_TYPELOAD (enum_class);
10461 if (enum_class == klass) {
10462 MonoInst *enum_this, *enum_flag;
10467 enum_this = sp [0];
10468 enum_flag = sp [1];
10470 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10475 // FIXME: LLVM can't handle the inconsistent bb linking
10476 if (!mono_class_is_nullable (klass) &&
10477 !mini_is_gsharedvt_klass (klass) &&
10478 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10479 (ip [5] == CEE_BRTRUE ||
10480 ip [5] == CEE_BRTRUE_S ||
10481 ip [5] == CEE_BRFALSE ||
10482 ip [5] == CEE_BRFALSE_S)) {
10483 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10485 MonoBasicBlock *true_bb, *false_bb;
10489 if (cfg->verbose_level > 3) {
10490 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10491 printf ("<box+brtrue opt>\n");
10496 case CEE_BRFALSE_S:
10499 target = ip + 1 + (signed char)(*ip);
10506 target = ip + 4 + (gint)(read32 (ip));
10510 g_assert_not_reached ();
10514 * We need to link both bblocks, since it is needed for handling stack
10515 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10516 * Branching to only one of them would lead to inconsistencies, so
10517 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10519 GET_BBLOCK (cfg, true_bb, target);
10520 GET_BBLOCK (cfg, false_bb, ip);
10522 mono_link_bblock (cfg, cfg->cbb, true_bb);
10523 mono_link_bblock (cfg, cfg->cbb, false_bb);
10525 if (sp != stack_start) {
10526 handle_stack_args (cfg, stack_start, sp - stack_start);
10528 CHECK_UNVERIFIABLE (cfg);
10531 if (COMPILE_LLVM (cfg)) {
10532 dreg = alloc_ireg (cfg);
10533 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10536 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10538 /* The JIT can't eliminate the iconst+compare */
10539 MONO_INST_NEW (cfg, ins, OP_BR);
10540 ins->inst_target_bb = is_true ? true_bb : false_bb;
10541 MONO_ADD_INS (cfg->cbb, ins);
10544 start_new_bblock = 1;
10548 *sp++ = handle_box (cfg, val, klass, context_used);
10550 CHECK_CFG_EXCEPTION;
10559 token = read32 (ip + 1);
10560 klass = mini_get_class (method, token, generic_context);
10561 CHECK_TYPELOAD (klass);
10563 mono_save_token_info (cfg, image, token, klass);
10565 context_used = mini_class_check_context_used (cfg, klass);
10567 if (mono_class_is_nullable (klass)) {
10570 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10571 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10575 ins = handle_unbox (cfg, klass, sp, context_used);
10588 MonoClassField *field;
10589 #ifndef DISABLE_REMOTING
10593 gboolean is_instance;
10595 gpointer addr = NULL;
10596 gboolean is_special_static;
10598 MonoInst *store_val = NULL;
10599 MonoInst *thread_ins;
10602 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10604 if (op == CEE_STFLD) {
10607 store_val = sp [1];
10612 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10614 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10617 if (op == CEE_STSFLD) {
10620 store_val = sp [0];
10625 token = read32 (ip + 1);
10626 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10627 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10628 klass = field->parent;
10631 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10634 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10635 FIELD_ACCESS_FAILURE (method, field);
10636 mono_class_init (klass);
10638 /* if the class is Critical then transparent code cannot access its fields */
10639 if (!is_instance && mono_security_core_clr_enabled ())
10640 ensure_method_is_allowed_to_access_field (cfg, method, field);
10642 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10643 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10644 if (mono_security_core_clr_enabled ())
10645 ensure_method_is_allowed_to_access_field (cfg, method, field);
10648 ftype = mono_field_get_type (field);
10651 * LDFLD etc. is usable on static fields as well, so convert those cases to
10654 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10666 g_assert_not_reached ();
10668 is_instance = FALSE;
10671 context_used = mini_class_check_context_used (cfg, klass);
10673 /* INSTANCE CASE */
10675 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10676 if (op == CEE_STFLD) {
10677 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10679 #ifndef DISABLE_REMOTING
10680 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10681 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10682 MonoInst *iargs [5];
10684 GSHAREDVT_FAILURE (op);
10686 iargs [0] = sp [0];
10687 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10688 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10689 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10691 iargs [4] = sp [1];
10693 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10694 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10695 iargs, ip, cfg->real_offset, TRUE);
10696 CHECK_CFG_EXCEPTION;
10697 g_assert (costs > 0);
10699 cfg->real_offset += 5;
10701 inline_costs += costs;
10703 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10708 MonoInst *store, *wbarrier_ptr_ins = NULL;
10710 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10712 if (ins_flag & MONO_INST_VOLATILE) {
10713 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10714 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10717 if (mini_is_gsharedvt_klass (klass)) {
10718 MonoInst *offset_ins;
10720 context_used = mini_class_check_context_used (cfg, klass);
10722 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10723 /* The value is offset by 1 */
10724 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10725 dreg = alloc_ireg_mp (cfg);
10726 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10727 wbarrier_ptr_ins = ins;
10728 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10729 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10733 if (sp [0]->opcode != OP_LDADDR)
10734 store->flags |= MONO_INST_FAULT;
10736 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10737 if (mini_is_gsharedvt_klass (klass)) {
10738 g_assert (wbarrier_ptr_ins);
10739 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10741 /* insert call to write barrier */
10745 dreg = alloc_ireg_mp (cfg);
10746 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10747 emit_write_barrier (cfg, ptr, sp [1]);
10751 store->flags |= ins_flag;
10758 #ifndef DISABLE_REMOTING
10759 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10760 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10761 MonoInst *iargs [4];
10763 GSHAREDVT_FAILURE (op);
10765 iargs [0] = sp [0];
10766 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10767 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10768 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10769 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10770 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10771 iargs, ip, cfg->real_offset, TRUE);
10772 CHECK_CFG_EXCEPTION;
10773 g_assert (costs > 0);
10775 cfg->real_offset += 5;
10779 inline_costs += costs;
10781 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10787 if (sp [0]->type == STACK_VTYPE) {
10790 /* Have to compute the address of the variable */
10792 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10794 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10796 g_assert (var->klass == klass);
10798 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10802 if (op == CEE_LDFLDA) {
10803 if (sp [0]->type == STACK_OBJ) {
10804 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10805 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10808 dreg = alloc_ireg_mp (cfg);
10810 if (mini_is_gsharedvt_klass (klass)) {
10811 MonoInst *offset_ins;
10813 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10814 /* The value is offset by 1 */
10815 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10816 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10818 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10820 ins->klass = mono_class_from_mono_type (field->type);
10821 ins->type = STACK_MP;
10826 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10828 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10829 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10838 if (mini_is_gsharedvt_klass (klass)) {
10839 MonoInst *offset_ins;
10841 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10842 /* The value is offset by 1 */
10843 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10844 dreg = alloc_ireg_mp (cfg);
10845 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10846 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10848 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10850 load->flags |= ins_flag;
10851 if (sp [0]->opcode != OP_LDADDR)
10852 load->flags |= MONO_INST_FAULT;
10864 context_used = mini_class_check_context_used (cfg, klass);
10866 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10867 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10871 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10872 * to be called here.
10874 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10875 mono_class_vtable (cfg->domain, klass);
10876 CHECK_TYPELOAD (klass);
10878 mono_domain_lock (cfg->domain);
10879 if (cfg->domain->special_static_fields)
10880 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10881 mono_domain_unlock (cfg->domain);
10883 is_special_static = mono_class_field_is_special_static (field);
10885 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10886 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10890 /* Generate IR to compute the field address */
10891 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10893 * Fast access to TLS data
10894 * Inline version of get_thread_static_data () in
10898 int idx, static_data_reg, array_reg, dreg;
10900 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10901 GSHAREDVT_FAILURE (op);
10903 static_data_reg = alloc_ireg (cfg);
10904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10906 if (cfg->compile_aot) {
10907 int offset_reg, offset2_reg, idx_reg;
10909 /* For TLS variables, this will return the TLS offset */
10910 EMIT_NEW_SFLDACONST (cfg, ins, field);
10911 offset_reg = ins->dreg;
10912 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10913 idx_reg = alloc_ireg (cfg);
10914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10916 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10917 array_reg = alloc_ireg (cfg);
10918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10919 offset2_reg = alloc_ireg (cfg);
10920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10922 dreg = alloc_ireg (cfg);
10923 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10925 offset = (gsize)addr & 0x7fffffff;
10926 idx = offset & 0x3f;
10928 array_reg = alloc_ireg (cfg);
10929 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10930 dreg = alloc_ireg (cfg);
10931 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10933 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10934 (cfg->compile_aot && is_special_static) ||
10935 (context_used && is_special_static)) {
10936 MonoInst *iargs [2];
10938 g_assert (field->parent);
10939 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10940 if (context_used) {
10941 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10942 field, MONO_RGCTX_INFO_CLASS_FIELD);
10944 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10946 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10947 } else if (context_used) {
10948 MonoInst *static_data;
10951 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10952 method->klass->name_space, method->klass->name, method->name,
10953 depth, field->offset);
10956 if (mono_class_needs_cctor_run (klass, method))
10957 emit_class_init (cfg, klass);
10960 * The pointer we're computing here is
10962 * super_info.static_data + field->offset
10964 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10965 klass, MONO_RGCTX_INFO_STATIC_DATA);
10967 if (mini_is_gsharedvt_klass (klass)) {
10968 MonoInst *offset_ins;
10970 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10971 /* The value is offset by 1 */
10972 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10973 dreg = alloc_ireg_mp (cfg);
10974 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10975 } else if (field->offset == 0) {
10978 int addr_reg = mono_alloc_preg (cfg);
10979 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10981 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10982 MonoInst *iargs [2];
10984 g_assert (field->parent);
10985 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10986 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10987 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10989 MonoVTable *vtable = NULL;
10991 if (!cfg->compile_aot)
10992 vtable = mono_class_vtable (cfg->domain, klass);
10993 CHECK_TYPELOAD (klass);
10996 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10997 if (!(g_slist_find (class_inits, klass))) {
10998 emit_class_init (cfg, klass);
10999 if (cfg->verbose_level > 2)
11000 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11001 class_inits = g_slist_prepend (class_inits, klass);
11004 if (cfg->run_cctors) {
11005 /* This makes it so that inlining cannot trigger */
11006 /* .cctors: too many apps depend on them */
11007 /* running with a specific order... */
11009 if (! vtable->initialized)
11010 INLINE_FAILURE ("class init");
11011 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11012 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11013 goto exception_exit;
11017 if (cfg->compile_aot)
11018 EMIT_NEW_SFLDACONST (cfg, ins, field);
11021 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11023 EMIT_NEW_PCONST (cfg, ins, addr);
11026 MonoInst *iargs [1];
11027 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11028 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11032 /* Generate IR to do the actual load/store operation */
11034 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11035 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11036 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11039 if (op == CEE_LDSFLDA) {
11040 ins->klass = mono_class_from_mono_type (ftype);
11041 ins->type = STACK_PTR;
11043 } else if (op == CEE_STSFLD) {
11046 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11047 store->flags |= ins_flag;
11049 gboolean is_const = FALSE;
11050 MonoVTable *vtable = NULL;
11051 gpointer addr = NULL;
11053 if (!context_used) {
11054 vtable = mono_class_vtable (cfg->domain, klass);
11055 CHECK_TYPELOAD (klass);
11057 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11058 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11059 int ro_type = ftype->type;
11061 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11062 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11063 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11066 GSHAREDVT_FAILURE (op);
11068 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11071 case MONO_TYPE_BOOLEAN:
11073 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11077 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11080 case MONO_TYPE_CHAR:
11082 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11086 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11091 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11095 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11100 case MONO_TYPE_PTR:
11101 case MONO_TYPE_FNPTR:
11102 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11103 type_to_eval_stack_type ((cfg), field->type, *sp);
11106 case MONO_TYPE_STRING:
11107 case MONO_TYPE_OBJECT:
11108 case MONO_TYPE_CLASS:
11109 case MONO_TYPE_SZARRAY:
11110 case MONO_TYPE_ARRAY:
11111 if (!mono_gc_is_moving ()) {
11112 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11113 type_to_eval_stack_type ((cfg), field->type, *sp);
11121 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11126 case MONO_TYPE_VALUETYPE:
11136 CHECK_STACK_OVF (1);
11138 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11139 load->flags |= ins_flag;
11145 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11146 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11147 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11158 token = read32 (ip + 1);
11159 klass = mini_get_class (method, token, generic_context);
11160 CHECK_TYPELOAD (klass);
11161 if (ins_flag & MONO_INST_VOLATILE) {
11162 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11163 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11165 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11166 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11167 ins->flags |= ins_flag;
11168 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11169 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11170 /* insert call to write barrier */
11171 emit_write_barrier (cfg, sp [0], sp [1]);
11183 const char *data_ptr;
11185 guint32 field_token;
11191 token = read32 (ip + 1);
11193 klass = mini_get_class (method, token, generic_context);
11194 CHECK_TYPELOAD (klass);
11196 context_used = mini_class_check_context_used (cfg, klass);
11198 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11199 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11200 ins->sreg1 = sp [0]->dreg;
11201 ins->type = STACK_I4;
11202 ins->dreg = alloc_ireg (cfg);
11203 MONO_ADD_INS (cfg->cbb, ins);
11204 *sp = mono_decompose_opcode (cfg, ins);
11207 if (context_used) {
11208 MonoInst *args [3];
11209 MonoClass *array_class = mono_array_class_get (klass, 1);
11210 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11212 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11215 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11216 array_class, MONO_RGCTX_INFO_VTABLE);
11221 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11223 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11225 if (cfg->opt & MONO_OPT_SHARED) {
11226 /* Decompose now to avoid problems with references to the domainvar */
11227 MonoInst *iargs [3];
11229 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11230 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11231 iargs [2] = sp [0];
11233 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11235 /* Decompose later since it is needed by abcrem */
11236 MonoClass *array_type = mono_array_class_get (klass, 1);
11237 mono_class_vtable (cfg->domain, array_type);
11238 CHECK_TYPELOAD (array_type);
11240 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11241 ins->dreg = alloc_ireg_ref (cfg);
11242 ins->sreg1 = sp [0]->dreg;
11243 ins->inst_newa_class = klass;
11244 ins->type = STACK_OBJ;
11245 ins->klass = array_type;
11246 MONO_ADD_INS (cfg->cbb, ins);
11247 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11248 cfg->cbb->has_array_access = TRUE;
11250 /* Needed so mono_emit_load_get_addr () gets called */
11251 mono_get_got_var (cfg);
11261 * we inline/optimize the initialization sequence if possible.
11262 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11263 * for small sizes open code the memcpy
11264 * ensure the rva field is big enough
11266 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11267 MonoMethod *memcpy_method = get_memcpy_method ();
11268 MonoInst *iargs [3];
11269 int add_reg = alloc_ireg_mp (cfg);
11271 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11272 if (cfg->compile_aot) {
11273 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11275 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11277 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11278 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11287 if (sp [0]->type != STACK_OBJ)
11290 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11291 ins->dreg = alloc_preg (cfg);
11292 ins->sreg1 = sp [0]->dreg;
11293 ins->type = STACK_I4;
11294 /* This flag will be inherited by the decomposition */
11295 ins->flags |= MONO_INST_FAULT;
11296 MONO_ADD_INS (cfg->cbb, ins);
11297 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11298 cfg->cbb->has_array_access = TRUE;
11306 if (sp [0]->type != STACK_OBJ)
11309 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11311 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11312 CHECK_TYPELOAD (klass);
11313 /* we need to make sure that this array is exactly the type it needs
11314 * to be for correctness. the wrappers are lax with their usage
11315 * so we need to ignore them here
11317 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11318 MonoClass *array_class = mono_array_class_get (klass, 1);
11319 mini_emit_check_array_type (cfg, sp [0], array_class);
11320 CHECK_TYPELOAD (array_class);
11324 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11329 case CEE_LDELEM_I1:
11330 case CEE_LDELEM_U1:
11331 case CEE_LDELEM_I2:
11332 case CEE_LDELEM_U2:
11333 case CEE_LDELEM_I4:
11334 case CEE_LDELEM_U4:
11335 case CEE_LDELEM_I8:
11337 case CEE_LDELEM_R4:
11338 case CEE_LDELEM_R8:
11339 case CEE_LDELEM_REF: {
11345 if (*ip == CEE_LDELEM) {
11347 token = read32 (ip + 1);
11348 klass = mini_get_class (method, token, generic_context);
11349 CHECK_TYPELOAD (klass);
11350 mono_class_init (klass);
11353 klass = array_access_to_klass (*ip);
11355 if (sp [0]->type != STACK_OBJ)
11358 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11360 if (mini_is_gsharedvt_variable_klass (klass)) {
11361 // FIXME-VT: OP_ICONST optimization
11362 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11363 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11364 ins->opcode = OP_LOADV_MEMBASE;
11365 } else if (sp [1]->opcode == OP_ICONST) {
11366 int array_reg = sp [0]->dreg;
11367 int index_reg = sp [1]->dreg;
11368 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11370 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11371 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11373 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11376 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11377 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11380 if (*ip == CEE_LDELEM)
11387 case CEE_STELEM_I1:
11388 case CEE_STELEM_I2:
11389 case CEE_STELEM_I4:
11390 case CEE_STELEM_I8:
11391 case CEE_STELEM_R4:
11392 case CEE_STELEM_R8:
11393 case CEE_STELEM_REF:
11398 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11400 if (*ip == CEE_STELEM) {
11402 token = read32 (ip + 1);
11403 klass = mini_get_class (method, token, generic_context);
11404 CHECK_TYPELOAD (klass);
11405 mono_class_init (klass);
11408 klass = array_access_to_klass (*ip);
11410 if (sp [0]->type != STACK_OBJ)
11413 emit_array_store (cfg, klass, sp, TRUE);
11415 if (*ip == CEE_STELEM)
11422 case CEE_CKFINITE: {
11426 if (cfg->llvm_only) {
11427 MonoInst *iargs [1];
11429 iargs [0] = sp [0];
11430 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11432 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11433 ins->sreg1 = sp [0]->dreg;
11434 ins->dreg = alloc_freg (cfg);
11435 ins->type = STACK_R8;
11436 MONO_ADD_INS (cfg->cbb, ins);
11438 *sp++ = mono_decompose_opcode (cfg, ins);
11444 case CEE_REFANYVAL: {
11445 MonoInst *src_var, *src;
11447 int klass_reg = alloc_preg (cfg);
11448 int dreg = alloc_preg (cfg);
11450 GSHAREDVT_FAILURE (*ip);
11453 MONO_INST_NEW (cfg, ins, *ip);
11456 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11457 CHECK_TYPELOAD (klass);
11459 context_used = mini_class_check_context_used (cfg, klass);
11462 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11464 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11465 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11468 if (context_used) {
11469 MonoInst *klass_ins;
11471 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11472 klass, MONO_RGCTX_INFO_KLASS);
11475 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11476 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11478 mini_emit_class_check (cfg, klass_reg, klass);
11480 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11481 ins->type = STACK_MP;
11482 ins->klass = klass;
11487 case CEE_MKREFANY: {
11488 MonoInst *loc, *addr;
11490 GSHAREDVT_FAILURE (*ip);
11493 MONO_INST_NEW (cfg, ins, *ip);
11496 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11497 CHECK_TYPELOAD (klass);
11499 context_used = mini_class_check_context_used (cfg, klass);
11501 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11502 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11504 if (context_used) {
11505 MonoInst *const_ins;
11506 int type_reg = alloc_preg (cfg);
11508 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11511 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11513 int const_reg = alloc_preg (cfg);
11514 int type_reg = alloc_preg (cfg);
11516 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11517 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11519 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11521 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11523 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11524 ins->type = STACK_VTYPE;
11525 ins->klass = mono_defaults.typed_reference_class;
11530 case CEE_LDTOKEN: {
11532 MonoClass *handle_class;
11534 CHECK_STACK_OVF (1);
11537 n = read32 (ip + 1);
11539 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11540 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11541 handle = mono_method_get_wrapper_data (method, n);
11542 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11543 if (handle_class == mono_defaults.typehandle_class)
11544 handle = &((MonoClass*)handle)->byval_arg;
11547 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11552 mono_class_init (handle_class);
11553 if (cfg->gshared) {
11554 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11555 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11556 /* This case handles ldtoken
11557 of an open type, like for
11560 } else if (handle_class == mono_defaults.typehandle_class) {
11561 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11562 } else if (handle_class == mono_defaults.fieldhandle_class)
11563 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11564 else if (handle_class == mono_defaults.methodhandle_class)
11565 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11567 g_assert_not_reached ();
11570 if ((cfg->opt & MONO_OPT_SHARED) &&
11571 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11572 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11573 MonoInst *addr, *vtvar, *iargs [3];
11574 int method_context_used;
11576 method_context_used = mini_method_check_context_used (cfg, method);
11578 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11580 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11581 EMIT_NEW_ICONST (cfg, iargs [1], n);
11582 if (method_context_used) {
11583 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11584 method, MONO_RGCTX_INFO_METHOD);
11585 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11587 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11588 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11590 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11592 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11594 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11596 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11597 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11598 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11599 (cmethod->klass == mono_defaults.systemtype_class) &&
11600 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11601 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11603 mono_class_init (tclass);
11604 if (context_used) {
11605 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11606 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11607 } else if (cfg->compile_aot) {
11608 if (method->wrapper_type) {
11609 error_init (&error); //got to do it since there are multiple conditionals below
11610 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11611 /* Special case for static synchronized wrappers */
11612 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11614 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11615 /* FIXME: n is not a normal token */
11617 EMIT_NEW_PCONST (cfg, ins, NULL);
11620 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11623 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11625 EMIT_NEW_PCONST (cfg, ins, rt);
11627 ins->type = STACK_OBJ;
11628 ins->klass = cmethod->klass;
11631 MonoInst *addr, *vtvar;
11633 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11635 if (context_used) {
11636 if (handle_class == mono_defaults.typehandle_class) {
11637 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11638 mono_class_from_mono_type ((MonoType *)handle),
11639 MONO_RGCTX_INFO_TYPE);
11640 } else if (handle_class == mono_defaults.methodhandle_class) {
11641 ins = emit_get_rgctx_method (cfg, context_used,
11642 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11643 } else if (handle_class == mono_defaults.fieldhandle_class) {
11644 ins = emit_get_rgctx_field (cfg, context_used,
11645 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11647 g_assert_not_reached ();
11649 } else if (cfg->compile_aot) {
11650 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11652 EMIT_NEW_PCONST (cfg, ins, handle);
11654 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11656 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11666 if (sp [-1]->type != STACK_OBJ)
11669 MONO_INST_NEW (cfg, ins, OP_THROW);
11671 ins->sreg1 = sp [0]->dreg;
11673 cfg->cbb->out_of_line = TRUE;
11674 MONO_ADD_INS (cfg->cbb, ins);
11675 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11676 MONO_ADD_INS (cfg->cbb, ins);
11679 link_bblock (cfg, cfg->cbb, end_bblock);
11680 start_new_bblock = 1;
11681 /* This can complicate code generation for llvm since the return value might not be defined */
11682 if (COMPILE_LLVM (cfg))
11683 INLINE_FAILURE ("throw");
11685 case CEE_ENDFINALLY:
11686 if (!ip_in_finally_clause (cfg, ip - header->code))
11688 /* mono_save_seq_point_info () depends on this */
11689 if (sp != stack_start)
11690 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11691 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11692 MONO_ADD_INS (cfg->cbb, ins);
11694 start_new_bblock = 1;
11697 * Control will leave the method so empty the stack, otherwise
11698 * the next basic block will start with a nonempty stack.
11700 while (sp != stack_start) {
11705 case CEE_LEAVE_S: {
11708 if (*ip == CEE_LEAVE) {
11710 target = ip + 5 + (gint32)read32(ip + 1);
11713 target = ip + 2 + (signed char)(ip [1]);
11716 /* empty the stack */
11717 while (sp != stack_start) {
11722 * If this leave statement is in a catch block, check for a
11723 * pending exception, and rethrow it if necessary.
11724 * We avoid doing this in runtime invoke wrappers, since those are called
11725 * by native code which expects the wrapper to catch all exceptions.
11727 for (i = 0; i < header->num_clauses; ++i) {
11728 MonoExceptionClause *clause = &header->clauses [i];
11731 * Use <= in the final comparison to handle clauses with multiple
11732 * leave statements, like in bug #78024.
11733 * The ordering of the exception clauses guarantees that we find the
11734 * innermost clause.
11736 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11738 MonoBasicBlock *dont_throw;
11743 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11746 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11748 NEW_BBLOCK (cfg, dont_throw);
11751 * Currently, we always rethrow the abort exception, despite the
11752 * fact that this is not correct. See thread6.cs for an example.
11753 * But propagating the abort exception is more important than
11754 * getting the semantics right.
11756 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11757 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11758 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11760 MONO_START_BB (cfg, dont_throw);
11765 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11768 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11770 MonoExceptionClause *clause;
11772 for (tmp = handlers; tmp; tmp = tmp->next) {
11773 clause = (MonoExceptionClause *)tmp->data;
11774 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11776 link_bblock (cfg, cfg->cbb, tblock);
11777 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11778 ins->inst_target_bb = tblock;
11779 ins->inst_eh_block = clause;
11780 MONO_ADD_INS (cfg->cbb, ins);
11781 cfg->cbb->has_call_handler = 1;
11782 if (COMPILE_LLVM (cfg)) {
11783 MonoBasicBlock *target_bb;
11786 * Link the finally bblock with the target, since it will
11787 * conceptually branch there.
11789 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11790 GET_BBLOCK (cfg, target_bb, target);
11791 link_bblock (cfg, tblock, target_bb);
11794 g_list_free (handlers);
11797 MONO_INST_NEW (cfg, ins, OP_BR);
11798 MONO_ADD_INS (cfg->cbb, ins);
11799 GET_BBLOCK (cfg, tblock, target);
11800 link_bblock (cfg, cfg->cbb, tblock);
11801 ins->inst_target_bb = tblock;
11803 start_new_bblock = 1;
11805 if (*ip == CEE_LEAVE)
11814 * Mono specific opcodes
11816 case MONO_CUSTOM_PREFIX: {
11818 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11822 case CEE_MONO_ICALL: {
11824 MonoJitICallInfo *info;
11826 token = read32 (ip + 2);
11827 func = mono_method_get_wrapper_data (method, token);
11828 info = mono_find_jit_icall_by_addr (func);
11830 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11833 CHECK_STACK (info->sig->param_count);
11834 sp -= info->sig->param_count;
11836 ins = mono_emit_jit_icall (cfg, info->func, sp);
11837 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11841 inline_costs += 10 * num_calls++;
11845 case CEE_MONO_LDPTR_CARD_TABLE:
11846 case CEE_MONO_LDPTR_NURSERY_START:
11847 case CEE_MONO_LDPTR_NURSERY_BITS:
11848 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11849 CHECK_STACK_OVF (1);
11852 case CEE_MONO_LDPTR_CARD_TABLE:
11853 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11855 case CEE_MONO_LDPTR_NURSERY_START:
11856 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11858 case CEE_MONO_LDPTR_NURSERY_BITS:
11859 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11861 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11862 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11868 inline_costs += 10 * num_calls++;
11871 case CEE_MONO_LDPTR: {
11874 CHECK_STACK_OVF (1);
11876 token = read32 (ip + 2);
11878 ptr = mono_method_get_wrapper_data (method, token);
11879 EMIT_NEW_PCONST (cfg, ins, ptr);
11882 inline_costs += 10 * num_calls++;
11883 /* Can't embed random pointers into AOT code */
11887 case CEE_MONO_JIT_ICALL_ADDR: {
11888 MonoJitICallInfo *callinfo;
11891 CHECK_STACK_OVF (1);
11893 token = read32 (ip + 2);
11895 ptr = mono_method_get_wrapper_data (method, token);
11896 callinfo = mono_find_jit_icall_by_addr (ptr);
11897 g_assert (callinfo);
11898 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11901 inline_costs += 10 * num_calls++;
11904 case CEE_MONO_ICALL_ADDR: {
11905 MonoMethod *cmethod;
11908 CHECK_STACK_OVF (1);
11910 token = read32 (ip + 2);
11912 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11914 if (cfg->compile_aot) {
11915 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11917 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11918 * before the call, it's not needed when using direct pinvoke.
11919 * This is not an optimization, but it's used to avoid looking up pinvokes
11920 * on platforms which don't support dlopen ().
11922 EMIT_NEW_PCONST (cfg, ins, NULL);
11924 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11927 ptr = mono_lookup_internal_call (cmethod);
11929 EMIT_NEW_PCONST (cfg, ins, ptr);
11935 case CEE_MONO_VTADDR: {
11936 MonoInst *src_var, *src;
11942 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11943 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11948 case CEE_MONO_NEWOBJ: {
11949 MonoInst *iargs [2];
11951 CHECK_STACK_OVF (1);
11953 token = read32 (ip + 2);
11954 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11955 mono_class_init (klass);
11956 NEW_DOMAINCONST (cfg, iargs [0]);
11957 MONO_ADD_INS (cfg->cbb, iargs [0]);
11958 NEW_CLASSCONST (cfg, iargs [1], klass);
11959 MONO_ADD_INS (cfg->cbb, iargs [1]);
11960 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11962 inline_costs += 10 * num_calls++;
11965 case CEE_MONO_OBJADDR:
11968 MONO_INST_NEW (cfg, ins, OP_MOVE);
11969 ins->dreg = alloc_ireg_mp (cfg);
11970 ins->sreg1 = sp [0]->dreg;
11971 ins->type = STACK_MP;
11972 MONO_ADD_INS (cfg->cbb, ins);
11976 case CEE_MONO_LDNATIVEOBJ:
11978 * Similar to LDOBJ, but instead load the unmanaged
11979 * representation of the vtype to the stack.
11984 token = read32 (ip + 2);
11985 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11986 g_assert (klass->valuetype);
11987 mono_class_init (klass);
11990 MonoInst *src, *dest, *temp;
11993 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11994 temp->backend.is_pinvoke = 1;
11995 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11996 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11998 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11999 dest->type = STACK_VTYPE;
12000 dest->klass = klass;
12006 case CEE_MONO_RETOBJ: {
12008 * Same as RET, but return the native representation of a vtype
12011 g_assert (cfg->ret);
12012 g_assert (mono_method_signature (method)->pinvoke);
12017 token = read32 (ip + 2);
12018 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12020 if (!cfg->vret_addr) {
12021 g_assert (cfg->ret_var_is_local);
12023 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12025 EMIT_NEW_RETLOADA (cfg, ins);
12027 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12029 if (sp != stack_start)
12032 MONO_INST_NEW (cfg, ins, OP_BR);
12033 ins->inst_target_bb = end_bblock;
12034 MONO_ADD_INS (cfg->cbb, ins);
12035 link_bblock (cfg, cfg->cbb, end_bblock);
12036 start_new_bblock = 1;
12040 case CEE_MONO_SAVE_LMF:
12041 case CEE_MONO_RESTORE_LMF:
12044 case CEE_MONO_CLASSCONST:
12045 CHECK_STACK_OVF (1);
12047 token = read32 (ip + 2);
12048 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12051 inline_costs += 10 * num_calls++;
12053 case CEE_MONO_NOT_TAKEN:
12054 cfg->cbb->out_of_line = TRUE;
12057 case CEE_MONO_TLS: {
12060 CHECK_STACK_OVF (1);
12062 key = (MonoTlsKey)read32 (ip + 2);
12063 g_assert (key < TLS_KEY_NUM);
12065 ins = mono_create_tls_get (cfg, key);
12067 ins->type = STACK_PTR;
12072 case CEE_MONO_DYN_CALL: {
12073 MonoCallInst *call;
12075 /* It would be easier to call a trampoline, but that would put an
12076 * extra frame on the stack, confusing exception handling. So
12077 * implement it inline using an opcode for now.
12080 if (!cfg->dyn_call_var) {
12081 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12082 /* prevent it from being register allocated */
12083 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12086 /* Has to use a call inst since the local regalloc expects it */
12087 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12088 ins = (MonoInst*)call;
12090 ins->sreg1 = sp [0]->dreg;
12091 ins->sreg2 = sp [1]->dreg;
12092 MONO_ADD_INS (cfg->cbb, ins);
12094 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12097 inline_costs += 10 * num_calls++;
12101 case CEE_MONO_MEMORY_BARRIER: {
12103 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12107 case CEE_MONO_ATOMIC_STORE_I4: {
12108 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12114 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12115 ins->dreg = sp [0]->dreg;
12116 ins->sreg1 = sp [1]->dreg;
12117 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12118 MONO_ADD_INS (cfg->cbb, ins);
12123 case CEE_MONO_JIT_ATTACH: {
12124 MonoInst *args [16], *domain_ins;
12125 MonoInst *ad_ins, *jit_tls_ins;
12126 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12128 g_assert (!mono_threads_is_coop_enabled ());
12130 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12132 EMIT_NEW_PCONST (cfg, ins, NULL);
12133 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12135 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12136 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12138 if (ad_ins && jit_tls_ins) {
12139 NEW_BBLOCK (cfg, next_bb);
12140 NEW_BBLOCK (cfg, call_bb);
12142 if (cfg->compile_aot) {
12143 /* AOT code is only used in the root domain */
12144 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12146 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12148 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12151 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12152 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12155 MONO_START_BB (cfg, call_bb);
12158 /* AOT code is only used in the root domain */
12159 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12160 if (cfg->compile_aot) {
12164 * This is called on unattached threads, so it cannot go through the trampoline
12165 * infrastructure. Use an indirect call through a got slot initialized at load time
12168 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12169 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12171 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12173 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12176 MONO_START_BB (cfg, next_bb);
12181 case CEE_MONO_JIT_DETACH: {
12182 MonoInst *args [16];
12184 /* Restore the original domain */
12185 dreg = alloc_ireg (cfg);
12186 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12187 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12191 case CEE_MONO_CALLI_EXTRA_ARG: {
12193 MonoMethodSignature *fsig;
12197 * This is the same as CEE_CALLI, but passes an additional argument
12198 * to the called method in llvmonly mode.
12199 * This is only used by delegate invoke wrappers to call the
12200 * actual delegate method.
12202 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12205 token = read32 (ip + 2);
12213 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12216 if (cfg->llvm_only)
12217 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12219 n = fsig->param_count + fsig->hasthis + 1;
12226 if (cfg->llvm_only) {
12228 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12229 * cconv. This is set by mono_init_delegate ().
12231 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12232 MonoInst *callee = addr;
12233 MonoInst *call, *localloc_ins;
12234 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12235 int low_bit_reg = alloc_preg (cfg);
12237 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12238 NEW_BBLOCK (cfg, end_bb);
12240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12242 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12244 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12245 addr = emit_get_rgctx_sig (cfg, context_used,
12246 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12248 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12250 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12251 ins->dreg = alloc_preg (cfg);
12252 ins->inst_imm = 2 * SIZEOF_VOID_P;
12253 MONO_ADD_INS (cfg->cbb, ins);
12254 localloc_ins = ins;
12255 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12256 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12257 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12259 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12260 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12262 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12263 MONO_START_BB (cfg, is_gsharedvt_bb);
12264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12265 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12266 ins->dreg = call->dreg;
12268 MONO_START_BB (cfg, end_bb);
12270 /* Caller uses a normal calling conv */
12272 MonoInst *callee = addr;
12273 MonoInst *call, *localloc_ins;
12274 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12275 int low_bit_reg = alloc_preg (cfg);
12277 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12278 NEW_BBLOCK (cfg, end_bb);
12280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12284 /* Normal case: callee uses a normal cconv, no conversion is needed */
12285 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12287 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12288 MONO_START_BB (cfg, is_gsharedvt_bb);
12289 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12290 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12291 MONO_ADD_INS (cfg->cbb, addr);
12293 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12295 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12296 ins->dreg = alloc_preg (cfg);
12297 ins->inst_imm = 2 * SIZEOF_VOID_P;
12298 MONO_ADD_INS (cfg->cbb, ins);
12299 localloc_ins = ins;
12300 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12301 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12302 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12304 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12305 ins->dreg = call->dreg;
12306 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12308 MONO_START_BB (cfg, end_bb);
12311 /* Same as CEE_CALLI */
12312 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12314 * We pass the address to the gsharedvt trampoline in the rgctx reg
12316 MonoInst *callee = addr;
12318 addr = emit_get_rgctx_sig (cfg, context_used,
12319 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12320 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12322 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12326 if (!MONO_TYPE_IS_VOID (fsig->ret))
12327 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12329 CHECK_CFG_EXCEPTION;
12333 constrained_class = NULL;
12336 case CEE_MONO_LDDOMAIN:
12337 CHECK_STACK_OVF (1);
12338 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12342 case CEE_MONO_GET_LAST_ERROR:
12344 CHECK_STACK_OVF (1);
12346 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12347 ins->dreg = alloc_dreg (cfg, STACK_I4);
12348 ins->type = STACK_I4;
12349 MONO_ADD_INS (cfg->cbb, ins);
12354 case CEE_MONO_GET_RGCTX_ARG:
12356 CHECK_STACK_OVF (1);
12358 mono_create_rgctx_var (cfg);
12360 MONO_INST_NEW (cfg, ins, OP_MOVE);
12361 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12362 ins->sreg1 = cfg->rgctx_var->dreg;
12363 ins->type = STACK_PTR;
12364 MONO_ADD_INS (cfg->cbb, ins);
12370 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12376 case CEE_PREFIX1: {
12379 case CEE_ARGLIST: {
12380 /* somewhat similar to LDTOKEN */
12381 MonoInst *addr, *vtvar;
12382 CHECK_STACK_OVF (1);
12383 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12385 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12386 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12388 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12389 ins->type = STACK_VTYPE;
12390 ins->klass = mono_defaults.argumenthandle_class;
12400 MonoInst *cmp, *arg1, *arg2;
12408 * The following transforms:
12409 * CEE_CEQ into OP_CEQ
12410 * CEE_CGT into OP_CGT
12411 * CEE_CGT_UN into OP_CGT_UN
12412 * CEE_CLT into OP_CLT
12413 * CEE_CLT_UN into OP_CLT_UN
12415 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12417 MONO_INST_NEW (cfg, ins, cmp->opcode);
12418 cmp->sreg1 = arg1->dreg;
12419 cmp->sreg2 = arg2->dreg;
12420 type_from_op (cfg, cmp, arg1, arg2);
12422 add_widen_op (cfg, cmp, &arg1, &arg2);
12423 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12424 cmp->opcode = OP_LCOMPARE;
12425 else if (arg1->type == STACK_R4)
12426 cmp->opcode = OP_RCOMPARE;
12427 else if (arg1->type == STACK_R8)
12428 cmp->opcode = OP_FCOMPARE;
12430 cmp->opcode = OP_ICOMPARE;
12431 MONO_ADD_INS (cfg->cbb, cmp);
12432 ins->type = STACK_I4;
12433 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12434 type_from_op (cfg, ins, arg1, arg2);
12436 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12438 * The backends expect the fceq opcodes to do the
12441 ins->sreg1 = cmp->sreg1;
12442 ins->sreg2 = cmp->sreg2;
12445 MONO_ADD_INS (cfg->cbb, ins);
12451 MonoInst *argconst;
12452 MonoMethod *cil_method;
12454 CHECK_STACK_OVF (1);
12456 n = read32 (ip + 2);
12457 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12460 mono_class_init (cmethod->klass);
12462 mono_save_token_info (cfg, image, n, cmethod);
12464 context_used = mini_method_check_context_used (cfg, cmethod);
12466 cil_method = cmethod;
12467 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12468 emit_method_access_failure (cfg, method, cil_method);
12470 if (mono_security_core_clr_enabled ())
12471 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12474 * Optimize the common case of ldftn+delegate creation
12476 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12477 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12478 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12479 MonoInst *target_ins, *handle_ins;
12480 MonoMethod *invoke;
12481 int invoke_context_used;
12483 invoke = mono_get_delegate_invoke (ctor_method->klass);
12484 if (!invoke || !mono_method_signature (invoke))
12487 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12489 target_ins = sp [-1];
12491 if (mono_security_core_clr_enabled ())
12492 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12494 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12495 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12496 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12498 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12502 /* FIXME: SGEN support */
12503 if (invoke_context_used == 0 || cfg->llvm_only) {
12505 if (cfg->verbose_level > 3)
12506 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12507 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12510 CHECK_CFG_EXCEPTION;
12520 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12521 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12525 inline_costs += 10 * num_calls++;
12528 case CEE_LDVIRTFTN: {
12529 MonoInst *args [2];
12533 n = read32 (ip + 2);
12534 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12537 mono_class_init (cmethod->klass);
12539 context_used = mini_method_check_context_used (cfg, cmethod);
12541 if (mono_security_core_clr_enabled ())
12542 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12545 * Optimize the common case of ldvirtftn+delegate creation
12547 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12548 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12549 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12550 MonoInst *target_ins, *handle_ins;
12551 MonoMethod *invoke;
12552 int invoke_context_used;
12553 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12555 invoke = mono_get_delegate_invoke (ctor_method->klass);
12556 if (!invoke || !mono_method_signature (invoke))
12559 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12561 target_ins = sp [-1];
12563 if (mono_security_core_clr_enabled ())
12564 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12566 /* FIXME: SGEN support */
12567 if (invoke_context_used == 0 || cfg->llvm_only) {
12569 if (cfg->verbose_level > 3)
12570 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12571 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12574 CHECK_CFG_EXCEPTION;
12587 args [1] = emit_get_rgctx_method (cfg, context_used,
12588 cmethod, MONO_RGCTX_INFO_METHOD);
12591 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12593 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12596 inline_costs += 10 * num_calls++;
12600 CHECK_STACK_OVF (1);
12602 n = read16 (ip + 2);
12604 EMIT_NEW_ARGLOAD (cfg, ins, n);
12609 CHECK_STACK_OVF (1);
12611 n = read16 (ip + 2);
12613 NEW_ARGLOADA (cfg, ins, n);
12614 MONO_ADD_INS (cfg->cbb, ins);
12622 n = read16 (ip + 2);
12624 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12626 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12630 CHECK_STACK_OVF (1);
12632 n = read16 (ip + 2);
12634 EMIT_NEW_LOCLOAD (cfg, ins, n);
12639 unsigned char *tmp_ip;
12640 CHECK_STACK_OVF (1);
12642 n = read16 (ip + 2);
12645 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12651 EMIT_NEW_LOCLOADA (cfg, ins, n);
12660 n = read16 (ip + 2);
12662 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12664 emit_stloc_ir (cfg, sp, header, n);
12668 case CEE_LOCALLOC: {
12670 MonoBasicBlock *non_zero_bb, *end_bb;
12671 int alloc_ptr = alloc_preg (cfg);
12673 if (sp != stack_start)
12675 if (cfg->method != method)
12677 * Inlining this into a loop in a parent could lead to
12678 * stack overflows which is different behavior than the
12679 * non-inlined case, thus disable inlining in this case.
12681 INLINE_FAILURE("localloc");
12683 NEW_BBLOCK (cfg, non_zero_bb);
12684 NEW_BBLOCK (cfg, end_bb);
12686 /* if size != zero */
12687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12690 //size is zero, so result is NULL
12691 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12694 MONO_START_BB (cfg, non_zero_bb);
12695 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12696 ins->dreg = alloc_ptr;
12697 ins->sreg1 = sp [0]->dreg;
12698 ins->type = STACK_PTR;
12699 MONO_ADD_INS (cfg->cbb, ins);
12701 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12703 ins->flags |= MONO_INST_INIT;
12705 MONO_START_BB (cfg, end_bb);
12706 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12707 ins->type = STACK_PTR;
12713 case CEE_ENDFILTER: {
12714 MonoExceptionClause *clause, *nearest;
12719 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12721 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12722 ins->sreg1 = (*sp)->dreg;
12723 MONO_ADD_INS (cfg->cbb, ins);
12724 start_new_bblock = 1;
12728 for (cc = 0; cc < header->num_clauses; ++cc) {
12729 clause = &header->clauses [cc];
12730 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12731 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12732 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12735 g_assert (nearest);
12736 if ((ip - header->code) != nearest->handler_offset)
12741 case CEE_UNALIGNED_:
12742 ins_flag |= MONO_INST_UNALIGNED;
12743 /* FIXME: record alignment? we can assume 1 for now */
12747 case CEE_VOLATILE_:
12748 ins_flag |= MONO_INST_VOLATILE;
12752 ins_flag |= MONO_INST_TAILCALL;
12753 cfg->flags |= MONO_CFG_HAS_TAIL;
12754 /* Can't inline tail calls at this time */
12755 inline_costs += 100000;
12762 token = read32 (ip + 2);
12763 klass = mini_get_class (method, token, generic_context);
12764 CHECK_TYPELOAD (klass);
12765 if (generic_class_is_reference_type (cfg, klass))
12766 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12768 mini_emit_initobj (cfg, *sp, NULL, klass);
12772 case CEE_CONSTRAINED_:
12774 token = read32 (ip + 2);
12775 constrained_class = mini_get_class (method, token, generic_context);
12776 CHECK_TYPELOAD (constrained_class);
12780 case CEE_INITBLK: {
12781 MonoInst *iargs [3];
12785 /* Skip optimized paths for volatile operations. */
12786 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12787 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12788 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12789 /* emit_memset only works when val == 0 */
12790 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12793 iargs [0] = sp [0];
12794 iargs [1] = sp [1];
12795 iargs [2] = sp [2];
12796 if (ip [1] == CEE_CPBLK) {
12798 * FIXME: It's unclear whether we should be emitting both the acquire
12799 * and release barriers for cpblk. It is technically both a load and
12800 * store operation, so it seems like that's the sensible thing to do.
12802 * FIXME: We emit full barriers on both sides of the operation for
12803 * simplicity. We should have a separate atomic memcpy method instead.
12805 MonoMethod *memcpy_method = get_memcpy_method ();
12807 if (ins_flag & MONO_INST_VOLATILE)
12808 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12810 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12811 call->flags |= ins_flag;
12813 if (ins_flag & MONO_INST_VOLATILE)
12814 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12816 MonoMethod *memset_method = get_memset_method ();
12817 if (ins_flag & MONO_INST_VOLATILE) {
12818 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12819 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12821 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12822 call->flags |= ins_flag;
12833 ins_flag |= MONO_INST_NOTYPECHECK;
12835 ins_flag |= MONO_INST_NORANGECHECK;
12836 /* we ignore the no-nullcheck for now since we
12837 * really do it explicitly only when doing callvirt->call
12841 case CEE_RETHROW: {
12843 int handler_offset = -1;
12845 for (i = 0; i < header->num_clauses; ++i) {
12846 MonoExceptionClause *clause = &header->clauses [i];
12847 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12848 handler_offset = clause->handler_offset;
12853 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12855 if (handler_offset == -1)
12858 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12859 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12860 ins->sreg1 = load->dreg;
12861 MONO_ADD_INS (cfg->cbb, ins);
12863 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12864 MONO_ADD_INS (cfg->cbb, ins);
12867 link_bblock (cfg, cfg->cbb, end_bblock);
12868 start_new_bblock = 1;
12876 CHECK_STACK_OVF (1);
12878 token = read32 (ip + 2);
12879 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12880 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12883 val = mono_type_size (type, &ialign);
12885 MonoClass *klass = mini_get_class (method, token, generic_context);
12886 CHECK_TYPELOAD (klass);
12888 val = mono_type_size (&klass->byval_arg, &ialign);
12890 if (mini_is_gsharedvt_klass (klass))
12891 GSHAREDVT_FAILURE (*ip);
12893 EMIT_NEW_ICONST (cfg, ins, val);
12898 case CEE_REFANYTYPE: {
12899 MonoInst *src_var, *src;
12901 GSHAREDVT_FAILURE (*ip);
12907 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12909 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12910 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12911 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12916 case CEE_READONLY_:
12929 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12939 g_warning ("opcode 0x%02x not handled", *ip);
12943 if (start_new_bblock != 1)
12946 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12947 if (cfg->cbb->next_bb) {
12948 /* This could already be set because of inlining, #693905 */
12949 MonoBasicBlock *bb = cfg->cbb;
12951 while (bb->next_bb)
12953 bb->next_bb = end_bblock;
12955 cfg->cbb->next_bb = end_bblock;
12958 if (cfg->method == method && cfg->domainvar) {
12960 MonoInst *get_domain;
12962 cfg->cbb = init_localsbb;
12964 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12965 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12966 MONO_ADD_INS (cfg->cbb, store);
12969 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12970 if (cfg->compile_aot)
12971 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12972 mono_get_got_var (cfg);
12975 if (cfg->method == method && cfg->got_var)
12976 mono_emit_load_got_addr (cfg);
12978 if (init_localsbb) {
12979 cfg->cbb = init_localsbb;
12981 for (i = 0; i < header->num_locals; ++i) {
12982 emit_init_local (cfg, i, header->locals [i], init_locals);
12986 if (cfg->init_ref_vars && cfg->method == method) {
12987 /* Emit initialization for ref vars */
12988 // FIXME: Avoid duplication initialization for IL locals.
12989 for (i = 0; i < cfg->num_varinfo; ++i) {
12990 MonoInst *ins = cfg->varinfo [i];
12992 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12993 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12997 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12998 cfg->cbb = init_localsbb;
12999 emit_push_lmf (cfg);
13002 cfg->cbb = init_localsbb;
13003 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13006 MonoBasicBlock *bb;
13009 * Make seq points at backward branch targets interruptable.
13011 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13012 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13013 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13016 /* Add a sequence point for method entry/exit events */
13017 if (seq_points && cfg->gen_sdb_seq_points) {
13018 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13019 MONO_ADD_INS (init_localsbb, ins);
13020 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13021 MONO_ADD_INS (cfg->bb_exit, ins);
13025 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13026 * the code they refer to was dead (#11880).
13028 if (sym_seq_points) {
13029 for (i = 0; i < header->code_size; ++i) {
13030 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13033 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13034 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13041 if (cfg->method == method) {
13042 MonoBasicBlock *bb;
13043 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13044 if (bb == cfg->bb_init)
13047 bb->region = mono_find_block_region (cfg, bb->real_offset);
13049 mono_create_spvar_for_region (cfg, bb->region);
13050 if (cfg->verbose_level > 2)
13051 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13054 MonoBasicBlock *bb;
13055 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13056 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13057 bb->real_offset = inline_offset;
13061 if (inline_costs < 0) {
13064 /* Method is too large */
13065 mname = mono_method_full_name (method, TRUE);
13066 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13070 if ((cfg->verbose_level > 2) && (cfg->method == method))
13071 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13076 g_assert (!mono_error_ok (&cfg->error));
13080 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13084 set_exception_type_from_invalid_il (cfg, method, ip);
13088 g_slist_free (class_inits);
13089 mono_basic_block_free (original_bb);
13090 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13091 if (cfg->exception_type)
13094 return inline_costs;
13098 store_membase_reg_to_store_membase_imm (int opcode)
13101 case OP_STORE_MEMBASE_REG:
13102 return OP_STORE_MEMBASE_IMM;
13103 case OP_STOREI1_MEMBASE_REG:
13104 return OP_STOREI1_MEMBASE_IMM;
13105 case OP_STOREI2_MEMBASE_REG:
13106 return OP_STOREI2_MEMBASE_IMM;
13107 case OP_STOREI4_MEMBASE_REG:
13108 return OP_STOREI4_MEMBASE_IMM;
13109 case OP_STOREI8_MEMBASE_REG:
13110 return OP_STOREI8_MEMBASE_IMM;
13112 g_assert_not_reached ();
13119 mono_op_to_op_imm (int opcode)
13123 return OP_IADD_IMM;
13125 return OP_ISUB_IMM;
13127 return OP_IDIV_IMM;
13129 return OP_IDIV_UN_IMM;
13131 return OP_IREM_IMM;
13133 return OP_IREM_UN_IMM;
13135 return OP_IMUL_IMM;
13137 return OP_IAND_IMM;
13141 return OP_IXOR_IMM;
13143 return OP_ISHL_IMM;
13145 return OP_ISHR_IMM;
13147 return OP_ISHR_UN_IMM;
13150 return OP_LADD_IMM;
13152 return OP_LSUB_IMM;
13154 return OP_LAND_IMM;
13158 return OP_LXOR_IMM;
13160 return OP_LSHL_IMM;
13162 return OP_LSHR_IMM;
13164 return OP_LSHR_UN_IMM;
13165 #if SIZEOF_REGISTER == 8
13167 return OP_LREM_IMM;
13171 return OP_COMPARE_IMM;
13173 return OP_ICOMPARE_IMM;
13175 return OP_LCOMPARE_IMM;
13177 case OP_STORE_MEMBASE_REG:
13178 return OP_STORE_MEMBASE_IMM;
13179 case OP_STOREI1_MEMBASE_REG:
13180 return OP_STOREI1_MEMBASE_IMM;
13181 case OP_STOREI2_MEMBASE_REG:
13182 return OP_STOREI2_MEMBASE_IMM;
13183 case OP_STOREI4_MEMBASE_REG:
13184 return OP_STOREI4_MEMBASE_IMM;
13186 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13188 return OP_X86_PUSH_IMM;
13189 case OP_X86_COMPARE_MEMBASE_REG:
13190 return OP_X86_COMPARE_MEMBASE_IMM;
13192 #if defined(TARGET_AMD64)
13193 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13194 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13196 case OP_VOIDCALL_REG:
13197 return OP_VOIDCALL;
13205 return OP_LOCALLOC_IMM;
13212 ldind_to_load_membase (int opcode)
13216 return OP_LOADI1_MEMBASE;
13218 return OP_LOADU1_MEMBASE;
13220 return OP_LOADI2_MEMBASE;
13222 return OP_LOADU2_MEMBASE;
13224 return OP_LOADI4_MEMBASE;
13226 return OP_LOADU4_MEMBASE;
13228 return OP_LOAD_MEMBASE;
13229 case CEE_LDIND_REF:
13230 return OP_LOAD_MEMBASE;
13232 return OP_LOADI8_MEMBASE;
13234 return OP_LOADR4_MEMBASE;
13236 return OP_LOADR8_MEMBASE;
13238 g_assert_not_reached ();
13245 stind_to_store_membase (int opcode)
13249 return OP_STOREI1_MEMBASE_REG;
13251 return OP_STOREI2_MEMBASE_REG;
13253 return OP_STOREI4_MEMBASE_REG;
13255 case CEE_STIND_REF:
13256 return OP_STORE_MEMBASE_REG;
13258 return OP_STOREI8_MEMBASE_REG;
13260 return OP_STORER4_MEMBASE_REG;
13262 return OP_STORER8_MEMBASE_REG;
13264 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the absolute-address load opcode corresponding to the membase
 * load OPCODE, or -1 if the target does not support load-from-constant
 * addresses (currently only x86/amd64 do).
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the arch-specific opcode which folds an ALU operation into a
 * following store to memory (op reg/imm, membase -> membase), or -1 if
 * no such folding exists for the STORE_OPCODE/OPCODE pair on this target.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer/int32 sized stores can be folded on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The move becomes redundant once the op targets memory */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return the arch-specific opcode which folds a compare-result OPCODE
 * directly into a byte store (setcc to membase), or -1 if no folding
 * applies to the STORE_OPCODE/OPCODE pair on this target.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* fallthrough: non-byte stores fall out and return -1 below */
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
	}
#endif

	return -1;
}
13410 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13413 /* FIXME: This has sign extension issues */
13415 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13416 return OP_X86_COMPARE_MEMBASE8_IMM;
13419 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13424 return OP_X86_PUSH_MEMBASE;
13425 case OP_COMPARE_IMM:
13426 case OP_ICOMPARE_IMM:
13427 return OP_X86_COMPARE_MEMBASE_IMM;
13430 return OP_X86_COMPARE_MEMBASE_REG;
13434 #ifdef TARGET_AMD64
13435 /* FIXME: This has sign extension issues */
13437 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13438 return OP_X86_COMPARE_MEMBASE8_IMM;
13443 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13444 return OP_X86_PUSH_MEMBASE;
13446 /* FIXME: This only works for 32 bit immediates
13447 case OP_COMPARE_IMM:
13448 case OP_LCOMPARE_IMM:
13449 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13450 return OP_AMD64_COMPARE_MEMBASE_IMM;
13452 case OP_ICOMPARE_IMM:
13453 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13454 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13458 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13459 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13460 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13461 return OP_AMD64_COMPARE_MEMBASE_REG;
13464 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13465 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13474 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13477 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13483 return OP_X86_COMPARE_REG_MEMBASE;
13485 return OP_X86_ADD_REG_MEMBASE;
13487 return OP_X86_SUB_REG_MEMBASE;
13489 return OP_X86_AND_REG_MEMBASE;
13491 return OP_X86_OR_REG_MEMBASE;
13493 return OP_X86_XOR_REG_MEMBASE;
13497 #ifdef TARGET_AMD64
13498 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13501 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13503 return OP_X86_ADD_REG_MEMBASE;
13505 return OP_X86_SUB_REG_MEMBASE;
13507 return OP_X86_AND_REG_MEMBASE;
13509 return OP_X86_OR_REG_MEMBASE;
13511 return OP_X86_XOR_REG_MEMBASE;
13513 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13517 return OP_AMD64_COMPARE_REG_MEMBASE;
13519 return OP_AMD64_ADD_REG_MEMBASE;
13521 return OP_AMD64_SUB_REG_MEMBASE;
13523 return OP_AMD64_AND_REG_MEMBASE;
13525 return OP_AMD64_OR_REG_MEMBASE;
13527 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes which are
 * emulated by a runtime helper on this architecture, since those cannot
 * use the immediate encoding.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
13562 * mono_handle_global_vregs:
13564 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Promote vregs used in more than one basic block to global variables
 * (allocating a MonoInst for them), demote variables used in only one
 * bblock back to local vregs, then compact the varinfo/vars tables.
 *
 * NOTE(review): this extract is elided — the leading integers are original
 * file line numbers and they jump, so braces/labels between visible lines
 * are missing from this view. Code is left byte-identical; only comments
 * were added.
 */
13568 mono_handle_global_vregs (MonoCompile *cfg)
13570 gint32 *vreg_to_bb;
13571 MonoBasicBlock *bb;
/* NOTE(review): sizeof (gint32*) over-allocates on 64 bit (element type is
 * gint32), and the `+ 1` binds to the byte count, not the element count —
 * harmless but presumably meant sizeof (gint32) * (cfg->next_vreg + 1). */
13574 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13576 #ifdef MONO_ARCH_SIMD_INTRINSICS
13577 if (cfg->uses_simd_intrinsics)
13578 mono_simd_simplify_indirection (cfg);
13581 /* Find local vregs used in more than one bb */
13582 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13583 MonoInst *ins = bb->code;
13584 int block_num = bb->block_num;
13586 if (cfg->verbose_level > 2)
13587 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13590 for (; ins; ins = ins->next) {
13591 const char *spec = INS_INFO (ins->opcode);
13592 int regtype = 0, regindex;
13595 if (G_UNLIKELY (cfg->verbose_level > 2))
13596 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
13598 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of the instruction in turn */
13600 for (regindex = 0; regindex < 4; regindex ++) {
13603 if (regindex == 0) {
13604 regtype = spec [MONO_INST_DEST];
13605 if (regtype == ' ')
13608 } else if (regindex == 1) {
13609 regtype = spec [MONO_INST_SRC1];
13610 if (regtype == ' ')
13613 } else if (regindex == 2) {
13614 regtype = spec [MONO_INST_SRC2];
13615 if (regtype == ' ')
13618 } else if (regindex == 3) {
13619 regtype = spec [MONO_INST_SRC3];
13620 if (regtype == ' ')
13625 #if SIZEOF_REGISTER == 4
13626 /* In the LLVM case, the long opcodes are not decomposed */
13627 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13629 * Since some instructions reference the original long vreg,
13630 * and some reference the two component vregs, it is quite hard
13631 * to determine when it needs to be global. So be conservative.
13633 if (!get_vreg_to_inst (cfg, vreg)) {
13634 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13636 if (cfg->verbose_level > 2)
13637 printf ("LONG VREG R%d made global.\n", vreg);
13641 * Make the component vregs volatile since the optimizations can
13642 * get confused otherwise.
13644 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13645 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13649 g_assert (vreg != -1);
/* vreg_to_bb encoding: 0 = not seen yet, block_num + 1 = seen in exactly
 * that bblock, -1 = seen in more than one bblock */
13651 prev_bb = vreg_to_bb [vreg];
13652 if (prev_bb == 0) {
13653 /* 0 is a valid block num */
13654 vreg_to_bb [vreg] = block_num + 1;
13655 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables */
13656 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13659 if (!get_vreg_to_inst (cfg, vreg)) {
13660 if (G_UNLIKELY (cfg->verbose_level > 2))
13661 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type matching the register type */
13665 if (vreg_is_ref (cfg, vreg))
13666 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13668 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13671 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13674 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13678 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13681 g_assert_not_reached ();
13685 /* Flag as having been used in more than one bb */
13686 vreg_to_bb [vreg] = -1;
13692 /* If a variable is used in only one bblock, convert it into a local vreg */
13693 for (i = 0; i < cfg->num_varinfo; i++) {
13694 MonoInst *var = cfg->varinfo [i];
13695 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13697 switch (var->type) {
13703 #if SIZEOF_REGISTER == 8
13706 #if !defined(TARGET_X86)
13707 /* Enabling this screws up the fp stack on x86 */
13710 if (mono_arch_is_soft_float ())
13714 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13718 /* Arguments are implicitly global */
13719 /* Putting R4 vars into registers doesn't work currently */
13720 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13721 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13723 * Make that the variable's liveness interval doesn't contain a call, since
13724 * that would cause the lvreg to be spilled, making the whole optimization
13727 /* This is too slow for JIT compilation */
13729 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13731 int def_index, call_index, ins_index;
13732 gboolean spilled = FALSE;
13737 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13738 const char *spec = INS_INFO (ins->opcode);
13740 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13741 def_index = ins_index;
/* NOTE(review): the two disjuncts below are identical — the second one
 * presumably was meant to test MONO_INST_SRC2 / ins->sreg2; as written,
 * uses through sreg2 are never checked here. Confirm against upstream. */
13743 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13744 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13745 if (call_index > def_index) {
13751 if (MONO_IS_CALL (ins))
13752 call_index = ins_index;
13762 if (G_UNLIKELY (cfg->verbose_level > 2))
13763 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13764 var->flags |= MONO_INST_IS_DEAD;
13765 cfg->vreg_to_inst [var->dreg] = NULL;
13772 * Compress the varinfo and vars tables so the liveness computation is faster and
13773 * takes up less space.
/* Compact pass: shift live entries down over the dead ones */
13776 for (i = 0; i < cfg->num_varinfo; ++i) {
13777 MonoInst *var = cfg->varinfo [i];
13778 if (pos < i && cfg->locals_start == i)
13779 cfg->locals_start = pos;
13780 if (!(var->flags & MONO_INST_IS_DEAD)) {
13782 cfg->varinfo [pos] = cfg->varinfo [i];
13783 cfg->varinfo [pos]->inst_c0 = pos;
13784 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13785 cfg->vars [pos].idx = pos;
13786 #if SIZEOF_REGISTER == 4
13787 if (cfg->varinfo [pos]->type == STACK_I8) {
13788 /* Modify the two component vars too */
13791 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13792 var1->inst_c0 = pos;
13793 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13794 var1->inst_c0 = pos;
13801 cfg->num_varinfo = pos;
13802 if (cfg->locals_start > cfg->num_varinfo)
13803 cfg->locals_start = cfg->num_varinfo;
13807 * mono_allocate_gsharedvt_vars:
13809 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13810 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13813 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
13817 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13819 for (i = 0; i < cfg->num_varinfo; ++i) {
13820 MonoInst *ins = cfg->varinfo [i];
13823 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13824 if (i >= cfg->locals_start) {
13826 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13827 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13828 ins->opcode = OP_GSHAREDVT_LOCAL;
13829 ins->inst_imm = idx;
13832 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13833 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13840 * mono_spill_global_vars:
13842 * Generate spill code for variables which are not allocated to registers,
13843 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13844 * code is generated which could be optimized by the local optimization passes.
13847 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13849 MonoBasicBlock *bb;
13851 int orig_next_vreg;
13852 guint32 *vreg_to_lvreg;
13854 guint32 i, lvregs_len, lvregs_size;
13855 gboolean dest_has_lvreg = FALSE;
13856 MonoStackType stacktypes [128];
13857 MonoInst **live_range_start, **live_range_end;
13858 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13860 *need_local_opts = FALSE;
/* spec2 is a scratch ins-spec buffer, filled in below when store opcodes are
 * temporarily rewritten (dreg<->sreg2 swapped). */
13862 memset (spec2, 0, sizeof (spec2));
13864 /* FIXME: Move this function to mini.c */
/* Map the regtype characters used in opcode specs to the stack types used when
 * allocating replacement dregs/sregs. */
13865 stacktypes ['i'] = STACK_PTR;
13866 stacktypes ['l'] = STACK_I8;
13867 stacktypes ['f'] = STACK_R8;
13868 #ifdef MONO_ARCH_SIMD_INTRINSICS
13869 stacktypes ['x'] = STACK_VTYPE;
13872 #if SIZEOF_REGISTER == 4
13873 /* Create MonoInsts for longs */
/* On 32-bit targets each 64-bit variable gets two OP_REGOFFSET MonoInsts, one
 * per 32-bit half, addressed through the MONO_LVREG_LS/MS component vregs. */
13874 for (i = 0; i < cfg->num_varinfo; i++) {
13875 MonoInst *ins = cfg->varinfo [i];
13877 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13878 switch (ins->type) {
13883 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13886 g_assert (ins->opcode == OP_REGOFFSET);
13888 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13890 tree->opcode = OP_REGOFFSET;
13891 tree->inst_basereg = ins->inst_basereg;
13892 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13894 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13896 tree->opcode = OP_REGOFFSET;
13897 tree->inst_basereg = ins->inst_basereg;
13898 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13908 if (cfg->compute_gc_maps) {
13909 /* Registers need liveness info even for non-reference variables */
13910 for (i = 0; i < cfg->num_varinfo; i++) {
13911 MonoInst *ins = cfg->varinfo [i];
13913 if (ins->opcode == OP_REGVAR)
13914 ins->flags |= MONO_INST_GC_TRACK;
13918 /* FIXME: widening and truncation */
13921 * As an optimization, when a variable allocated to the stack is first loaded into
13922 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13923 * the variable again.
13925 orig_next_vreg = cfg->next_vreg;
/* vreg_to_lvreg maps a stack variable's vreg to its cached lvreg (0 = none);
 * lvregs lists the vregs with a live cache entry so it can be cleared fast. */
13926 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13927 lvregs_size = 1024;
13928 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13932 * These arrays contain the first and last instructions accessing a given
13934 * Since we emit bblocks in the same order we process them here, and we
13935 * don't split live ranges, these will precisely describe the live range of
13936 * the variable, i.e. the instruction range where a valid value can be found
13937 * in the variables location.
13938 * The live range is computed using the liveness info computed by the liveness pass.
13939 * We can't use vmv->range, since that is an abstract live range, and we need
13940 * one which is instruction precise.
13941 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13943 /* FIXME: Only do this if debugging info is requested */
13944 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13945 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13946 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13947 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13949 /* Add spill loads/stores */
13950 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13953 if (cfg->verbose_level > 2)
13954 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13956 /* Clear vreg_to_lvreg array */
/* The lvreg cache is only valid within a single bblock. */
13957 for (i = 0; i < lvregs_len; i++)
13958 vreg_to_lvreg [lvregs [i]] = 0;
13962 MONO_BB_FOR_EACH_INS (bb, ins) {
13963 const char *spec = INS_INFO (ins->opcode);
13964 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13965 gboolean store, no_lvreg;
13966 int sregs [MONO_MAX_SRC_REGS];
13968 if (G_UNLIKELY (cfg->verbose_level > 2))
13969 mono_print_ins (ins);
13971 if (ins->opcode == OP_NOP)
13975 * We handle LDADDR here as well, since it can only be decomposed
13976 * when variable addresses are known.
13978 if (ins->opcode == OP_LDADDR) {
13979 MonoInst *var = (MonoInst *)ins->inst_p0;
13981 if (var->opcode == OP_VTARG_ADDR) {
13982 /* Happens on SPARC/S390 where vtypes are passed by reference */
13983 MonoInst *vtaddr = var->inst_left;
13984 if (vtaddr->opcode == OP_REGVAR) {
13985 ins->opcode = OP_MOVE;
13986 ins->sreg1 = vtaddr->dreg;
13988 else if (var->inst_left->opcode == OP_REGOFFSET) {
13989 ins->opcode = OP_LOAD_MEMBASE;
13990 ins->inst_basereg = vtaddr->inst_basereg;
13991 ins->inst_offset = vtaddr->inst_offset;
13994 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13995 /* gsharedvt arg passed by ref */
13996 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13998 ins->opcode = OP_LOAD_MEMBASE;
13999 ins->inst_basereg = var->inst_basereg;
14000 ins->inst_offset = var->inst_offset;
14001 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14002 MonoInst *load, *load2, *load3;
/* Undo the +1 bias applied in mono_allocate_gsharedvt_vars. */
14003 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14004 int reg1, reg2, reg3;
14005 MonoInst *info_var = cfg->gsharedvt_info_var;
14006 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14010 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14013 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14015 g_assert (info_var);
14016 g_assert (locals_var);
14018 /* Mark the instruction used to compute the locals var as used */
14019 cfg->gsharedvt_locals_var_ins = NULL;
14021 /* Load the offset */
14022 if (info_var->opcode == OP_REGOFFSET) {
14023 reg1 = alloc_ireg (cfg);
14024 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14025 } else if (info_var->opcode == OP_REGVAR) {
14027 reg1 = info_var->dreg;
14029 g_assert_not_reached ();
14031 reg2 = alloc_ireg (cfg);
14032 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14033 /* Load the locals area address */
14034 reg3 = alloc_ireg (cfg);
14035 if (locals_var->opcode == OP_REGOFFSET) {
14036 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14037 } else if (locals_var->opcode == OP_REGVAR) {
14038 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14040 g_assert_not_reached ();
14042 /* Compute the address */
14043 ins->opcode = OP_PADD;
14047 mono_bblock_insert_before_ins (bb, ins, load3);
14048 mono_bblock_insert_before_ins (bb, load3, load2);
14050 mono_bblock_insert_before_ins (bb, load2, load);
14052 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is simply basereg + offset. */
14054 ins->opcode = OP_ADD_IMM;
14055 ins->sreg1 = var->inst_basereg;
14056 ins->inst_imm = var->inst_offset;
14059 *need_local_opts = TRUE;
14060 spec = INS_INFO (ins->opcode);
/* High-level (CEE) opcodes must have been lowered before this pass runs. */
14063 if (ins->opcode < MONO_CEE_LAST) {
14064 mono_print_ins (ins);
14065 g_assert_not_reached ();
14069 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... i.e. it is really a source: swap dreg and sreg2 here so the generic
 * dreg/sreg handling below treats the base register as a use; the swap is
 * undone after the sregs have been processed. */
14073 if (MONO_IS_STORE_MEMBASE (ins)) {
14074 tmp_reg = ins->dreg;
14075 ins->dreg = ins->sreg2;
14076 ins->sreg2 = tmp_reg;
14079 spec2 [MONO_INST_DEST] = ' ';
14080 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14081 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14082 spec2 [MONO_INST_SRC3] = ' ';
14084 } else if (MONO_IS_STORE_MEMINDEX (ins))
14085 g_assert_not_reached ();
14090 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14091 printf ("\t %.3s %d", spec, ins->dreg);
14092 num_sregs = mono_inst_get_src_registers (ins, sregs);
14093 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14094 printf (" %d", sregs [srcindex]);
/* DREG: if the destination is a stack-allocated variable, either fuse the
 * store into the instruction itself or emit an explicit spill store after it. */
14101 regtype = spec [MONO_INST_DEST];
14102 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14105 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14106 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14107 MonoInst *store_ins;
14109 MonoInst *def_ins = ins;
14110 int dreg = ins->dreg; /* The original vreg */
14112 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14114 if (var->opcode == OP_REGVAR) {
14115 ins->dreg = var->dreg;
14116 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14118 * Instead of emitting a load+store, use a _membase opcode.
14120 g_assert (var->opcode == OP_REGOFFSET);
14121 if (ins->opcode == OP_MOVE) {
14125 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14126 ins->inst_basereg = var->inst_basereg;
14127 ins->inst_offset = var->inst_offset;
14130 spec = INS_INFO (ins->opcode);
14134 g_assert (var->opcode == OP_REGOFFSET);
14136 prev_dreg = ins->dreg;
14138 /* Invalidate any previous lvreg for this vreg */
14139 vreg_to_lvreg [ins->dreg] = 0;
14143 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* With soft float, R8 values live in integer registers, so store as I8. */
14145 store_opcode = OP_STOREI8_MEMBASE_REG;
14148 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14150 #if SIZEOF_REGISTER != 8
14151 if (regtype == 'l') {
/* 64-bit store on a 32-bit target: spill the two halves separately. */
14152 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14153 mono_bblock_insert_after_ins (bb, ins, store_ins);
14154 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14155 mono_bblock_insert_after_ins (bb, ins, store_ins);
14156 def_ins = store_ins;
14161 g_assert (store_opcode != OP_STOREV_MEMBASE);
14163 /* Try to fuse the store into the instruction itself */
14164 /* FIXME: Add more instructions */
14165 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14166 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14167 ins->inst_imm = ins->inst_c0;
14168 ins->inst_destbasereg = var->inst_basereg;
14169 ins->inst_offset = var->inst_offset;
14170 spec = INS_INFO (ins->opcode);
14171 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A move into the variable becomes a direct store from the source reg. */
14172 ins->opcode = store_opcode;
14173 ins->inst_destbasereg = var->inst_basereg;
14174 ins->inst_offset = var->inst_offset;
/* The instruction is now a store, so swap dreg/sreg2 again to match the
 * store layout (base register in dreg), mirroring the earlier swap. */
14178 tmp_reg = ins->dreg;
14179 ins->dreg = ins->sreg2;
14180 ins->sreg2 = tmp_reg;
14183 spec2 [MONO_INST_DEST] = ' ';
14184 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14185 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14186 spec2 [MONO_INST_SRC3] = ' ';
14188 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14189 // FIXME: The backends expect the base reg to be in inst_basereg
14190 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14192 ins->inst_basereg = var->inst_basereg;
14193 ins->inst_offset = var->inst_offset;
14194 spec = INS_INFO (ins->opcode);
14196 /* printf ("INS: "); mono_print_ins (ins); */
14197 /* Create a store instruction */
14198 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14200 /* Insert it after the instruction */
14201 mono_bblock_insert_after_ins (bb, ins, store_ins);
14203 def_ins = store_ins;
14206 * We can't assign ins->dreg to var->dreg here, since the
14207 * sregs could use it. So set a flag, and do it after
/* ... i.e. after the sregs have been processed (see dest_has_lvreg below). */
14210 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14211 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for live-range bookkeeping. */
14216 if (def_ins && !live_range_start [dreg]) {
14217 live_range_start [dreg] = def_ins;
14218 live_range_start_bb [dreg] = bb;
14221 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14224 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14225 tmp->inst_c1 = dreg;
14226 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* SREGS: replace source vregs allocated to the stack with reloads (or fuse
 * the load into the instruction / reuse a cached lvreg). */
14233 num_sregs = mono_inst_get_src_registers (ins, sregs);
14234 for (srcindex = 0; srcindex < 3; ++srcindex) {
14235 regtype = spec [MONO_INST_SRC1 + srcindex];
14236 sreg = sregs [srcindex];
14238 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14239 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14240 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14241 MonoInst *use_ins = ins;
14242 MonoInst *load_ins;
14243 guint32 load_opcode;
14245 if (var->opcode == OP_REGVAR) {
14246 sregs [srcindex] = var->dreg;
14247 //mono_inst_set_src_registers (ins, sregs);
14248 live_range_end [sreg] = use_ins;
14249 live_range_end_bb [sreg] = bb;
14251 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14254 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14255 /* var->dreg is a hreg */
14256 tmp->inst_c1 = sreg;
14257 mono_bblock_insert_after_ins (bb, ins, tmp);
14263 g_assert (var->opcode == OP_REGOFFSET);
14265 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14267 g_assert (load_opcode != OP_LOADV_MEMBASE);
14269 if (vreg_to_lvreg [sreg]) {
14270 g_assert (vreg_to_lvreg [sreg] != -1);
14272 /* The variable is already loaded to an lvreg */
14273 if (G_UNLIKELY (cfg->verbose_level > 2))
14274 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14275 sregs [srcindex] = vreg_to_lvreg [sreg];
14276 //mono_inst_set_src_registers (ins, sregs);
14280 /* Try to fuse the load into the instruction */
14281 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14282 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14283 sregs [0] = var->inst_basereg;
14284 //mono_inst_set_src_registers (ins, sregs);
14285 ins->inst_offset = var->inst_offset;
14286 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14287 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14288 sregs [1] = var->inst_basereg;
14289 //mono_inst_set_src_registers (ins, sregs);
14290 ins->inst_offset = var->inst_offset;
14292 if (MONO_IS_REAL_MOVE (ins)) {
14293 ins->opcode = OP_NOP;
14296 //printf ("%d ", srcindex); mono_print_ins (ins);
14298 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the lvreg so later uses of this variable in the same bblock can
 * skip the reload (not for volatile/indirect vars or fp-stack loads). */
14300 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14301 if (var->dreg == prev_dreg) {
14303 * sreg refers to the value loaded by the load
14304 * emitted below, but we need to use ins->dreg
14305 * since it refers to the store emitted earlier.
14309 g_assert (sreg != -1);
14310 vreg_to_lvreg [var->dreg] = sreg;
14311 if (lvregs_len >= lvregs_size) {
/* Grow lvregs; mempool memory is not freed, the old array is abandoned. */
14312 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14313 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14314 lvregs = new_lvregs;
14317 lvregs [lvregs_len ++] = var->dreg;
14321 sregs [srcindex] = sreg;
14322 //mono_inst_set_src_registers (ins, sregs);
14324 #if SIZEOF_REGISTER != 8
14325 if (regtype == 'l') {
/* 64-bit reload on a 32-bit target: load the two halves separately. */
14326 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14327 mono_bblock_insert_before_ins (bb, ins, load_ins);
14328 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14329 mono_bblock_insert_before_ins (bb, ins, load_ins);
14330 use_ins = load_ins;
14335 #if SIZEOF_REGISTER == 4
14336 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14338 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14339 mono_bblock_insert_before_ins (bb, ins, load_ins);
14340 use_ins = load_ins;
14344 if (var->dreg < orig_next_vreg) {
14345 live_range_end [var->dreg] = use_ins;
14346 live_range_end_bb [var->dreg] = bb;
14349 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14352 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14353 tmp->inst_c1 = var->dreg;
14354 mono_bblock_insert_after_ins (bb, ins, tmp);
14358 mono_inst_set_src_registers (ins, sregs);
/* The sregs are done now, so it is safe to record the dreg's lvreg
 * (see the dest_has_lvreg comment above). */
14360 if (dest_has_lvreg) {
14361 g_assert (ins->dreg != -1);
14362 vreg_to_lvreg [prev_dreg] = ins->dreg;
14363 if (lvregs_len >= lvregs_size) {
14364 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14365 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14366 lvregs = new_lvregs;
14369 lvregs [lvregs_len ++] = prev_dreg;
14370 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above. */
14374 tmp_reg = ins->dreg;
14375 ins->dreg = ins->sreg2;
14376 ins->sreg2 = tmp_reg;
14379 if (MONO_IS_CALL (ins)) {
14380 /* Clear vreg_to_lvreg array */
/* NOTE(review): presumably because a call can clobber the lvregs — confirm. */
14381 for (i = 0; i < lvregs_len; i++)
14382 vreg_to_lvreg [lvregs [i]] = 0;
14384 } else if (ins->opcode == OP_NOP) {
14386 MONO_INST_NULLIFY_SREGS (ins);
14389 if (cfg->verbose_level > 2)
14390 mono_print_ins_index (1, ins);
14393 /* Extend the live range based on the liveness info */
14394 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14395 for (i = 0; i < cfg->num_varinfo; i ++) {
14396 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14398 if (vreg_is_volatile (cfg, vi->vreg))
14399 /* The liveness info is incomplete */
14402 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14403 /* Live from at least the first ins of this bb */
14404 live_range_start [vi->vreg] = bb->code;
14405 live_range_start_bb [vi->vreg] = bb;
14408 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14409 /* Live at least until the last ins of this bb */
14410 live_range_end [vi->vreg] = bb->last_ins;
14411 live_range_end_bb [vi->vreg] = bb;
14418 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14419 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14421 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14422 for (i = 0; i < cfg->num_varinfo; ++i) {
14423 int vreg = MONO_VARINFO (cfg, i)->vreg;
14426 if (live_range_start [vreg]) {
14427 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14429 ins->inst_c1 = vreg;
14430 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14432 if (live_range_end [vreg]) {
14433 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14435 ins->inst_c1 = vreg;
14436 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14437 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14439 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14444 if (cfg->gsharedvt_locals_var_ins) {
14445 /* Nullify if unused */
14446 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14447 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14450 g_free (live_range_start);
14451 g_free (live_range_end);
14452 g_free (live_range_start_bb);
14453 g_free (live_range_end_bb);
14459 * - use 'iadd' instead of 'int_add'
14460 * - handling ovf opcodes: decompose in method_to_ir.
14461 * - unify iregs/fregs
14462 * -> partly done, the missing parts are:
14463 * - a more complete unification would involve unifying the hregs as well, so
14464 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14465 * would no longer map to the machine hregs, so the code generators would need to
14466 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14467 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14468 * fp/non-fp branches speeds it up by about 15%.
14469 * - use sext/zext opcodes instead of shifts
14471 * - get rid of TEMPLOADs if possible and use vregs instead
14472 * - clean up usage of OP_P/OP_ opcodes
14473 * - cleanup usage of DUMMY_USE
14474 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14476 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14477 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14478 * - make sure handle_stack_args () is called before the branch is emitted
14479 * - when the new IR is done, get rid of all unused stuff
14480 * - COMPARE/BEQ as separate instructions or unify them ?
14481 * - keeping them separate allows specialized compare instructions like
14482 * compare_imm, compare_membase
14483 * - most back ends unify fp compare+branch, fp compare+ceq
14484 * - integrate mono_save_args into inline_method
14485 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14486 * - handle long shift opts on 32 bit platforms somehow: they require
14487 * 3 sregs (2 for arg1 and 1 for arg2)
14488 * - make byref a 'normal' type.
14489 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14490 * variable if needed.
14491 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14492 * like inline_method.
14493 * - remove inlining restrictions
14494 * - fix LNEG and enable cfold of INEG
14495 * - generalize x86 optimizations like ldelema as a peephole optimization
14496 * - add store_mem_imm for amd64
14497 * - optimize the loading of the interruption flag in the managed->native wrappers
14498 * - avoid special handling of OP_NOP in passes
14499 * - move code inserting instructions into one function/macro.
14500 * - try a coalescing phase after liveness analysis
14501 * - add float -> vreg conversion + local optimizations on !x86
14502 * - figure out how to handle decomposed branches during optimizations, ie.
14503 * compare+branch, op_jump_table+op_br etc.
14504 * - promote RuntimeXHandles to vregs
14505 * - vtype cleanups:
14506 * - add a NEW_VARLOADA_VREG macro
14507 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14508 * accessing vtype fields.
14509 * - get rid of I8CONST on 64 bit platforms
14510 * - dealing with the increase in code size due to branches created during opcode
14512 * - use extended basic blocks
14513 * - all parts of the JIT
14514 * - handle_global_vregs () && local regalloc
14515 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14516 * - sources of increase in code size:
14519 * - isinst and castclass
14520 * - lvregs not allocated to global registers even if used multiple times
14521 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14523 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14524 * - add all micro optimizations from the old JIT
14525 * - put tree optimizations into the deadce pass
14526 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14527 * specific function.
14528 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14529 * fcompare + branchCC.
14530 * - create a helper function for allocating a stack slot, taking into account
14531 * MONO_CFG_HAS_SPILLUP.
14533 * - merge the ia64 switch changes.
14534 * - optimize mono_regstate2_alloc_int/float.
14535 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14536 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14537 * parts of the tree could be separated by other instructions, killing the tree
14538 * arguments, or stores killing loads etc. Also, should we fold loads into other
14539 * instructions if the result of the load is used multiple times ?
14540 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14541 * - LAST MERGE: 108395.
14542 * - when returning vtypes in registers, generate IR and append it to the end of the
14543 * last bb instead of doing it in the epilog.
14544 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14552 - When to decompose opcodes:
14553 - earlier: this makes some optimizations hard to implement, since the low level IR
14554 no longer contains the necessary information. But it is easier to do.
14555 - later: harder to implement, enables more optimizations.
14556 - Branches inside bblocks:
14557 - created when decomposing complex opcodes.
14558 - branches to another bblock: harmless, but not tracked by the branch
14559 optimizations, so need to branch to a label at the start of the bblock.
14560 - branches to inside the same bblock: very problematic, trips up the local
14561 reg allocator. Can be fixed by splitting the current bblock, but that is a
14562 complex operation, since some local vregs can become global vregs etc.
14563 - Local/global vregs:
14564 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14565 local register allocator.
14566 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14567 structure, created by mono_create_var (). Assigned to hregs or the stack by
14568 the global register allocator.
14569 - When to do optimizations like alu->alu_imm:
14570 - earlier -> saves work later on since the IR will be smaller/simpler
14571 - later -> can work on more instructions
14572 - Handling of valuetypes:
14573 - When a vtype is pushed on the stack, a new temporary is created, an
14574 instruction computing its address (LDADDR) is emitted and pushed on
14575 the stack. Need to optimize cases when the vtype is used immediately as in
14576 argument passing, stloc etc.
14577 - Instead of the to_end stuff in the old JIT, simply call the function handling
14578 the values on the stack before emitting the last instruction of the bb.
14581 #else /* !DISABLE_JIT */
14584 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14588 #endif /* !DISABLE_JIT */