3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/*
 * Compilation-abort helper macros. All of these assume a local 'cfg'
 * (MonoCompile *) and an 'exception_exit' label in the enclosing function.
 * NOTE(review): this extract is missing lines — several macro bodies below
 * appear to lack their closing "} while (0)" lines; verify against the
 * complete file before editing.
 */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
/* Record why inlining failed (only when not inlining a wrapper) and abort. */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whether 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation and call-emission helpers
 * defined later in this file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 inline static MonoInst*
152 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
154 /* helper methods signatures */
/* Cached icall signatures, populated once by mono_create_helper_signatures (). */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
158 static MonoMethodSignature *helper_sig_jit_thread_attach;
159 static MonoMethodSignature *helper_sig_get_tls_tramp;
160 static MonoMethodSignature *helper_sig_set_tls_tramp;
162 /* type loading helpers */
/* Lazily-cached managed class lookups (generated accessor functions). */
163 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
164 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
167 * Instruction metadata
/* MINI_OP/MINI_OP3 are redefined before each inclusion of mini-ops.h to
 * extract a different column of the opcode table (here: dreg/sreg kinds). */
175 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
176 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
182 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
187 /* keep in sync with the enum in mini.h */
190 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
195 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
196 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
198 * This should contain the index of the last sreg + 1. This is not the same
199 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
201 const gint8 ins_sreg_counts[] = {
202 #include "mini-ops.h"
/* Public wrappers around the vreg allocators: integer, long, float,
 * pointer-sized, and stack-type-directed destination registers. */
208 mono_alloc_ireg (MonoCompile *cfg)
210 return alloc_ireg (cfg);
214 mono_alloc_lreg (MonoCompile *cfg)
216 return alloc_lreg (cfg);
220 mono_alloc_freg (MonoCompile *cfg)
222 return alloc_freg (cfg);
226 mono_alloc_preg (MonoCompile *cfg)
228 return alloc_preg (cfg);
232 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
234 return alloc_dreg (cfg, stack_type);
238 * mono_alloc_ireg_ref:
240 * Allocate an IREG, and mark it as holding a GC ref.
243 mono_alloc_ireg_ref (MonoCompile *cfg)
245 return alloc_ireg_ref (cfg);
249 * mono_alloc_ireg_mp:
251 * Allocate an IREG, and mark it as holding a managed pointer.
254 mono_alloc_ireg_mp (MonoCompile *cfg)
256 return alloc_ireg_mp (cfg);
260 * mono_alloc_ireg_copy:
262 * Allocate an IREG with the same GC type as VREG.
265 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC tracking kind (ref / managed pointer / plain int)
 * of the source vreg to the newly allocated one. */
267 if (vreg_is_ref (cfg, vreg))
268 return alloc_ireg_ref (cfg);
269 else if (vreg_is_mp (cfg, vreg))
270 return alloc_ireg_mp (cfg);
272 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) appropriate for
 * copying a value of MonoType TYPE between vregs. Enums and generic
 * instantiations are unwrapped to their underlying representation first.
 * NOTE(review): several case labels and return statements are missing from
 * this extract — the visible lines are only part of the switch.
 */
276 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 type = mini_get_underlying_type (type);
283 switch (type->type) {
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values kept in single-precision regs need OP_RMOVE. */
312 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
328 type = &type->data.generic_class->container_class->byval_arg;
332 g_assert (cfg->gshared);
333 if (mini_type_var_is_vt (type))
/* Type variables under gsharing: recurse on the shared representation. */
336 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
338 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the basic block's number, its in/out edge
 * lists (block number and depth-first number), then every instruction
 * in the block.
 */
344 mono_print_bb (MonoBasicBlock *bb, const char *msg)
348 GString *str = g_string_new ("");
350 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
353 g_string_append_printf (str, ", OUT: ");
354 for (i = 0; i < bb->out_count; ++i)
355 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
356 g_string_append_printf (str, " ]\n");
358 g_print ("%s", str->str);
/* Free both the GString wrapper and its character buffer. */
359 g_string_free (str, TRUE);
361 for (tree = bb->code; tree; tree = tree->next)
362 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Populate the cached icall signatures used by the JIT. The strings are
 * icall signature descriptors: return type followed by parameter types.
 */
366 mono_create_helper_signatures (void)
368 helper_sig_domain_get = mono_create_icall_signature ("ptr");
369 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
370 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
371 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
372 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
373 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debugging hook: trap into the debugger when unverifiable IL is seen and
 * the 'break_on_unverified' debug option is set. (The breakpoint statement
 * itself is on a line not visible in this extract.) */
376 static MONO_NEVER_INLINE void
377 break_on_unverified (void)
379 if (mini_get_debug_options ()->break_on_unverified)
/*
 * field_access_failure:
 *
 *   Mark the compilation as failed with a FieldAccessException naming the
 * inaccessible field and the offending method. The full-name strings are
 * heap-allocated and freed here after being formatted into the error.
 */
383 static MONO_NEVER_INLINE void
384 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
386 char *method_fname = mono_method_full_name (method, TRUE);
387 char *field_fname = mono_field_full_name (field);
388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
389 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
390 g_free (method_fname);
391 g_free (field_fname);
/*
 * inline_failure:
 *
 *   Record that inlining was aborted for the current method; MSG is printed
 * only in verbose mode.
 */
394 static MONO_NEVER_INLINE void
395 inline_failure (MonoCompile *cfg, const char *msg)
397 if (cfg->verbose_level >= 2)
398 printf ("inline failed: %s\n", msg);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
402 static MONO_NEVER_INLINE void
403 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 if (cfg->verbose_level > 2) \
406 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
407 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Mark the compilation as failed because OPCODE is not supported under
 * gsharedvt. Unlike gshared_failure (), this also stores a formatted
 * message (including FILE:LINE of the failing JIT site) in
 * cfg->exception_message; ownership of the g_strdup_printf buffer passes
 * to the cfg.
 */
410 static MONO_NEVER_INLINE void
411 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
413 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
414 if (cfg->verbose_level >= 2)
415 printf ("%s\n", cfg->exception_message);
416 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
420 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
421 * foo<T> (int i) { ldarg.0; box T; }
/* Abort on unverifiable IL: under gsharedvt fall back to compiling the
 * concrete instantiation; otherwise trip the debugger hook first.
 * Assumes 'cfg' and an 'exception_exit' label in the expanding function. */
423 #define UNVERIFIED do { \
424 if (cfg->gsharedvt) { \
425 if (cfg->verbose_level > 2) \
426 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
427 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
428 goto exception_exit; \
430 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * address IP, using the cil_offset_to_bb cache. IP outside the method body
 * is treated as unverifiable IL. Assumes 'end' and 'header' locals in the
 * expanding function. */
434 #define GET_BBLOCK(cfg,tblock,ip) do { \
435 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
437 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
438 NEW_BBLOCK (cfg, (tblock)); \
439 (tblock)->cil_code = (ip); \
440 ADD_BBLOCK (cfg, (tblock)); \
444 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA (dest = sr1 + (sr2 << shift) + imm) into the current
 * basic block; the destination is a fresh managed-pointer vreg. */
445 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
446 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
447 (dest)->dreg = alloc_ireg_mp ((cfg)); \
448 (dest)->sreg1 = (sr1); \
449 (dest)->sreg2 = (sr2); \
450 (dest)->inst_imm = (imm); \
451 (dest)->backend.shift_amount = (shift); \
452 MONO_ADD_INS ((cfg)->cbb, (dest)); \
456 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   If the two stack operands of INS disagree (r4 vs r8, or i4 vs native
 * pointer on 64-bit), emit the widening conversion and rewrite the operand
 * reference / INS source register accordingly.
 * NOTE(review): some branch bodies are missing from this extract.
 */
458 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
460 MonoInst *arg1 = *arg1_ref;
461 MonoInst *arg2 = *arg2_ref;
464 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
465 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
468 /* Mixing r4/r8 is allowed by the spec */
469 if (arg1->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
473 conv->type = STACK_R8;
477 if (arg2->type == STACK_R4) {
478 int dreg = alloc_freg (cfg);
480 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
481 conv->type = STACK_R8;
487 #if SIZEOF_REGISTER == 8
488 /* FIXME: Need to add many more cases */
489 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
492 int dr = alloc_preg (cfg);
/* Sign-extend the i4 operand to pointer width before the binop. */
493 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
494 (ins)->sreg2 = widen->dreg;
/*
 * Eval-stack helper macros used by the main IL-decoding loop. They assume
 * the loop locals: 'sp' (stack pointer), 'ins', 'cmp', 'tblock', 'ip',
 * 'target', 'stack_start', 'start_new_bblock'.
 *   ADD_BINOP   — pop two operands, type-check, widen, push result.
 *   ADD_UNOP    — pop one operand, type-check, push result.
 *   ADD_BINCOND — emit compare + conditional branch, wiring up the true
 *                 and false target basic blocks.
 */
499 #define ADD_BINOP(op) do { \
500 MONO_INST_NEW (cfg, ins, (op)); \
502 ins->sreg1 = sp [0]->dreg; \
503 ins->sreg2 = sp [1]->dreg; \
504 type_from_op (cfg, ins, sp [0], sp [1]); \
506 /* Have to insert a widening op */ \
507 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
508 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
509 MONO_ADD_INS ((cfg)->cbb, (ins)); \
510 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
513 #define ADD_UNOP(op) do { \
514 MONO_INST_NEW (cfg, ins, (op)); \
516 ins->sreg1 = sp [0]->dreg; \
517 type_from_op (cfg, ins, sp [0], NULL); \
519 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
520 MONO_ADD_INS ((cfg)->cbb, (ins)); \
521 *sp++ = mono_decompose_opcode (cfg, ins); \
524 #define ADD_BINCOND(next_block) do { \
527 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
528 cmp->sreg1 = sp [0]->dreg; \
529 cmp->sreg2 = sp [1]->dreg; \
530 type_from_op (cfg, cmp, sp [0], sp [1]); \
532 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
533 type_from_op (cfg, ins, sp [0], sp [1]); \
534 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
535 GET_BBLOCK (cfg, tblock, target); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_true_bb = tblock; \
538 if ((next_block)) { \
539 link_bblock (cfg, cfg->cbb, (next_block)); \
540 ins->inst_false_bb = (next_block); \
541 start_new_bblock = 1; \
543 GET_BBLOCK (cfg, tblock, ip); \
544 link_bblock (cfg, cfg->cbb, tblock); \
545 ins->inst_false_bb = tblock; \
546 start_new_bblock = 2; \
548 if (sp != stack_start) { \
549 handle_stack_args (cfg, stack_start, sp - stack_start); \
550 CHECK_UNVERIFIABLE (cfg); \
552 MONO_ADD_INS (cfg->cbb, cmp); \
553 MONO_ADD_INS (cfg->cbb, ins); \
557 * link_bblock: Links two basic blocks
559 * links two basic blocks in the control flow graph, the 'from'
560 * argument is the starting block and the 'to' argument is the block
561 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping duplicates.
 * Edge arrays live in the cfg mempool and are reallocated one entry larger
 * on each insertion. Missing lines in this extract include the duplicate-
 * check early-outs and the array store/count updates. */
564 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
566 MonoBasicBlock **newa;
/* Verbose-mode edge tracing; exit/entry blocks have no cil_code. */
570 if (from->cil_code) {
572 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
574 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
577 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
579 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
584 for (i = 0; i < from->out_count; ++i) {
585 if (to == from->out_bb [i]) {
591 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
592 for (i = 0; i < from->out_count; ++i) {
593 newa [i] = from->out_bb [i];
/* Same dance for the reverse (in) edge on 'to'. */
601 for (i = 0; i < to->in_count; ++i) {
602 if (from == to->in_bb [i]) {
608 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
609 for (i = 0; i < to->in_count; ++i) {
610 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
619 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
621 link_bblock (cfg, from, to);
625 * mono_find_block_region:
627 * We mark each basic block with a region ID. We use that to avoid BB
628 * optimizations when blocks are in different regions.
631 * A region token that encodes where this region is, and information
632 * about the clause owner for this block.
634 * The region encodes the try/catch/filter clause that owns this block
635 * as well as the type. -1 is a special value that represents a block
636 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause_flags.
 * Handlers are checked before try bodies so the innermost owner wins. */
639 mono_find_block_region (MonoCompile *cfg, int offset)
641 MonoMethodHeader *header = cfg->header;
642 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter expression or a handler? */
645 for (i = 0; i < header->num_clauses; ++i) {
646 clause = &header->clauses [i];
647 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
648 (offset < (clause->handler_offset)))
649 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
651 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
652 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
653 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
654 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
655 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
657 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: is OFFSET inside a protected (try) range? */
660 for (i = 0; i < header->num_clauses; ++i) {
661 clause = &header->clauses [i];
663 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
664 return ((i + 1) << 8) | clause->flags;
/*
 * ip_in_finally_clause:
 *
 *   Return whether IL offset OFFSET falls inside the handler range of a
 * finally or fault clause of the current method.
 */
671 ip_in_finally_clause (MonoCompile *cfg, int offset)
673 MonoMethodHeader *header = cfg->header;
674 MonoExceptionClause *clause;
677 for (i = 0; i < header->num_clauses; ++i) {
678 clause = &header->clauses [i];
/* Only finally/fault handlers are of interest; skip catch/filter. */
679 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
682 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * mono_find_final_block:
 *
 *   Collect (as a GList) the exception clauses of kind TYPE whose protected
 * range contains IP but not TARGET — i.e. the handlers a branch from IP to
 * TARGET would leave and therefore must run (e.g. finally blocks on leave).
 */
689 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
691 MonoMethodHeader *header = cfg->header;
692 MonoExceptionClause *clause;
696 for (i = 0; i < header->num_clauses; ++i) {
697 clause = &header->clauses [i];
698 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
699 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
700 if (clause->flags == type)
701 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return (creating and caching on first use) the stack-pointer spill
 * variable for exception REGION. The variable is kept on the stack
 * (MONO_INST_VOLATILE) so the EH machinery can address it.
 */
708 mono_create_spvar_for_region (MonoCompile *cfg, int region)
712 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
716 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
717 /* prevent it from being register allocated */
718 var->flags |= MONO_INST_VOLATILE;
720 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable cached for the
 * handler starting at IL OFFSET; NULL if none exists yet. */
724 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
726 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return (creating and caching on first use) the object-typed variable
 * that holds the in-flight exception for the handler at IL OFFSET. Kept on
 * the stack (MONO_INST_VOLATILE) so the EH machinery can store into it.
 */
730 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
734 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
738 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
739 /* prevent it from being register allocated */
740 var->flags |= MONO_INST_VOLATILE;
742 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
748 * Returns the type used in the eval stack when @type is loaded.
749 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/...) and inst->klass from
 * TYPE. Byref types become STACK_MP; enums/generic insts are unwrapped and
 * handled by falling through or tail-recursing. Case labels for several
 * MONO_TYPE_* values are missing from this extract. */
752 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
756 type = mini_get_underlying_type (type);
757 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref: managed pointer on the eval stack. */
759 inst->type = STACK_MP;
764 switch (type->type) {
766 inst->type = STACK_INV;
774 inst->type = STACK_I4;
779 case MONO_TYPE_FNPTR:
780 inst->type = STACK_PTR;
782 case MONO_TYPE_CLASS:
783 case MONO_TYPE_STRING:
784 case MONO_TYPE_OBJECT:
785 case MONO_TYPE_SZARRAY:
786 case MONO_TYPE_ARRAY:
787 inst->type = STACK_OBJ;
791 inst->type = STACK_I8;
793 inst->type = cfg->r4_stack_type;
794 inst->type = cfg->r4_stack_type;
797 inst->type = STACK_R8;
799 case MONO_TYPE_VALUETYPE:
800 if (type->data.klass->enumtype) {
801 type = mono_class_enum_basetype (type->data.klass);
805 inst->type = STACK_VTYPE;
808 case MONO_TYPE_TYPEDBYREF:
809 inst->klass = mono_defaults.typed_reference_class;
810 inst->type = STACK_VTYPE;
812 case MONO_TYPE_GENERICINST:
813 type = &type->data.generic_class->container_class->byval_arg;
817 g_assert (cfg->gshared);
818 if (mini_is_gsharedvt_type (type)) {
819 g_assert (cfg->gsharedvt);
820 inst->type = STACK_VTYPE;
822 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
826 g_error ("unknown type 0x%02x in eval stack type", type->type);
831 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result-type table for arithmetic binops, indexed by the two operand
 * stack types. Row/column order follows the MonoStackType enum
 * (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE, R4); the R8 and R4 rows carry a
 * 9th column for the R4 operand. */
834 bin_num_table [STACK_MAX] [STACK_MAX] = {
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
840 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result types for the unary neg/not family, indexed by operand type
 * (declaration line of this table is not visible in this extract). */
848 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
851 /* reduce the size of this table */
/* Result types for integer-only binops (and/or/xor/...). */
853 bin_int_table [STACK_MAX] [STACK_MAX] = {
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability classes: 0 = invalid, non-zero = allowed (the exact value
 * encodes which comparison subsets are legal; bit 0 = all comparisons). */
865 bin_comp_table [STACK_MAX] [STACK_MAX] = {
866 /* Inv i L p F & O vt r4 */
868 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
869 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
870 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
871 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
872 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
873 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
874 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
875 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
878 /* reduce the size of this table */
/* Result types for shift ops: the shift amount must be I4 or native int. */
880 shift_table [STACK_MAX] [STACK_MAX] = {
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
892 * Tables to map from the non-specific opcode to the matching
893 * type-specific opcode.
895 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CEE_* opcode to obtain the
 * type-specialized OP_* opcode, indexed by operand stack type. */
897 binops_op_map [STACK_MAX] = {
898 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
901 /* handles from CEE_NEG to CEE_CONV_U8 */
903 unops_op_map [STACK_MAX] = {
904 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
907 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
909 ovfops_op_map [STACK_MAX] = {
910 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
913 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
915 ovf2ops_op_map [STACK_MAX] = {
916 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
919 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
921 ovf3ops_op_map [STACK_MAX] = {
922 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
925 /* handles from CEE_BEQ to CEE_BLT_UN */
927 beqops_op_map [STACK_MAX] = {
928 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
931 /* handles from CEE_CEQ to CEE_CLT_UN */
933 ceqops_op_map [STACK_MAX] = {
934 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
938 * Sets ins->type (the type on the eval stack) according to the
939 * type of the opcode and the arguments to it.
940 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
942 * FIXME: this function sets ins->type unconditionally in some cases, but
943 * it should set it to invalid for some types (a conv.x on an object)
/* Core IL type-checking/specialization switch. Many case labels and break
 * statements are missing from this extract; the *_op_map tables above turn
 * generic CEE_* opcodes into type-specific OP_* ones by adding a delta
 * selected by the operand's stack type. */
946 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
948 switch (ins->opcode) {
955 /* FIXME: check unverifiable args for STACK_MP */
/* Arithmetic binops: result type from bin_num_table, then specialize. */
956 ins->type = bin_num_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
964 ins->type = bin_int_table [src1->type] [src2->type];
965 ins->opcode += binops_op_map [ins->type];
/* Shifts: amount operand restricted per shift_table. */
970 ins->type = shift_table [src1->type] [src2->type];
971 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick L/R4/R8/I variant from the operand types. */
976 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
977 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
978 ins->opcode = OP_LCOMPARE;
979 else if (src1->type == STACK_R4)
980 ins->opcode = OP_RCOMPARE;
981 else if (src1->type == STACK_R8)
982 ins->opcode = OP_FCOMPARE;
984 ins->opcode = OP_ICOMPARE;
986 case OP_ICOMPARE_IMM:
987 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
988 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
989 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
1001 ins->opcode += beqops_op_map [src1->type];
/* ceq/cgt/clt family: result is an I4 boolean. */
1004 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
1011 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1012 ins->opcode += ceqops_op_map [src1->type];
/* Unary neg/not. */
1016 ins->type = neg_table [src1->type];
1017 ins->opcode += unops_op_map [ins->type];
1020 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1021 ins->type = src1->type;
1023 ins->type = STACK_INV;
1024 ins->opcode += unops_op_map [ins->type];
/* conv.i1/u1/i2/u2/i4/u4 → I4 result. */
1030 ins->type = STACK_I4;
1031 ins->opcode += unops_op_map [src1->type];
/* conv.r.un → R8. */
1034 ins->type = STACK_R8;
1035 switch (src1->type) {
1038 ins->opcode = OP_ICONV_TO_R_UN;
1041 ins->opcode = OP_LCONV_TO_R_UN;
1045 case CEE_CONV_OVF_I1:
1046 case CEE_CONV_OVF_U1:
1047 case CEE_CONV_OVF_I2:
1048 case CEE_CONV_OVF_U2:
1049 case CEE_CONV_OVF_I4:
1050 case CEE_CONV_OVF_U4:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf3ops_op_map [src1->type];
1054 case CEE_CONV_OVF_I_UN:
1055 case CEE_CONV_OVF_U_UN:
1056 ins->type = STACK_PTR;
1057 ins->opcode += ovf2ops_op_map [src1->type];
1059 case CEE_CONV_OVF_I1_UN:
1060 case CEE_CONV_OVF_I2_UN:
1061 case CEE_CONV_OVF_I4_UN:
1062 case CEE_CONV_OVF_U1_UN:
1063 case CEE_CONV_OVF_U2_UN:
1064 case CEE_CONV_OVF_U4_UN:
1065 ins->type = STACK_I4;
1066 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u → native unsigned; opcode depends on source width/pointerness. */
1069 ins->type = STACK_PTR;
1070 switch (src1->type) {
1072 ins->opcode = OP_ICONV_TO_U;
1076 #if SIZEOF_VOID_P == 8
1077 ins->opcode = OP_LCONV_TO_U;
1079 ins->opcode = OP_MOVE;
1083 ins->opcode = OP_LCONV_TO_U;
1086 ins->opcode = OP_FCONV_TO_U;
/* conv.i8/u8 → I8. */
1092 ins->type = STACK_I8;
1093 ins->opcode += unops_op_map [src1->type];
1095 case CEE_CONV_OVF_I8:
1096 case CEE_CONV_OVF_U8:
1097 ins->type = STACK_I8;
1098 ins->opcode += ovf3ops_op_map [src1->type];
1100 case CEE_CONV_OVF_U8_UN:
1101 case CEE_CONV_OVF_I8_UN:
1102 ins->type = STACK_I8;
1103 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4: result kept as R4 or widened to R8 per cfg->r4_stack_type. */
1106 ins->type = cfg->r4_stack_type;
1107 ins->opcode += unops_op_map [src1->type];
1110 ins->type = STACK_R8;
1111 ins->opcode += unops_op_map [src1->type];
1114 ins->type = STACK_R8;
1118 ins->type = STACK_I4;
1119 ins->opcode += ovfops_op_map [src1->type];
1122 case CEE_CONV_OVF_I:
1123 case CEE_CONV_OVF_U:
1124 ins->type = STACK_PTR;
1125 ins->opcode += ovfops_op_map [src1->type];
/* Checked arithmetic: floats are not allowed for the *_ovf opcodes. */
1128 case CEE_ADD_OVF_UN:
1130 case CEE_MUL_OVF_UN:
1132 case CEE_SUB_OVF_UN:
1133 ins->type = bin_num_table [src1->type] [src2->type];
1134 ins->opcode += ovfops_op_map [src1->type];
1135 if (ins->type == STACK_R8)
1136 ins->type = STACK_INV;
1138 case OP_LOAD_MEMBASE:
1139 ins->type = STACK_PTR;
1141 case OP_LOADI1_MEMBASE:
1142 case OP_LOADU1_MEMBASE:
1143 case OP_LOADI2_MEMBASE:
1144 case OP_LOADU2_MEMBASE:
1145 case OP_LOADI4_MEMBASE:
1146 case OP_LOADU4_MEMBASE:
1147 ins->type = STACK_PTR;
1149 case OP_LOADI8_MEMBASE:
1150 ins->type = STACK_I8;
1152 case OP_LOADR4_MEMBASE:
1153 ins->type = cfg->r4_stack_type;
1155 case OP_LOADR8_MEMBASE:
1156 ins->type = STACK_R8;
1159 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed-pointer results default their klass to object. */
1163 if (ins->type == STACK_MP)
1164 ins->klass = mono_defaults.object_class;
/* NOTE(review): the declaration line for the following initializer is not
 * visible in this extract — presumably the ldind-opcode → stack-type table
 * (one entry per CEE_LDIND_* variant, I1..REF); confirm against the full
 * file. */
1169 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Stack-type × parameter-type compatibility table (body not visible here). */
1175 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the evaluation-stack values in ARGS are compatible with
 * the parameter types of SIG (byref-ness, reference kinds, float widths).
 * Several case labels and the return statements are missing from this
 * extract.
 */
1180 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1185 switch (args->type) {
1195 for (i = 0; i < sig->param_count; ++i) {
1196 switch (args [i].type) {
/* Managed pointers on the stack require a byref parameter, and vice versa. */
1200 if (!sig->params [i]->byref)
1204 if (sig->params [i]->byref)
1206 switch (sig->params [i]->type) {
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
/* Float stack values must target an R4 or R8 (non-byref) parameter. */
1218 if (sig->params [i]->byref)
1220 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1229 /*if (!param_table [args [i].type] [sig->params [i]->type])
1237 * When we need a pointer to the current domain many times in a method, we
1238 * call mono_domain_get() once and we store the result in a local variable.
1239 * This function returns the variable that represents the MonoDomain*.
1241 inline static MonoInst *
1242 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the pointer-sized local on first request. */
1244 if (!cfg->domainvar)
1245 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 return cfg->domainvar;
1250 * The got_var contains the address of the Global Offset Table when AOT
/* Return the GOT-address variable, creating it lazily; NULL when not
 * compiling AOT or when the backend does not need an explicit got_var. */
1254 mono_get_got_var (MonoCompile *cfg)
1256 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1258 if (!cfg->got_var) {
1259 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1261 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Return (creating lazily) the variable holding the runtime generic
 * context / vtable for gshared methods. Only valid under generic sharing.
 */
1265 mono_get_vtable_var (MonoCompile *cfg)
1267 g_assert (cfg->gshared);
1269 if (!cfg->rgctx_var) {
1270 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1271 /* force the var to be stack allocated */
1272 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1275 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's evaluation-stack type (STACK_*) back to a MonoType.
 *   NOTE(review): the STACK_MP case label is missing from this excerpt; the
 *   'return &ins->klass->this_arg' line below appears to be its body.
 */
1279 type_from_stack_type (MonoInst *ins) {
1280 switch (ins->type) {
1281 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1282 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1283 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1284 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1285 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1287 return &ins->klass->this_arg;
1288 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1289 case STACK_VTYPE: return &ins->klass->byval_arg;
/* Unknown stack types are a compiler bug: abort loudly. */
1291 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map a MonoType to its evaluation-stack type (STACK_*).
 *   NOTE(review): most case labels and return statements are missing from
 *   this excerpt; only the skeleton of the switch is visible.
 */
1296 static G_GNUC_UNUSED int
1297 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. so the switch sees the underlying type. */
1299 t = mono_type_get_underlying_type (t);
1311 case MONO_TYPE_FNPTR:
1313 case MONO_TYPE_CLASS:
1314 case MONO_TYPE_STRING:
1315 case MONO_TYPE_OBJECT:
1316 case MONO_TYPE_SZARRAY:
1317 case MONO_TYPE_ARRAY:
/* R4 maps to STACK_R4 or STACK_R8 depending on backend config. */
1323 return cfg->r4_stack_type;
1326 case MONO_TYPE_VALUETYPE:
1327 case MONO_TYPE_TYPEDBYREF:
1329 case MONO_TYPE_GENERICINST:
1330 if (mono_type_generic_inst_is_valuetype (t))
1336 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CIL array-access opcode (CEE_LDELEM_* / CEE_STELEM_*) to the
 *   element class it operates on.
 *   NOTE(review): most case labels are missing from this excerpt; only the
 *   returned classes and the final LDELEM_REF/STELEM_REF cases are visible.
 */
1343 array_access_to_klass (int opcode)
1347 return mono_defaults.byte_class;
1349 return mono_defaults.uint16_class;
1352 return mono_defaults.int_class;
1355 return mono_defaults.sbyte_class;
1358 return mono_defaults.int16_class;
1361 return mono_defaults.int32_class;
1363 return mono_defaults.uint32_class;
1366 return mono_defaults.int64_class;
1369 return mono_defaults.single_class;
1372 return mono_defaults.double_class;
1373 case CEE_LDELEM_REF:
1374 case CEE_STELEM_REF:
1375 return mono_defaults.object_class;
1377 g_assert_not_reached ();
1383 * We try to share variables when possible
/* Return a local variable for stack SLOT holding a value like INS, reusing a
 * previously created one of the same slot/type when possible. */
1386 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1391 /* inlining can result in deeper stacks */
1392 if (slot >= cfg->header->max_stack)
1393 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: (stack type, slot) pair, so compatible values share one local. */
1395 pos = ins->type - 1 + slot * STACK_MAX;
1397 switch (ins->type) {
1404 if ((vnum = cfg->intvars [pos]))
1405 return cfg->varinfo [vnum];
1406 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1407 cfg->intvars [pos] = res->inst_c0;
/* Fallback path (presumably for non-cacheable stack types): always create a new var. */
1410 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for KEY in cfg->token_info_hash so the AOT
 * compiler can later resolve KEY back to metadata. */
1416 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1419 * Don't use this if a generic_context is set, since that means AOT can't
1420 * look up the method using just the image+token.
1421 * table == 0 means this is a reference made from a wrapper.
1423 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1424 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1425 jump_info_token->image = image;
1426 jump_info_token->token = token;
1427 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1432 * This function is called to handle items that are left on the evaluation stack
1433 * at basic block boundaries. What happens is that we save the values to local variables
1434 * and we reload them later when first entering the target basic block (with the
1435 * handle_loaded_temps () function).
1436 * A single join point will use the same variables (stored in the array bb->out_stack or
1437 * bb->in_stack, if the basic block is before or after the join point).
1439 * This function needs to be called _before_ emitting the last instruction of
1440 * the bb (i.e. before emitting a branch).
1441 * If the stack merge fails at a join point, cfg->unverifiable is set.
1444 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1447 MonoBasicBlock *bb = cfg->cbb;
1448 MonoBasicBlock *outb;
1449 MonoInst *inst, **locals;
1454 if (cfg->verbose_level > 3)
1455 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables will carry the stack. */
1456 if (!bb->out_scount) {
1457 bb->out_scount = count;
1458 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack as our out_stack. */
1460 for (i = 0; i < bb->out_count; ++i) {
1461 outb = bb->out_bb [i];
1462 /* exception handlers are linked, but they should not be considered for stack args */
1463 if (outb->flags & BB_EXCEPTION_HANDLER)
1465 //printf (" %d", outb->block_num);
1466 if (outb->in_stack) {
1468 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh carrier variables. */
1474 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1475 for (i = 0; i < count; ++i) {
1477 * try to reuse temps already allocated for this purpose, if they occupy the same
1478 * stack slot and if they are of the same type.
1479 * This won't cause conflicts since if 'local' is used to
1480 * store one of the values in the in_stack of a bblock, then
1481 * the same variable will be used for the same outgoing stack
1483 * This doesn't work when inlining methods, since the bblocks
1484 * in the inlined methods do not inherit their in_stack from
1485 * the bblock they are inlined to. See bug #58863 for an
1488 if (cfg->inlined_method)
1489 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1491 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack into every successor that has no in_stack yet;
 * a count mismatch with an existing in_scount is a stack-merge failure. */
1496 for (i = 0; i < bb->out_count; ++i) {
1497 outb = bb->out_bb [i];
1498 /* exception handlers are linked, but they should not be considered for stack args */
1499 if (outb->flags & BB_EXCEPTION_HANDLER)
1501 if (outb->in_scount) {
1502 if (outb->in_scount != bb->out_scount) {
1503 cfg->unverifiable = TRUE;
1506 continue; /* check they are the same locals */
1508 outb->in_scount = count;
1509 outb->in_stack = bb->out_stack;
/* Store each live stack value into its carrier variable and replace the
 * stack entry with the variable, so the successor reloads from it. */
1512 locals = bb->out_stack;
1514 for (i = 0; i < count; ++i) {
1515 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1516 inst->cil_code = sp [i]->cil_code;
1517 sp [i] = locals [i];
1518 if (cfg->verbose_level > 3)
1519 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1523 * It is possible that the out bblocks already have in_stack assigned, and
1524 * the in_stacks differ. In this case, we will store to all the different
1531 /* Find a bblock which has a different in_stack */
1533 while (bindex < bb->out_count) {
1534 outb = bb->out_bb [bindex];
1535 /* exception handlers are linked, but they should not be considered for stack args */
1536 if (outb->flags & BB_EXCEPTION_HANDLER) {
1540 if (outb->in_stack != locals) {
/* Duplicate the stores into this successor's distinct in_stack variables. */
1541 for (i = 0; i < count; ++i) {
1542 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1543 inst->cil_code = sp [i]->cil_code;
1544 sp [i] = locals [i];
1545 if (cfg->verbose_level > 3)
1546 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1548 locals = outb->in_stack;
/* Emit IR loading a runtime constant described by (PATCH_TYPE, DATA):
 * an AOT constant when AOT-compiling, otherwise resolve the patch now
 * and emit the concrete pointer. Returns the load instruction. */
1558 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1562 if (cfg->compile_aot) {
1563 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
/* JIT path: resolve the patch target eagerly and embed it as a constant. */
1569 ji.type = patch_type;
1570 ji.data.target = data;
1571 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1572 mono_error_assert_ok (&error);
1574 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper over emit_runtime_constant (); see that function for details. */
1580 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1582 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 *   Emit IR that sets SIZE bytes at [DESTREG + OFFSET] to VAL (only 0 is
 *   supported, asserted below), honoring ALIGN. Small aligned sizes use
 *   immediate stores; larger regions use register stores of decreasing width.
 */
1586 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1590 g_assert (val == 0);
/* Fast path: a single immediate store when the region fits one aligned register. */
1595 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1598 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1601 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1604 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1606 #if SIZEOF_REGISTER == 8
1608 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register and store it repeatedly. */
1614 val_reg = alloc_preg (cfg);
1616 if (SIZEOF_REGISTER == 8)
1617 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1619 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1622 /* This could be optimized further if necessary */
/* Unaligned destination: fall back to byte stores. */
1624 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Use the widest store the backend tolerates, then narrower tails. */
1631 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1633 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1638 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1645 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying SIZE bytes from [SRCREG + SOFFSET] to
 *   [DESTREG + DOFFSET] with the given ALIGN, using the widest load/store
 *   pairs the alignment and backend allow, then narrower tail copies.
 */
1662 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1669 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1670 g_assert (size < 10000);
1673 /* This could be optimized further if necessary */
/* Unaligned copy: byte-at-a-time load/store pairs. */
1675 cur_reg = alloc_preg (cfg);
1676 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks where the backend allows (possibly unaligned) wide access. */
1684 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1686 cur_reg = alloc_preg (cfg);
1687 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1696 cur_reg = alloc_preg (cfg);
1697 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1698 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1704 cur_reg = alloc_preg (cfg);
1705 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1712 cur_reg = alloc_preg (cfg);
1713 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Create an OP_TLS_GET instruction reading TLS slot KEY directly, when a
 * static TLS offset is known and the architecture supports fast TLS.
 * Presumably returns NULL otherwise (the fallthrough lines are not in this
 * excerpt) — callers then use the slow-path helpers. */
1722 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1724 int tls_offset = mono_tls_get_tls_offset (key);
/* AOT code cannot bake in a process-specific TLS offset. */
1726 if (cfg->compile_aot)
1729 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1731 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1732 ins->dreg = mono_alloc_preg (cfg);
1733 ins->inst_offset = tls_offset;
/* Create an OP_TLS_SET instruction writing VALUE into TLS slot KEY directly,
 * mirroring mono_create_fast_tls_getter () above; same AOT/fast-TLS guards. */
1740 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1742 int tls_offset = mono_tls_get_tls_offset (key);
/* AOT code cannot bake in a process-specific TLS offset. */
1744 if (cfg->compile_aot)
1747 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1749 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1750 ins->sreg1 = value->dreg;
1751 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 *   Emit IR reading TLS slot KEY. Prefers the fast inline OP_TLS_GET; falls
 *   back to a dedicated getter trampoline (AOT) or a JIT icall.
 */
1759 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1761 MonoInst *fast_tls = NULL;
/* The debug option use_fallback_tls forces the slow path for testing. */
1763 if (!mini_get_debug_options ()->use_fallback_tls)
1764 fast_tls = mono_create_fast_tls_getter (cfg, key);
1767 MONO_ADD_INS (cfg->cbb, fast_tls);
1771 if (cfg->compile_aot) {
1774 * tls getters are critical pieces of code and we don't want to resolve them
1775 * through the standard plt/tramp mechanism since we might expose ourselves
1776 * to crashes and infinite recursions.
1778 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1779 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
/* JIT fallback: call the per-key TLS getter as an icall. */
1781 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1782 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 *   Emit IR writing VALUE into TLS slot KEY; the setter counterpart of
 *   mono_create_tls_get () with the same fast/trampoline/icall tiers.
 */
1787 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1789 MonoInst *fast_tls = NULL;
1791 if (!mini_get_debug_options ()->use_fallback_tls)
1792 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1795 MONO_ADD_INS (cfg->cbb, fast_tls);
/* AOT: call the setter trampoline resolved via a dedicated patch type. */
1799 if (cfg->compile_aot) {
1801 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1802 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
/* JIT fallback: call the per-key TLS setter as an icall. */
1804 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1805 return mono_emit_jit_icall (cfg, setter, &value);
1812 * Emit IR to push the current LMF onto the LMF stack.
1815 emit_push_lmf (MonoCompile *cfg)
1818 * Emit IR to push the LMF:
1819 * lmf_addr = <lmf_addr from tls>
1820 * lmf->lmf_addr = lmf_addr
1821 * lmf->prev_lmf = *lmf_addr
1824 MonoInst *ins, *lmf_ins;
1829 int lmf_reg, prev_lmf_reg;
1831 * Store lmf_addr in a variable, so it can be allocated to a global register.
1833 if (!cfg->lmf_addr_var)
1834 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* One path computes lmf_addr from the jit_tls structure... */
1837 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1839 int jit_tls_dreg = ins->dreg;
1841 lmf_reg = alloc_preg (cfg);
1842 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...the other reads it directly from its own TLS slot. */
1844 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1847 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Address of this frame's LMF structure (a stack-allocated variable). */
1849 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1850 lmf_reg = ins->dreg;
1852 prev_lmf_reg = alloc_preg (cfg);
1853 /* Save previous_lmf */
1854 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1855 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this frame's LMF the new top of the LMF stack. */
1857 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1863 * Emit IR to pop the current LMF from the LMF stack.
1866 emit_pop_lmf (MonoCompile *cfg)
1868 int lmf_reg, lmf_addr_reg;
1874 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1875 lmf_reg = ins->dreg;
1879 * Emit IR to pop the LMF:
1880 * *(lmf->lmf_addr) = lmf->prev_lmf
1882 /* This could be called before emit_push_lmf () */
1883 if (!cfg->lmf_addr_var)
1884 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1885 lmf_addr_reg = cfg->lmf_addr_var->dreg;
/* Restore the saved previous_lmf as the new top of the LMF stack. */
1887 prev_lmf_reg = alloc_preg (cfg);
1888 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1889 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave instrumentation icall to FUNC, passing the
 * compiled method as the single argument; only when enter/leave profiling
 * is enabled and we are not inside an inlined method. */
1893 emit_instrumentation_call (MonoCompile *cfg, void *func)
1895 MonoInst *iargs [1];
1898 * Avoid instrumenting inlined methods since it can
1899 * distort profiling results.
1901 if (cfg->method != cfg->current_method)
1904 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1905 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1906 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Choose the call IR opcode for a given return TYPE, selecting the
 *   register-indirect (CALLI) or vtable-slot (virtual) variant per the
 *   CALLI/VIRT flags. NOTE(review): some case labels are missing from this
 *   excerpt; enum/genericinst cases presumably loop back via a handle_enum
 *   label that is not visible here.
 */
1911 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1914 type = mini_get_underlying_type (type);
1915 switch (type->type) {
1916 case MONO_TYPE_VOID:
1917 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1924 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1928 case MONO_TYPE_FNPTR:
1929 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1930 case MONO_TYPE_CLASS:
1931 case MONO_TYPE_STRING:
1932 case MONO_TYPE_OBJECT:
1933 case MONO_TYPE_SZARRAY:
1934 case MONO_TYPE_ARRAY:
1935 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1938 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1941 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1943 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1945 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1946 case MONO_TYPE_VALUETYPE:
/* Enums dispatch again on their underlying base type. */
1947 if (type->data.klass->enumtype) {
1948 type = mono_class_enum_basetype (type->data.klass);
1951 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1952 case MONO_TYPE_TYPEDBYREF:
1953 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1954 case MONO_TYPE_GENERICINST:
1955 type = &type->data.generic_class->container_class->byval_arg;
1958 case MONO_TYPE_MVAR:
/* Gsharedvt type variables are returned like value types. */
1960 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1962 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1967 //XXX this ignores if t is byref
1968 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1971 * target_type_is_incompatible:
1972 * @cfg: MonoCompile context
1974 * Check that the item @arg on the evaluation stack can be stored
1975 * in the target type (can be a local, or field, etc).
1976 * The cfg arg can be used to check if we need verification or just
1979 * Returns: non-0 value if arg can't be stored on a target.
1982 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1984 MonoType *simple_type;
/* Byref targets accept managed pointers (STACK_MP) of compatible class and
 * native pointers (STACK_PTR). */
1987 if (target->byref) {
1988 /* FIXME: check that the pointed to types match */
1989 if (arg->type == STACK_MP) {
1990 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1991 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1992 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1994 /* if the target is native int& or same type */
1995 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1998 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1999 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2000 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2004 if (arg->type == STACK_PTR)
/* Non-byref targets: dispatch on the lowered (underlying) type. */
2009 simple_type = mini_get_underlying_type (target);
2010 switch (simple_type->type) {
2011 case MONO_TYPE_VOID:
2019 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2023 /* STACK_MP is needed when setting pinned locals */
2024 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2029 case MONO_TYPE_FNPTR:
2031 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2032 * in native int. (#688008).
2034 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2037 case MONO_TYPE_CLASS:
2038 case MONO_TYPE_STRING:
2039 case MONO_TYPE_OBJECT:
2040 case MONO_TYPE_SZARRAY:
2041 case MONO_TYPE_ARRAY:
2042 if (arg->type != STACK_OBJ)
2044 /* FIXME: check type compatibility */
2048 if (arg->type != STACK_I8)
2052 if (arg->type != cfg->r4_stack_type)
2056 if (arg->type != STACK_R8)
/* Value types must match both the stack kind and the exact class. */
2059 case MONO_TYPE_VALUETYPE:
2060 if (arg->type != STACK_VTYPE)
2062 klass = mono_class_from_mono_type (simple_type);
2063 if (klass != arg->klass)
2066 case MONO_TYPE_TYPEDBYREF:
2067 if (arg->type != STACK_VTYPE)
2069 klass = mono_class_from_mono_type (simple_type);
2070 if (klass != arg->klass)
2073 case MONO_TYPE_GENERICINST:
2074 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2075 MonoClass *target_class;
2076 if (arg->type != STACK_VTYPE)
2078 klass = mono_class_from_mono_type (simple_type);
2079 target_class = mono_class_from_mono_type (target);
2080 /* The second cases is needed when doing partial sharing */
2081 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2085 if (arg->type != STACK_OBJ)
2087 /* FIXME: check type compatibility */
/* Generic type variables only reach here under generic sharing. */
2091 case MONO_TYPE_MVAR:
2092 g_assert (cfg->gshared);
2093 if (mini_type_var_is_vt (simple_type)) {
2094 if (arg->type != STACK_VTYPE)
2097 if (arg->type != STACK_OBJ)
2102 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2108 * Prepare arguments for passing to a function call.
2109 * Return a non-zero value if the arguments can't be passed to the given
2111 * The type checks are not yet complete and some conversions may need
2112 * casts on 32 or 64 bit architectures.
2114 * FIXME: implement this using target_type_is_incompatible ()
2117 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2119 MonoType *simple_type;
/* For instance calls, args [0] is 'this' and must be a reference or pointer. */
2123 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2127 for (i = 0; i < sig->param_count; ++i) {
/* Byref parameters accept managed or native pointers. */
2128 if (sig->params [i]->byref) {
2129 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2133 simple_type = mini_get_underlying_type (sig->params [i]);
2135 switch (simple_type->type) {
2136 case MONO_TYPE_VOID:
2145 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2151 case MONO_TYPE_FNPTR:
2152 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2155 case MONO_TYPE_CLASS:
2156 case MONO_TYPE_STRING:
2157 case MONO_TYPE_OBJECT:
2158 case MONO_TYPE_SZARRAY:
2159 case MONO_TYPE_ARRAY:
2160 if (args [i]->type != STACK_OBJ)
2165 if (args [i]->type != STACK_I8)
2169 if (args [i]->type != cfg->r4_stack_type)
2173 if (args [i]->type != STACK_R8)
/* Enums re-dispatch on their underlying base type. */
2176 case MONO_TYPE_VALUETYPE:
2177 if (simple_type->data.klass->enumtype) {
2178 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2181 if (args [i]->type != STACK_VTYPE)
2184 case MONO_TYPE_TYPEDBYREF:
2185 if (args [i]->type != STACK_VTYPE)
2188 case MONO_TYPE_GENERICINST:
2189 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
/* Gsharedvt type variables are passed like value types. */
2192 case MONO_TYPE_MVAR:
2194 if (args [i]->type != STACK_VTYPE)
2198 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an *CALL_MEMBASE (virtual dispatch) opcode to its direct-call
 * counterpart. NOTE(review): the return lines are missing from this excerpt;
 * each case presumably returns the matching OP_*CALL opcode. */
2206 callvirt_to_call (int opcode)
2209 case OP_CALL_MEMBASE:
2211 case OP_VOIDCALL_MEMBASE:
2213 case OP_FCALL_MEMBASE:
2215 case OP_RCALL_MEMBASE:
2217 case OP_VCALL_MEMBASE:
2219 case OP_LCALL_MEMBASE:
2222 g_assert_not_reached ();
/* Map an *CALL_MEMBASE (virtual dispatch) opcode to its register-indirect
 * (*CALL_REG) counterpart, for calls through a computed address. */
2229 callvirt_to_call_reg (int opcode)
2232 case OP_CALL_MEMBASE:
2234 case OP_VOIDCALL_MEMBASE:
2235 return OP_VOIDCALL_REG;
2236 case OP_FCALL_MEMBASE:
2237 return OP_FCALL_REG;
2238 case OP_RCALL_MEMBASE:
2239 return OP_RCALL_REG;
2240 case OP_VCALL_MEMBASE:
2241 return OP_VCALL_REG;
2242 case OP_LCALL_MEMBASE:
2243 return OP_LCALL_REG;
2245 g_assert_not_reached ();
2251 /* Either METHOD or IMT_ARG needs to be set */
/* Load the IMT argument (either the given IMT_ARG register value or a
 * METHOD constant) and attach it to CALL in MONO_ARCH_IMT_REG.
 * LLVM and non-LLVM paths differ in how the register is recorded. */
2253 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2257 if (COMPILE_LLVM (cfg)) {
2259 method_reg = alloc_preg (cfg);
2260 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2262 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2263 method_reg = ins->dreg;
/* LLVM needs the register remembered on the call for its own lowering. */
2267 call->imt_arg_reg = method_reg;
2269 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same value, registered only as an out-arg register. */
2274 method_reg = alloc_preg (cfg);
2275 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2277 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2278 method_reg = ins->dreg;
2281 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 * targeting TARGET. NOTE(review): the assignments of ji->ip/ji->type are
 * not visible in this excerpt. */
2284 static MonoJumpInfo *
2285 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2287 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2291 ji->data.target = target;
/* Wrapper over mono_class_check_context_used () — presumably gated on
 * cfg->gshared in lines not visible in this excerpt. */
2297 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2300 return mono_class_check_context_used (klass);
/* Wrapper over mono_method_check_context_used () — presumably gated on
 * cfg->gshared in lines not visible in this excerpt. */
2306 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2309 return mono_method_check_context_used (method);
2315 * check_method_sharing:
2317 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2320 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2322 gboolean pass_vtable = FALSE;
2323 gboolean pass_mrgctx = FALSE;
/* A static or valuetype method on a generic class may need its vtable. */
2325 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2326 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2327 gboolean sharable = FALSE;
2329 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2333 * Pass vtable iff target method might
2334 * be shared, which means that sharing
2335 * is enabled for its class and its
2336 * context is sharable (and it's not a
2339 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* A generic method (method_inst set) needs an mrgctx instead of a vtable. */
2343 if (mini_method_get_context (cmethod) &&
2344 mini_method_get_context (cmethod)->method_inst) {
2345 g_assert (!pass_vtable);
2347 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2350 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Write results only through the out parameters the caller supplied. */
2355 if (out_pass_vtable)
2356 *out_pass_vtable = pass_vtable;
2357 if (out_pass_mrgctx)
2358 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for SIG/ARGS: select the call opcode, wire up
 *   valuetype-return handling, do the soft-float R4 argument conversion,
 *   and run the backend's argument lowering. Flags select indirect (CALLI),
 *   virtual, tail-call, rgctx and unbox-trampoline variants.
 */
2361 inline static MonoCallInst *
2362 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2363 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2367 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler leave event first. */
2375 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2377 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2379 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2382 call->signature = sig;
2383 call->rgctx_reg = rgctx;
2384 sig_ret = mini_get_underlying_type (sig->ret);
2386 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* First vtype-return branch (presumably the gsharedvt/vret_addr case;
 * the guarding condition is not visible in this excerpt). */
2389 if (mini_type_is_vtype (sig_ret)) {
2390 call->vret_var = cfg->vret_addr;
2391 //g_assert_not_reached ();
2393 } else if (mini_type_is_vtype (sig_ret)) {
2394 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2397 temp->backend.is_pinvoke = sig->pinvoke;
2400 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2401 * address of return value to increase optimization opportunities.
2402 * Before vtype decomposition, the dreg of the call ins itself represents the
2403 * fact the call modifies the return value. After decomposition, the call will
2404 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2405 * will be transformed into an LDADDR.
2407 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2408 loada->dreg = alloc_preg (cfg);
2409 loada->inst_p0 = temp;
2410 /* We reference the call too since call->dreg could change during optimization */
2411 loada->inst_p1 = call;
2412 MONO_ADD_INS (cfg->cbb, loada);
2414 call->inst.dreg = temp->dreg;
2416 call->vret_var = loada;
2417 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2418 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2420 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2421 if (COMPILE_SOFT_FLOAT (cfg)) {
2423 * If the call has a float argument, we would need to do an r8->r4 conversion using
2424 * an icall, but that cannot be done during the call sequence since it would clobber
2425 * the call registers + the stack. So we do it before emitting the call.
2427 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2429 MonoInst *in = call->args [i];
2431 if (i >= sig->hasthis)
2432 t = sig->params [i - sig->hasthis];
2434 t = &mono_defaults.int_class->byval_arg;
2435 t = mono_type_get_underlying_type (t);
2437 if (!t->byref && t->type == MONO_TYPE_R4) {
2438 MonoInst *iargs [1];
2442 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2444 /* The result will be in an int vreg */
2445 call->args [i] = conv;
2451 call->need_unbox_trampoline = unbox_trampoline;
/* Hand the call to the LLVM or native backend for argument lowering. */
2454 if (COMPILE_LLVM (cfg))
2455 mono_llvm_emit_call (cfg, call);
2457 mono_arch_emit_call (cfg, call);
2459 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area and mark the method as calling. */
2462 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2463 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the runtime-generic-context argument in RGCTX_REG to CALL via
 * MONO_ARCH_RGCTX_REG and record its use on the MonoCompile. */
2469 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2471 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2472 cfg->uses_rgctx_reg = TRUE;
2473 call->rgctx_reg = TRUE;
2475 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with the given SIG/ARGS, optionally
 *   passing an IMT argument and an rgctx argument. For pinvoke wrappers with
 *   calling-convention checking enabled, brackets the call with SP capture
 *   and a post-call SP comparison that raises ExecutionEngineException on
 *   stack imbalance.
 */
2479 inline static MonoInst*
2480 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2485 gboolean check_sp = FALSE;
2487 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2488 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2490 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own register before argument lowering. */
2495 rgctx_reg = mono_alloc_preg (cfg);
2496 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record the pre-call stack pointer for the imbalance check. */
2500 if (!cfg->stack_inbalance_var)
2501 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2503 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2504 ins->dreg = cfg->stack_inbalance_var->dreg;
2505 MONO_ADD_INS (cfg->cbb, ins);
2508 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The indirect target address goes in sreg1. */
2510 call->inst.sreg1 = addr->dreg;
2513 emit_imt_argument (cfg, call, NULL, imt_arg);
2515 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Compare post-call SP against the saved value; restore it before raising. */
2520 sp_reg = mono_alloc_preg (cfg);
2522 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2524 MONO_ADD_INS (cfg->cbb, ins);
2526 /* Restore the stack so we don't crash when throwing the exception */
2527 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2528 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2529 MONO_ADD_INS (cfg->cbb, ins);
2531 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2532 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2536 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2538 return (MonoInst*)call;
2542 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2545 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2548 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2549 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2551 #ifndef DISABLE_REMOTING
2552 gboolean might_be_remote = FALSE;
2554 gboolean virtual_ = this_ins != NULL;
2555 gboolean enable_for_aot = TRUE;
2558 MonoInst *call_target = NULL;
2560 gboolean need_unbox_trampoline;
2563 sig = mono_method_signature (method);
2565 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2566 g_assert_not_reached ();
2569 rgctx_reg = mono_alloc_preg (cfg);
2570 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2573 if (method->string_ctor) {
2574 /* Create the real signature */
2575 /* FIXME: Cache these */
2576 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2577 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2582 context_used = mini_method_check_context_used (cfg, method);
2584 #ifndef DISABLE_REMOTING
2585 might_be_remote = this_ins && sig->hasthis &&
2586 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2587 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2589 if (might_be_remote && context_used) {
2592 g_assert (cfg->gshared);
2594 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2596 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2600 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2601 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2603 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2605 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2607 #ifndef DISABLE_REMOTING
2608 if (might_be_remote)
2609 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2612 call->method = method;
2613 call->inst.flags |= MONO_INST_HAS_METHOD;
2614 call->inst.inst_left = this_ins;
2615 call->tail_call = tail;
2618 int vtable_reg, slot_reg, this_reg;
2621 this_reg = this_ins->dreg;
2623 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2624 MonoInst *dummy_use;
2626 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2628 /* Make a call to delegate->invoke_impl */
2629 call->inst.inst_basereg = this_reg;
2630 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2631 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2633 /* We must emit a dummy use here because the delegate trampoline will
2634 replace the 'this' argument with the delegate target making this activation
2635 no longer a root for the delegate.
2636 This is an issue for delegates that target collectible code such as dynamic
2637 methods of GC'able assemblies.
2639 For a test case look into #667921.
2641 FIXME: a dummy use is not the best way to do it as the local register allocator
2642 will put it on a caller save register and spil it around the call.
2643 Ideally, we would either put it on a callee save register or only do the store part.
2645 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2647 return (MonoInst*)call;
2650 if ((!cfg->compile_aot || enable_for_aot) &&
2651 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2652 (MONO_METHOD_IS_FINAL (method) &&
2653 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2654 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2656 * the method is not virtual, we just need to ensure this is not null
2657 * and then we can call the method directly.
2659 #ifndef DISABLE_REMOTING
2660 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2662 * The check above ensures method is not gshared, this is needed since
2663 * gshared methods can't have wrappers.
2665 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2669 if (!method->string_ctor)
2670 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2672 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2673 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2675 * the method is virtual, but we can statically dispatch since either
2676 * it's class or the method itself are sealed.
2677 * But first we need to ensure it's not a null reference.
2679 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2681 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2682 } else if (call_target) {
2683 vtable_reg = alloc_preg (cfg);
2684 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2686 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2687 call->inst.sreg1 = call_target->dreg;
2688 call->inst.flags &= !MONO_INST_HAS_METHOD;
2690 vtable_reg = alloc_preg (cfg);
2691 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2692 if (mono_class_is_interface (method->klass)) {
2693 guint32 imt_slot = mono_method_get_imt_slot (method);
2694 emit_imt_argument (cfg, call, call->method, imt_arg);
2695 slot_reg = vtable_reg;
2696 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2698 slot_reg = vtable_reg;
2699 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2700 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2702 g_assert (mono_method_signature (method)->generic_param_count);
2703 emit_imt_argument (cfg, call, call->method, imt_arg);
2707 call->inst.sreg1 = slot_reg;
2708 call->inst.inst_offset = offset;
2709 call->is_virtual = TRUE;
2713 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2716 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2718 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a non-virtual call to METHOD using the method's
 * own signature, with no tail call, no imt argument and no rgctx argument.
 */
2722 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2724 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native address FUNC with signature SIG.
 * The call is emitted into the current basic block and the call instruction
 * is returned.
 */
2728 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* all options (dynamic, virtual, tail, rgctx, unbox trampoline) are disabled */
2735 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2738 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2740 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C function address is FUNC. The icall
 * is looked up by address and invoked through its registered wrapper.
 */
2744 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2746 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2750 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2754  * mono_emit_abs_call:
2756  * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2758 inline static MonoInst*
2759 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2760 MonoMethodSignature *sig, MonoInst **args)
2762 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2766 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* record the patch info so the ABS address can be mapped back to it later */
2769 if (cfg->abs_patches == NULL)
2770 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2771 g_hash_table_insert (cfg->abs_patches, ji, ji);
2772 ins = mono_emit_native_call (cfg, ji, sig, args);
/* mark the call target as a patch rather than a real native address */
2773 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG with one extra trailing pointer parameter appended,
 * used for calls which pass an rgctx/extra argument after the normal args.
 * NOTE(review): the copy is g_malloc ()'ed (see FIXME below) — the caller
 * presumably owns/leaks it; confirm lifetime expectations.
 */
2777 static MonoMethodSignature*
2778 sig_to_rgctx_sig (MonoMethodSignature *sig)
2780 // FIXME: memory allocation
2781 MonoMethodSignature *res;
/* room for the original params plus the extra rgctx argument */
2784 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2785 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2786 res->param_count = sig->param_count + 1;
2787 for (i = 0; i < sig->param_count; ++i)
2788 res->params [i] = sig->params [i];
/* the appended argument is typed as an opaque native int pointer */
2789 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2793 /* Make an indirect call to FSIG passing an additional argument */
2795 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2797 MonoMethodSignature *csig;
2798 MonoInst *args_buf [16];
2800 int i, pindex, tmp_reg;
2802 /* Make a call with an rgctx/extra arg */
/* small signatures use the on-stack buffer, larger ones the mempool */
2803 if (fsig->param_count + 2 < 16)
2806 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* copy 'this' (if present) followed by the original arguments */
2809 args [pindex ++] = orig_args [0];
2810 for (i = 0; i < fsig->param_count; ++i)
2811 args [pindex ++] = orig_args [fsig->hasthis + i];
/* append the extra argument taken from ARG_REG */
2812 tmp_reg = alloc_preg (cfg);
2813 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* call through CALL_TARGET with the widened signature */
2814 csig = sig_to_rgctx_sig (fsig);
2815 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2818 /* Emit an indirect call to the function descriptor ADDR */
2820 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2822 int addr_reg, arg_reg;
2823 MonoInst *call_target;
/* this code path is only used in llvm-only mode */
2825 g_assert (cfg->llvm_only);
2828 * addr points to a <addr, arg> pair, load both of them, and
2829 * make a call to addr, passing arg as an extra arg.
/* first word of the descriptor: the actual call target */
2831 addr_reg = alloc_preg (cfg);
2832 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
/* second word of the descriptor: the extra argument */
2833 arg_reg = alloc_preg (cfg);
2834 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2836 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls can be called directly instead of through a wrapper.
 * Disabled under LLVM (unless llvm-only), when sdb sequence points are being
 * generated, or when direct icalls are explicitly disabled on the cfg.
 */
2840 direct_icalls_enabled (MonoCompile *cfg)
2844 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2846 if (cfg->compile_llvm && !cfg->llvm_only)
2849 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO. When the icall cannot raise
 * and direct icalls are enabled, the (lazily created) icall wrapper is
 * inlined instead of called; otherwise a normal wrapped call is emitted.
 */
2855 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2858 * Call the jit icall without a wrapper if possible.
2859 * The wrapper is needed for the following reasons:
2860 * - to handle exceptions thrown using mono_raise_exceptions () from the
2861 * icall function. The EH code needs the lmf frame pushed by the
2862 * wrapper to be able to unwind back to managed code.
2863 * - to be able to do stack walks for asynchronously suspended
2864 * threads when debugging.
2866 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* lazily create and publish the wrapper; the barrier orders the store
 * with respect to other threads reading info->wrapper_method */
2870 if (!info->wrapper_method) {
2871 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2872 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2874 mono_memory_barrier ();
2878 * Inline the wrapper method, which is basically a call to the C icall, and
2879 * an exception check.
2881 costs = inline_method (cfg, info->wrapper_method, NULL,
2882 args, NULL, il_offset, TRUE);
2883 g_assert (costs > 0);
2884 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* fallback: call through the registered icall wrapper */
2888 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG to full register
 * width. Applies to pinvoke/LLVM calls returning small non-byref integers,
 * since native code may leave the upper bits uninitialized.
 */
2893 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2895 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2896 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2900 * Native code might return non register sized integers
2901 * without initializing the upper bits.
/* pick the sign/zero-extension matching the return type's load opcode */
2903 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2904 case OP_LOADI1_MEMBASE:
2905 widen_op = OP_ICONV_TO_I1;
2907 case OP_LOADU1_MEMBASE:
2908 widen_op = OP_ICONV_TO_U1;
2910 case OP_LOADI2_MEMBASE:
2911 widen_op = OP_ICONV_TO_I2;
2913 case OP_LOADU2_MEMBASE:
2914 widen_op = OP_ICONV_TO_U2;
/* emit the widening move, keeping the original stack type */
2920 if (widen_op != -1) {
2921 int dreg = alloc_preg (cfg);
2924 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2925 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit IR which throws a method access exception for CALLER accessing
 * CALLEE, by calling the mono_throw_method_access icall with both methods.
 */
2936 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2938 MonoInst *args [16];
2940 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2941 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2943 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *
 *   Return the cached corlib String.memcpy (3 args) helper method.
 * Aborts with g_error () if the method is missing (old corlib).
 */
2947 get_memcpy_method (void)
2949 static MonoMethod *memcpy_method = NULL;
2950 if (!memcpy_method) {
2951 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2953 g_error ("Old corlib found. Install a new one");
2955 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build into WB_BITMAP a bitmap with one bit per pointer-sized
 * slot of KLASS (starting at byte OFFSET) set for every slot holding a GC
 * reference. Static fields are skipped; valuetype fields are recursed into
 * when they contain references.
 */
2959 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2961 MonoClassField *field;
2962 gpointer iter = NULL;
2964 while ((field = mono_class_get_fields (klass, &iter))) {
2967 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* for valuetypes field->offset includes the (absent) object header */
2969 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2970 if (mini_type_is_reference (mono_field_get_type (field))) {
/* reference slots must be pointer aligned */
2971 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2972 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2974 MonoClass *field_class = mono_class_from_mono_type (field->type);
2975 if (field_class->has_references)
2976 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Three strategies,
 * in order of preference: a dedicated backend opcode, inline card-table
 * marking, or a call to the generic GC write barrier method. No-op when
 * write barriers are disabled on the cfg.
 */
2982 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2984 int card_table_shift_bits;
2985 gpointer card_table_mask;
2987 MonoInst *dummy_use;
2988 int nursery_shift_bits;
2989 size_t nursery_size;
2991 if (!cfg->gen_write_barriers)
2994 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2996 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* fast path: backend supplies a single card-table write barrier opcode */
2998 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3001 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3002 wbarrier->sreg1 = ptr->dreg;
3003 wbarrier->sreg2 = value->dreg;
3004 MONO_ADD_INS (cfg->cbb, wbarrier);
/* inline card marking: compute card index from the target address */
3005 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3006 int offset_reg = alloc_preg (cfg);
3010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3011 if (card_table_mask)
3012 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3014 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3015 * IMM's larger than 32bits.
3017 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3018 card_reg = ins->dreg;
/* mark the card byte */
3020 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3021 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* slow path: call the generic GC write barrier */
3023 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3024 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
3027 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0]. Small copies are unrolled pointer-by-pointer
 * with per-slot barriers; larger ones fall back to the
 * mono_gc_wbarrier_value_copy_bitmap icall using a reference bitmap.
 * Presumably returns whether the copy was emitted — the return statements
 * are not visible here, confirm against the full source.
 */
3031 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3033 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3034 unsigned need_wb = 0;
3039 /*types with references can't have alignment smaller than sizeof(void*) */
3040 if (align < SIZEOF_VOID_P)
3043 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3044 if (size > 32 * SIZEOF_VOID_P)
3047 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3049 /* We don't unroll more than 5 stores to avoid code bloat. */
3050 if (size > 5 * SIZEOF_VOID_P) {
3051 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3052 size += (SIZEOF_VOID_P - 1);
3053 size &= ~(SIZEOF_VOID_P - 1);
3055 EMIT_NEW_ICONST (cfg, iargs [2], size);
3056 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3057 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* unrolled copy: walk both buffers one pointer-sized word at a time */
3061 destreg = iargs [0]->dreg;
3062 srcreg = iargs [1]->dreg;
3065 dest_ptr_reg = alloc_preg (cfg);
3066 tmp_reg = alloc_preg (cfg);
3069 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3071 while (size >= SIZEOF_VOID_P) {
3072 MonoInst *load_inst;
3073 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3074 load_inst->dreg = tmp_reg;
3075 load_inst->inst_basereg = srcreg;
3076 load_inst->inst_offset = offset;
3077 MONO_ADD_INS (cfg->cbb, load_inst);
3079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* barrier only for slots the bitmap marked as references (presumably
 * guarded by a need_wb check on an elided line — confirm) */
3082 emit_write_barrier (cfg, iargs [0], load_inst);
3084 offset += SIZEOF_VOID_P;
3085 size -= SIZEOF_VOID_P;
3088 /*tmp += sizeof (void*)*/
3089 if (size >= SIZEOF_VOID_P) {
3090 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3091 MONO_ADD_INS (cfg->cbb, iargs [0]);
3095 /* Those cannot be references since size < sizeof (void*) */
/* copy the sub-pointer-sized tail in 4/2/1 byte chunks */
3097 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3098 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3104 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3105 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3111 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3112 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3121 * Emit code to copy a valuetype of type @klass whose address is stored in
3122 * @src->dreg to memory whose address is stored at @dest->dreg.
3125 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3127 MonoInst *iargs [4];
3130 MonoMethod *memcpy_method;
3131 MonoInst *size_ins = NULL;
3132 MonoInst *memcpy_ins = NULL;
3136 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3139 * This check breaks with spilled vars... need to handle it during verification anyway.
3140 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* for gsharedvt, size and memcpy routine are fetched at runtime */
3143 if (mini_is_gsharedvt_klass (klass)) {
3145 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3146 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3150 n = mono_class_native_size (klass, &align);
3152 n = mono_class_value_size (klass, &align);
3154 /* if native is true there should be no references in the struct */
3155 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3156 /* Avoid barriers when storing to the stack */
3157 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3158 (dest->opcode == OP_LDADDR))) {
3164 context_used = mini_class_check_context_used (cfg, klass);
3166 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3167 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3169 } else if (context_used) {
3170 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3172 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3173 if (!cfg->compile_aot)
3174 mono_class_compute_gc_descriptor (klass);
/* barrier-aware value copy through an icall */
3178 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3180 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no barriers needed: plain memcpy, inlined when small */
3185 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3186 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3187 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3192 iargs [2] = size_ins;
3194 EMIT_NEW_ICONST (cfg, iargs [2], n);
3196 memcpy_method = get_memcpy_method ();
3198 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3200 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the cached corlib String.memset (3 args) helper method.
 * Aborts with g_error () if the method is missing (old corlib).
 */
3205 get_memset_method (void)
3207 static MonoMethod *memset_method = NULL;
3208 if (!memset_method) {
3209 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3211 g_error ("Old corlib found. Install a new one");
3213 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype KLASS at address DEST->dreg.
 * Uses a runtime-sized bzero for gsharedvt classes, an inline memset for
 * small sizes, and a call to the corlib memset helper otherwise.
 */
3217 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3219 MonoInst *iargs [3];
3222 MonoMethod *memset_method;
3223 MonoInst *size_ins = NULL;
3224 MonoInst *bzero_ins = NULL;
3225 static MonoMethod *bzero_method;
3227 /* FIXME: Optimize this for the case when dest is an LDADDR */
3228 mono_class_init (klass);
/* gsharedvt: size and bzero routine are only known at runtime */
3229 if (mini_is_gsharedvt_klass (klass)) {
3230 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3231 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3233 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3234 g_assert (bzero_method);
3236 iargs [1] = size_ins;
3237 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3241 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3243 n = mono_class_value_size (klass, &align);
/* small enough: emit an inline memset */
3245 if (n <= sizeof (gpointer) * 8) {
3246 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* otherwise call the corlib memset helper with (dest, 0, n) */
3249 memset_method = get_memset_method ();
3251 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3252 EMIT_NEW_ICONST (cfg, iargs [2], n);
3253 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3260 * Emit IR to return either the this pointer for instance method,
3261 * or the mrgctx for static methods.
3264 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3266 MonoInst *this_ins = NULL;
/* only valid under generic sharing */
3268 g_assert (cfg->gshared);
/* non-static, non-valuetype, class-context only: 'this' carries the context */
3270 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3271 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3272 !method->klass->valuetype)
3273 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* method context used: load the mrgctx from its variable */
3275 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3276 MonoInst *mrgctx_loc, *mrgctx_var;
3278 g_assert (!this_ins);
3279 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3281 mrgctx_loc = mono_get_vtable_var (cfg);
3282 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* static methods / valuetypes: the vtable variable holds the context */
3285 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3286 MonoInst *vtable_loc, *vtable_var;
3288 g_assert (!this_ins);
3290 vtable_loc = mono_get_vtable_var (cfg);
3291 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* if the variable actually holds an mrgctx, load the vtable out of it */
3293 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3294 MonoInst *mrgctx_var = vtable_var;
3297 vtable_reg = alloc_preg (cfg);
3298 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3299 vtable_var->type = STACK_PTR;
/* default: load the vtable out of 'this' */
3307 vtable_reg = alloc_preg (cfg);
3308 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx entry describing a lazy fetch of INFO_TYPE
 * data for the patch (PATCH_TYPE, PATCH_DATA) relative to METHOD.
 * IN_MRGCTX selects between the method rgctx and the class rgctx.
 */
3313 static MonoJumpInfoRgctxEntry *
3314 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3316 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3317 res->method = method;
3318 res->in_mrgctx = in_mrgctx;
3319 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3320 res->data->type = patch_type;
3321 res->data->data.target = patch_data;
3322 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit inline IR to fetch the rgctx entry ENTRY from RGCTX. One path calls
 * the fill icall directly (no fastpath, slot not a compile-time constant);
 * the other walks the rgctx array chain inline, falling back to the fill
 * icall when a level or the slot itself is still NULL. The two paths are
 * presumably selected by a compile-mode #if/if elided from this view.
 */
3327 static inline MonoInst*
3328 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3330 MonoInst *args [16];
3333 // FIXME: No fastpath since the slot is not a compile time constant
3335 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3336 if (entry->in_mrgctx)
3337 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3339 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3343 * FIXME: This can be called during decompose, which is a problem since it creates
3345 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3347 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3349 MonoBasicBlock *is_null_bb, *end_bb;
3350 MonoInst *res, *ins, *call;
/* decode the slot into (mrgctx?, depth, index) */
3353 slot = mini_get_rgctx_entry_slot (entry);
3355 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3356 index = MONO_RGCTX_SLOT_INDEX (slot);
3358 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find the array depth containing this index */
3359 for (depth = 0; ; ++depth) {
3360 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3362 if (index < size - 1)
3367 NEW_BBLOCK (cfg, end_bb);
3368 NEW_BBLOCK (cfg, is_null_bb);
3371 rgctx_reg = rgctx->dreg;
3373 rgctx_reg = alloc_preg (cfg);
3375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3376 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3377 NEW_BBLOCK (cfg, is_null_bb);
3379 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3380 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* walk down DEPTH levels of the rgctx array chain */
3383 for (i = 0; i < depth; ++i) {
3384 int array_reg = alloc_preg (cfg);
3386 /* load ptr to next array */
3387 if (mrgctx && i == 0)
3388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3391 rgctx_reg = array_reg;
3392 /* is the ptr null? */
3393 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3394 /* if yes, jump to actual trampoline */
3395 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* load the slot value (+1 skips the next-array pointer) */
3399 val_reg = alloc_preg (cfg);
3400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3401 /* is the slot null? */
3402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3403 /* if yes, jump to actual trampoline */
3404 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fastpath result */
3407 res_reg = alloc_preg (cfg);
3408 MONO_INST_NEW (cfg, ins, OP_MOVE);
3409 ins->dreg = res_reg;
3410 ins->sreg1 = val_reg;
3411 MONO_ADD_INS (cfg->cbb, ins);
3413 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slowpath: fill the slot via the rgctx fill icall */
3416 MONO_START_BB (cfg, is_null_bb);
3418 EMIT_NEW_ICONST (cfg, args [1], index);
3420 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3422 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3423 MONO_INST_NEW (cfg, ins, OP_MOVE);
3424 ins->dreg = res_reg;
3425 ins->sreg1 = call->dreg;
3426 MONO_ADD_INS (cfg->cbb, ins);
3427 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3429 MONO_START_BB (cfg, end_bb);
3438 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3441 static inline MonoInst*
3442 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* either inline the fetch, or call the lazy-fetch trampoline for ENTRY */
3445 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3447 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE data for KLASS from the rgctx.
 */
3451 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3452 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3454 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3455 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3457 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the RGCTX_TYPE data for the signature SIG from the rgctx.
 */
3461 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3462 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3464 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3465 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3467 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the RGCTX_TYPE data for a gsharedvt call to CMETHOD
 * with signature SIG from the rgctx.
 */
3471 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3472 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3474 MonoJumpInfoGSharedVtCall *call_info;
3475 MonoJumpInfoRgctxEntry *entry;
3478 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3479 call_info->sig = sig;
3480 call_info->method = cmethod;
3482 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3483 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3485 return emit_rgctx_fetch (cfg, rgctx, entry);
3489 * emit_get_rgctx_virt_method:
3491 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3494 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3495 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3497 MonoJumpInfoVirtMethod *info;
3498 MonoJumpInfoRgctxEntry *entry;
/* describe the (receiver class, virtual method) pair for the rgctx entry */
3501 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3502 info->klass = klass;
3503 info->method = virt_method;
3505 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3506 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3508 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the gsharedvt info for CMETHOD from the rgctx.
 */
3512 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3513 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3515 MonoJumpInfoRgctxEntry *entry;
3518 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3519 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3521 return emit_rgctx_fetch (cfg, rgctx, entry);
3525 * emit_get_rgctx_method:
3527 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3528 * normal constants, else emit a load from the rgctx.
3531 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3532 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* no generic context: the value is a compile-time constant */
3534 if (!context_used) {
3537 switch (rgctx_type) {
3538 case MONO_RGCTX_INFO_METHOD:
3539 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3541 case MONO_RGCTX_INFO_METHOD_RGCTX:
3542 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* other info types are not expected without a context */
3545 g_assert_not_reached ();
/* shared code: fetch the value from the rgctx at runtime */
3548 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3549 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3551 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE data for FIELD from the rgctx.
 */
3556 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3557 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3559 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3560 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3562 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the slot index in cfg->gsharedvt_info for (DATA, RGCTX_TYPE),
 * reusing an existing matching entry when possible (except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets a fresh slot) and
 * growing the entries array (doubling, starting at 16) when full.
 */
3566 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3568 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3569 MonoRuntimeGenericContextInfoTemplate *template_;
/* look for an existing entry with the same type and data */
3574 for (i = 0; i < info->num_entries; ++i) {
3575 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3577 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* grow the entries array when it is full */
3581 if (info->num_entries == info->count_entries) {
3582 MonoRuntimeGenericContextInfoTemplate *new_entries;
3583 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3585 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3587 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3588 info->entries = new_entries;
3589 info->count_entries = new_count_entries;
/* append the new template */
3592 idx = info->num_entries;
3593 template_ = &info->entries [idx];
3594 template_->info_type = rgctx_type;
3595 template_->data = data;
3597 info->num_entries ++;
3603 * emit_get_gsharedvt_info:
3605 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3608 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* map (data, type) to a slot, then load entries [idx] from the info var */
3613 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3614 /* Load info->entries [idx] */
3615 dreg = alloc_preg (cfg);
3616 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: load gsharedvt info keyed on KLASS's byval type */
3622 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3624 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3628 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the class initializer (cctor) of KLASS if it has not
 * run yet. Uses a dedicated backend opcode when available, otherwise an
 * inline initialized-flag check with an icall on the slow path.
 */
3631 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3633 MonoInst *vtable_arg;
3636 context_used = mini_class_check_context_used (cfg, klass);
/* shared code: fetch the vtable from the rgctx */
3639 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3640 klass, MONO_RGCTX_INFO_VTABLE);
/* otherwise the vtable is a compile-time constant */
3642 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3646 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3649 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3653 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3654 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3656 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3657 ins->sreg1 = vtable_arg->dreg;
3658 MONO_ADD_INS (cfg->cbb, ins);
/* inline check of vtable->initialized; call the icall only when unset */
3661 MonoBasicBlock *inited_bb;
3662 MonoInst *args [16];
3664 inited_reg = alloc_ireg (cfg);
3666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3668 NEW_BBLOCK (cfg, inited_bb);
3670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3673 args [0] = vtable_arg;
3674 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3676 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for the outermost METHOD being
 * compiled, when sequence point generation is enabled. INTR_LOC marks an
 * interruptible location; NONEMPTY_STACK flags a non-empty IL stack.
 */
3681 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3685 if (cfg->gen_seq_points && cfg->method == method) {
3686 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3688 ins->flags |= MONO_INST_NONEMPTY_STACK;
3689 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR storing the source class (from
 * the object in OBJ_REG) and the target KLASS into TLS, so a failed cast
 * can report both types. NULL_CHECK presumably guards the emission of the
 * obj == NULL bypass below — confirm against the full source.
 */
3694 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3696 if (mini_get_debug_options ()->better_cast_details) {
3697 int vtable_reg = alloc_preg (cfg);
3698 int klass_reg = alloc_preg (cfg);
3699 MonoBasicBlock *is_null_bb = NULL;
3701 int to_klass_reg, context_used;
/* skip the bookkeeping entirely when the object is NULL */
3704 NEW_BBLOCK (cfg, is_null_bb);
3706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3707 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3710 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): message ends with "\n." — trailing '.' after the newline
 * looks accidental, but changing runtime strings is out of scope here */
3712 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* record the object's actual class as class_cast_from */
3716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* record the target class as class_cast_to (rgctx load if shared) */
3721 context_used = mini_class_check_context_used (cfg, klass);
3723 MonoInst *class_ins;
3725 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3726 to_klass_reg = class_ins->dreg;
3728 to_klass_reg = alloc_preg (cfg);
3729 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3731 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3734 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Emit IR which clears the cast-details state previously stored by
 * mini_save_cast_details (). Clearing the 'from' field alone is sufficient
 * to mark the record invalid.
 */
3739 mini_reset_cast_details (MonoCompile *cfg)
3741 /* Reset the variables holding the cast details */
3742 if (mini_get_debug_options ()->better_cast_details) {
3743 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3744 /* It is enough to reset the from field */
3745 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ's runtime
 * type is exactly ARRAY_CLASS. The comparison strategy depends on the
 * compilation mode: class pointer (shared/AOT-domain code), rgctx vtable
 * (shared generic code), vtable constant (AOT) or immediate vtable (JIT).
 */
3750 * On return the caller must check @array_class for load errors
3753 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3755 int vtable_reg = alloc_preg (cfg);
3758 context_used = mini_class_check_context_used (cfg, array_class);
3760 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Load the vtable; faults (NRE) if obj is null */
3762 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3764 if (cfg->opt & MONO_OPT_SHARED) {
3765 int class_reg = alloc_preg (cfg);
3768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3769 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3770 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3771 } else if (context_used) {
3772 MonoInst *vtable_ins;
3774 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3775 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3777 if (cfg->compile_aot) {
3781 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3783 vt_reg = alloc_preg (cfg);
3784 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3785 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3788 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3794 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3796 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () for VAL. In shared generic code the
 * method address comes from the rgctx and the call is made indirectly
 * (calli / llvmonly calli); otherwise a direct method call is emitted,
 * optionally passing the vtable when method sharing requires it.
 */
3800 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3801 * generic code is generated.
3804 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3806 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3809 MonoInst *rgctx, *addr;
3811 /* FIXME: What if the class is shared? We might not
3812 have to get the address of the method from the
3814 addr = emit_get_rgctx_method (cfg, context_used, method,
3815 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3816 if (cfg->llvm_only) {
/* Remember the signature so the AOT compiler can emit a gsharedvt in wrapper for it */
3817 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3818 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3820 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3822 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3825 gboolean pass_vtable, pass_mrgctx;
3826 MonoInst *rgctx_arg = NULL;
3828 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3829 g_assert (!pass_mrgctx);
3832 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3835 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3838 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for CEE_UNBOX: verify that the boxed object's element class
 * matches KLASS (throwing InvalidCastException otherwise) and return an
 * instruction computing the address of the value payload, i.e.
 * obj + sizeof (MonoObject).
 */
3843 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3847 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3848 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3849 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3850 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3852 obj_reg = sp [0]->dreg;
3853 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3854 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3856 /* FIXME: generics */
3857 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type */
3860 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3861 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3863 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3867 MonoInst *element_class;
3869 /* This assertion is from the unboxcast insn */
3870 g_assert (klass->rank == 0);
/* Shared generic code: fetch the expected element class from the rgctx */
3872 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3873 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3875 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3876 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3878 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3879 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3880 mini_reset_cast_details (cfg);
/* Return a pointer just past the MonoObject header, i.e. to the unboxed data */
3883 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3884 MONO_ADD_INS (cfg->cbb, add);
3885 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete
 * instantiation is only known at runtime. The box type (vtype / ref /
 * nullable) is read from the gsharedvt info and branched on:
 *   - vtype:    address is obj + sizeof (MonoObject)
 *   - ref:      the reference is spilled to a temporary and its address used
 *   - nullable: Nullable<T>.Unbox is called indirectly through the rgctx
 * The result is a load of the value at the computed address.
 */
3892 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3894 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3895 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3899 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3905 args [1] = klass_inst;
/* Cast check: obj must be castable to the runtime instantiation of klass */
3908 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3910 NEW_BBLOCK (cfg, is_ref_bb);
3911 NEW_BBLOCK (cfg, is_nullable_bb);
3912 NEW_BBLOCK (cfg, end_bb);
3913 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3917 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3918 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3920 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3921 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough case: concrete type is a vtype */
3925 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3926 MONO_ADD_INS (cfg->cbb, addr);
3928 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3931 MONO_START_BB (cfg, is_ref_bb);
3933 /* Save the ref to a temporary */
3934 dreg = alloc_ireg (cfg);
3935 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3936 addr->dreg = addr_reg;
3937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3941 MONO_START_BB (cfg, is_nullable_bb);
3944 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3945 MonoInst *unbox_call;
3946 MonoMethodSignature *unbox_sig;
/* Build a signature for Nullable<T>.Unbox by hand; the concrete method can't be constructed at JIT time */
3948 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3949 unbox_sig->ret = &klass->byval_arg;
3950 unbox_sig->param_count = 1;
3951 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3954 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3956 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3958 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3959 addr->dreg = addr_reg;
3962 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3965 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken branch produced */
3968 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an instance of KLASS, choosing among several
 * strategies: managed (inlined) GC allocators, the generic
 * ves_icall_object_new* icalls, a corlib-token helper for AOT out-of-line
 * code, or a specialized allocation function from the vtable.
 * FOR_BOX indicates the allocation is for a box operation.
 */
3974 * Returns NULL and set the cfg exception on error.
3977 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3979 MonoInst *iargs [2];
3984 MonoRgctxInfoType rgctx_info;
3985 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-dependent instance size */
3986 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3988 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3990 if (cfg->opt & MONO_OPT_SHARED)
3991 rgctx_info = MONO_RGCTX_INFO_KLASS;
3993 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3994 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3996 if (cfg->opt & MONO_OPT_SHARED) {
3997 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3999 alloc_ftn = ves_icall_object_new;
4002 alloc_ftn = ves_icall_object_new_specific;
/* Prefer the inlined managed allocator when available */
4005 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4006 if (known_instance_size) {
4007 int size = mono_class_instance_size (klass);
4008 if (size < sizeof (MonoObject))
4009 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4011 EMIT_NEW_ICONST (cfg, iargs [1], size);
4013 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4016 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generic path */
4019 if (cfg->opt & MONO_OPT_SHARED) {
4020 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4021 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4023 alloc_ftn = ves_icall_object_new;
4024 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4025 /* This happens often in argument checking code, eg. throw new FooException... */
4026 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4027 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4028 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4030 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4031 MonoMethod *managed_alloc = NULL;
/* Failing to create the vtable is a type-load error */
4035 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4036 cfg->exception_ptr = klass;
4040 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4042 if (managed_alloc) {
4043 int size = mono_class_instance_size (klass);
4044 if (size < sizeof (MonoObject))
4045 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4047 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4048 EMIT_NEW_ICONST (cfg, iargs [1], size);
4049 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4051 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words */
4053 guint32 lw = vtable->klass->instance_size;
4054 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4055 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4056 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4059 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4063 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL into an instance of KLASS. Nullable<T> goes
 * through Nullable<T>.Box (); gsharedvt types branch at runtime on the box
 * type (vtype / ref / nullable); everything else allocates an object with
 * handle_alloc () and stores the value past the MonoObject header.
 */
4067 * Returns NULL and set the cfg exception on error.
4070 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4072 MonoInst *alloc, *ins;
4074 if (mono_class_is_nullable (klass)) {
4075 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4078 if (cfg->llvm_only && cfg->gsharedvt) {
4079 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4080 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4081 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4083 /* FIXME: What if the class is shared? We might not
4084 have to get the method address from the RGCTX. */
4085 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4086 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4087 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4089 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4092 gboolean pass_vtable, pass_mrgctx;
4093 MonoInst *rgctx_arg = NULL;
4095 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4096 g_assert (!pass_mrgctx);
4099 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4102 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4105 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: box type is only known at runtime, branch on it */
4109 if (mini_is_gsharedvt_klass (klass)) {
4110 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4111 MonoInst *res, *is_ref, *src_var, *addr;
4114 dreg = alloc_ireg (cfg);
4116 NEW_BBLOCK (cfg, is_ref_bb);
4117 NEW_BBLOCK (cfg, is_nullable_bb);
4118 NEW_BBLOCK (cfg, end_bb);
4119 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: vtype — allocate and store the value into the new object */
4127 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4130 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4131 ins->opcode = OP_STOREV_MEMBASE;
4133 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4134 res->type = STACK_OBJ;
4136 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4139 MONO_START_BB (cfg, is_ref_bb);
4141 /* val is a vtype, so has to load the value manually */
4142 src_var = get_vreg_to_inst (cfg, val->dreg);
4144 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4145 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4147 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4150 MONO_START_BB (cfg, is_nullable_bb);
4153 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4154 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4156 MonoMethodSignature *box_sig;
4159 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4160 * construct that method at JIT time, so have to do things by hand.
4162 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4163 box_sig->ret = &mono_defaults.object_class->byval_arg;
4164 box_sig->param_count = 1;
4165 box_sig->params [0] = &klass->byval_arg;
4168 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4170 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4171 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4172 res->type = STACK_OBJ;
4176 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4178 MONO_START_BB (cfg, end_bb);
/* Plain (non-shared) box: allocate and copy the value in */
4182 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4186 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls never raise;
 * written once, then read without locking (publication via memory barrier). */
4191 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an icall) can be called directly, bypassing the
 * icall wrapper. Only a small whitelist of corlib types plus System.Math
 * is accepted.
 */
4194 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4196 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4197 if (!direct_icalls_enabled (cfg))
4201 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4202 * Whitelist a few icalls for now.
4204 if (!direct_icall_type_hash) {
4205 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4207 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4208 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4209 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4210 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-initialized table before making it visible */
4211 mono_memory_barrier ();
4212 direct_icall_type_hash = h;
4215 if (cmethod->klass == mono_defaults.math_class)
4217 /* No locking needed */
4218 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD examines its caller via a stack walk (e.g.
 * System.Type.GetType), in which case it cannot be treated as an ordinary
 * call. Only the System.Type case is visible in this chunk.
 */
4224 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4226 if (cmethod->klass == mono_defaults.systemtype_class) {
4227 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an intrinsic implementation of Enum.HasFlag () for KLASS:
 * load the enum value, AND it with the flag, and compare the result with
 * the flag ((this & flag) == flag), producing an I4 boolean. Uses 32 or
 * 64 bit opcodes depending on the enum's underlying type.
 */
4233 static G_GNUC_UNUSED MonoInst*
4234 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4236 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4237 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4240 switch (enum_type->type) {
4243 #if SIZEOF_REGISTER == 8
4255 MonoInst *load, *and_, *cmp, *ceq;
4256 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4257 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4258 int dest_reg = alloc_ireg (cfg);
4260 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4261 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4262 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4263 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4265 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends which don't support them directly */
4268 load = mono_decompose_opcode (cfg, load);
4269 and_ = mono_decompose_opcode (cfg, and_);
4270 cmp = mono_decompose_opcode (cfg, cmp);
4271 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which allocates a delegate of type KLASS bound to METHOD with
 * receiver TARGET, inlining the work normally done by mono_delegate_ctor ():
 * set the target and method fields (with write barrier), cache a per-domain
 * code slot for the compiled method, and install the invoke trampoline
 * (or call the llvmonly init icalls when cfg->llvm_only).
 */
4279 * Returns NULL and set the cfg exception on error.
4281 static G_GNUC_UNUSED MonoInst*
4282 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4286 gpointer trampoline;
4287 MonoInst *obj, *method_ins, *tramp_ins;
4291 if (virtual_ && !cfg->llvm_only) {
4292 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4295 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4299 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4303 /* Inline the contents of mono_delegate_ctor */
4305 /* Set target field */
4306 /* Optimize away setting of NULL target */
4307 if (!MONO_INS_IS_PCONST_NULL (target)) {
4308 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4309 if (cfg->gen_write_barriers) {
4310 dreg = alloc_preg (cfg);
4311 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4312 emit_write_barrier (cfg, ptr, target);
4316 /* Set method field */
4317 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4321 * To avoid looking up the compiled code belonging to the target method
4322 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4323 * store it, and we fill it after the method has been compiled.
4325 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4326 MonoInst *code_slot_ins;
4329 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or find) the code slot under the domain lock */
4331 domain = mono_domain_get ();
4332 mono_domain_lock (domain);
4333 if (!domain_jit_info (domain)->method_code_hash)
4334 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4335 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4337 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4338 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4340 mono_domain_unlock (domain);
4342 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4344 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvmonly: delegate initialization is done by runtime icalls instead of trampolines */
4347 if (cfg->llvm_only) {
4348 MonoInst *args [16];
4353 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4354 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4357 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4363 if (cfg->compile_aot) {
4364 MonoDelegateClassMethodPair *del_tramp;
4366 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4367 del_tramp->klass = klass;
4368 del_tramp->method = context_used ? NULL : method;
4369 del_tramp->is_virtual = virtual_;
4370 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4373 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4375 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4376 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4379 /* Set invoke_impl field */
4381 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4383 dreg = alloc_preg (cfg);
4384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4385 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4387 dreg = alloc_preg (cfg);
4388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4392 dreg = alloc_preg (cfg);
4393 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4396 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va icall for a multi-dim
 * 'newarr'/'newobj' of rank RANK, with the dimension arguments in SP.
 * Vararg calling conventions are not supported by LLVM, so LLVM
 * compilation is disabled for the method.
 */
4402 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4404 MonoJitICallInfo *info;
4406 /* Need to register the icall so it gets an icall wrapper */
4407 info = mono_get_array_new_va_icall (rank);
4409 cfg->flags |= MONO_CFG_HAS_VARARGS;
4411 /* mono_array_new_va () needs a vararg calling convention */
4412 cfg->exception_message = g_strdup ("array-new");
4413 cfg->disable_llvm = TRUE;
4415 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4416 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * See the header comment below; in short, the supported cases (receiver is
 * object / an interface / certain ref types, simple signatures) are routed
 * through the mono_gsharedvt_constrained_call icall with the arguments
 * packed into a localloc-ed array; anything else triggers GSHAREDVT_FAILURE.
 */
4420 * handle_constrained_gsharedvt_call:
4422 * Handle constrained calls where the receiver is a gsharedvt type.
4423 * Return the instruction representing the call. Set the cfg exception on failure.
4426 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4427 gboolean *ref_emit_widen)
4429 MonoInst *ins = NULL;
4430 gboolean emit_widen = *ref_emit_widen;
4433 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4434 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4435 * pack the arguments into an array, and do the rest of the work in in an icall.
4437 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4438 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4439 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4440 MonoInst *args [16];
4443 * This case handles calls to
4444 * - object:ToString()/Equals()/GetHashCode(),
4445 * - System.IComparable<T>:CompareTo()
4446 * - System.IEquatable<T>:Equals ()
4447 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
4451 if (mono_method_check_context_used (cmethod))
4452 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD)
4454 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4455 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4457 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4458 if (fsig->hasthis && fsig->param_count) {
4459 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4460 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4461 ins->dreg = alloc_preg (cfg);
4462 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4463 MONO_ADD_INS (cfg->cbb, ins);
4466 if (mini_is_gsharedvt_type (fsig->params [0])) {
4467 int addr_reg, deref_arg_reg;
4469 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4470 deref_arg_reg = alloc_preg (cfg);
4471 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4472 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4474 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4475 addr_reg = ins->dreg;
4476 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4478 EMIT_NEW_ICONST (cfg, args [3], 0);
4479 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4482 EMIT_NEW_ICONST (cfg, args [3], 0);
4483 EMIT_NEW_ICONST (cfg, args [4], 0);
4485 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox / deref it as needed */
4488 if (mini_is_gsharedvt_type (fsig->ret)) {
4489 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4490 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4494 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4495 MONO_ADD_INS (cfg->cbb, add);
4497 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4498 MONO_ADD_INS (cfg->cbb, ins);
4499 /* ins represents the call result */
4502 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4505 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Prepend an OP_LOAD_GOTADDR instruction to the entry basic block so
 * cfg->got_var holds the GOT address, and add a dummy use in the exit
 * block to keep the variable alive for the whole method. No-op if the
 * got var doesn't exist or was already allocated.
 */
4514 mono_emit_load_got_addr (MonoCompile *cfg)
4516 MonoInst *getaddr, *dummy_use;
4518 if (!cfg->got_var || cfg->got_var_allocated)
4521 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4522 getaddr->cil_code = cfg->header->code;
4523 getaddr->dreg = cfg->got_var->dreg;
4525 /* Add it to the start of the first bblock */
4526 if (cfg->bb_entry->code) {
4527 getaddr->next = cfg->bb_entry->code;
4528 cfg->bb_entry->code = getaddr;
4531 MONO_ADD_INS (cfg->bb_entry, getaddr);
4533 cfg->got_var_allocated = TRUE;
4536 * Add a dummy use to keep the got_var alive, since real uses might
4537 * only be generated by the back ends.
4538 * Add it to end_bblock, so the variable's lifetime covers the whole
4540 * It would be better to make the usage of the got var explicit in all
4541 * cases when the backend needs it (i.e. calls, throw etc.), so this
4542 * wouldn't be needed.
4544 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4545 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached value of the MONO_INLINELIMIT env var (default INLINE_LENGTH_LIMIT);
 * initialized lazily on first use. */
4548 static int inline_limit;
4549 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects on: inlining disabled, excessive inline depth, unreadable header,
 * noinlining/synchronized flags, MarshalByRef classes, body size over the
 * inline limit (unless AggressiveInlining), classes whose cctor cannot be
 * run or proven already-run, soft-float R4 signatures, and methods on the
 * cfg->dont_inline list.
 */
4552 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4554 MonoMethodHeaderSummary header;
4556 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4557 MonoMethodSignature *sig = mono_method_signature (method);
4561 if (cfg->disable_inline)
4566 if (cfg->inline_depth > 10)
4569 if (!mono_method_get_header_summary (method, &header))
4572 /*runtime, icall and pinvoke are checked by summary call*/
4573 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4574 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4575 (mono_class_is_marshalbyref (method->klass)) ||
4579 /* also consider num_locals? */
4580 /* Do the size check early to avoid creating vtables */
4581 if (!inline_limit_inited) {
4582 if (g_getenv ("MONO_INLINELIMIT"))
4583 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4585 inline_limit = INLINE_LENGTH_LIMIT;
4586 inline_limit_inited = TRUE;
4588 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4592 * if we can initialize the class of the method right away, we do,
4593 * otherwise we don't allow inlining if the class needs initialization,
4594 * since it would mean inserting a call to mono_runtime_class_init()
4595 * inside the inlined code
4597 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4600 if (!(cfg->opt & MONO_OPT_SHARED)) {
4601 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4602 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4603 if (method->klass->has_cctor) {
4604 vtable = mono_class_vtable (cfg->domain, method->klass);
4607 if (!cfg->compile_aot) {
4609 if (!mono_runtime_class_init_full (vtable, &error)) {
4610 mono_error_cleanup (&error);
4615 } else if (mono_class_is_before_field_init (method->klass)) {
4616 if (cfg->run_cctors && method->klass->has_cctor) {
4617 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4618 if (!method->klass->runtime_info)
4619 /* No vtable created yet */
4621 vtable = mono_class_vtable (cfg->domain, method->klass);
4624 /* This makes so that inline cannot trigger */
4625 /* .cctors: too many apps depend on them */
4626 /* running with a specific order... */
4627 if (! vtable->initialized)
4630 if (!mono_runtime_class_init_full (vtable, &error)) {
4631 mono_error_cleanup (&error);
4635 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4636 if (!method->klass->runtime_info)
4637 /* No vtable created yet */
4639 vtable = mono_class_vtable (cfg->domain, method->klass);
4642 if (!vtable->initialized)
4647 * If we're compiling for shared code
4648 * the cctor will need to be run at aot method load time, for example,
4649 * or at the end of the compilation of the inlining method.
4651 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft-float targets cannot inline methods with R4 in the signature */
4655 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4656 if (mono_arch_is_soft_float ()) {
4658 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4660 for (i = 0; i < sig->param_count; ++i)
4661 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4666 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access on KLASS from METHOD requires a
 * class-init check to be emitted. Already-initialized vtables (JIT only),
 * before-field-init classes accessed from themselves, classes with no
 * cctor to run, and static methods of the same class do not need it.
 */
4673 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4675 if (!cfg->compile_aot) {
4677 if (vtable->initialized)
4681 if (mono_class_is_before_field_init (klass)) {
4682 if (cfg->method == method)
4686 if (!mono_class_needs_cctor_run (klass, method))
4689 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4690 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of single-dimensional
 * array ARR with element type KLASS:
 *   &arr->vector [0] + index * element_size
 * with an optional bounds check (BCHECK). Uses an x86/amd64 LEA fast path
 * for power-of-two sizes, and reads the element size from the rgctx for
 * gsharedvt element types.
 */
4697 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4701 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4704 if (mini_is_gsharedvt_variable_klass (klass)) {
4707 mono_class_init (klass);
4708 size = mono_class_array_element_size (klass);
4711 mult_reg = alloc_preg (cfg);
4712 array_reg = arr->dreg;
4713 index_reg = index->dreg;
4715 #if SIZEOF_REGISTER == 8
4716 /* The array reg is 64 bits but the index reg is only 32 */
4717 if (COMPILE_LLVM (cfg)) {
4719 index2_reg = index_reg;
4721 index2_reg = alloc_preg (cfg);
4722 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4725 if (index->type == STACK_I8) {
4726 index2_reg = alloc_preg (cfg);
4727 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4729 index2_reg = index_reg;
4734 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4736 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into a single LEA for power-of-two element sizes */
4737 if (size == 1 || size == 2 || size == 4 || size == 8) {
4738 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4740 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4741 ins->klass = mono_class_get_element_class (klass);
4742 ins->type = STACK_MP;
4748 add_reg = alloc_ireg_mp (cfg);
4751 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, load it from the rgctx */
4754 g_assert (cfg->gshared);
4755 context_used = mini_class_check_context_used (cfg, klass);
4756 g_assert (context_used);
4757 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4758 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4762 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4763 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4764 ins->klass = mono_class_get_element_class (klass);
4765 ins->type = STACK_MP;
4766 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array ARR of element type KLASS, including per-dimension lower-bound
 * adjustment and range checks against the MonoArrayBounds records.
 * NOTE(review): lines are missing from this extract; comments cover only the
 * visible code.
 */
4772 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4774 int bounds_reg = alloc_preg (cfg);
4775 int add_reg = alloc_ireg_mp (cfg);
4776 int mult_reg = alloc_preg (cfg);
4777 int mult2_reg = alloc_preg (cfg);
4778 int low1_reg = alloc_preg (cfg);
4779 int low2_reg = alloc_preg (cfg);
4780 int high1_reg = alloc_preg (cfg);
4781 int high2_reg = alloc_preg (cfg);
4782 int realidx1_reg = alloc_preg (cfg);
4783 int realidx2_reg = alloc_preg (cfg);
4784 int sum_reg = alloc_preg (cfg);
4785 int index1, index2, tmpreg;
4789 mono_class_init (klass);
4790 size = mono_class_array_element_size (klass);
4792 index1 = index_ins1->dreg;
4793 index2 = index_ins2->dreg;
4795 #if SIZEOF_REGISTER == 8
4796 /* The array reg is 64 bits but the index reg is only 32 */
4797 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width on 64-bit targets */
4800 tmpreg = alloc_preg (cfg);
4801 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4803 tmpreg = alloc_preg (cfg);
4804 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4808 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4812 /* range checking */
4813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4814 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, then unsigned-compare against length */
4816 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4817 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4818 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4820 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4821 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4822 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check at the second MonoArrayBounds record */
4824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4825 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4826 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4828 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4829 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4830 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim2_length + realidx2) * size) + offsetof (MonoArray, vector) */
4832 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4833 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4835 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4836 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4838 ins->type = STACK_MP;
4840 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for an array Address/Get/Set helper
 * CMETHOD: rank 1 and (when OP_LMUL is available) rank 2 get inline fast
 * paths; otherwise fall back to a call to the marshalled Address method.
 * NOTE(review): some lines are missing from this extract.
 */
4846 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4850 MonoMethod *addr_method;
4852 MonoClass *eclass = cmethod->klass->element_class;
/* A Set method carries the value as its last parameter; exclude it from the rank */
4854 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4857 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4859 /* emit_ldelema_2 depends on OP_LMUL */
4860 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4861 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4864 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the runtime-generated Address helper */
4867 element_size = mono_class_array_element_size (eclass);
4868 addr_method = mono_marshal_get_array_address (rank, element_size);
4869 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break () call. */
4874 static MonoBreakPolicy
4875 always_insert_breakpoint (MonoMethod *method)
4877 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4880 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4883 * mono_set_break_policy:
4884 * policy_callback: the new callback function
4886 * Allow embedders to decide whether to actually obey breakpoint instructions
4887 * (both break IL instructions and Debugger.Break () method calls), for example
4888 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4889 * untrusted or semi-trusted code.
4891 * @policy_callback will be called every time a break point instruction needs to
4892 * be inserted with the method argument being the method that calls Debugger.Break()
4893 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4894 * if it wants the breakpoint to not be effective in the given method.
4895 * #MONO_BREAK_POLICY_ALWAYS is the default.
4898 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy (see missing else arm) */
4900 if (policy_callback)
4901 break_policy_func = policy_callback;
4903 break_policy_func = always_insert_breakpoint;
/*
 * Query the installed break policy for METHOD. The misspelled name
 * ("brekpoint") is the established identifier; do not rename.
 * NOTE(review): the return statements for each case are missing from this
 * extract.
 */
4907 should_insert_brekpoint (MonoMethod *method) {
4908 switch (break_policy_func (method)) {
4909 case MONO_BREAK_POLICY_ALWAYS:
4911 case MONO_BREAK_POLICY_NEVER:
4913 case MONO_BREAK_POLICY_ON_DBG:
4914 g_warning ("mdb no longer supported");
/* Unknown value from an embedder-supplied callback: warn, don't crash */
4917 g_warning ("Incorrect value returned from break policy callback");
4922 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the GetGenericValueImpl/SetGenericValueImpl icalls: copy one
 * element between the array slot (args [0][args [1]]) and the by-ref value
 * args [2]. IS_SET selects store-into-array vs. load-from-array.
 */
4924 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4926 MonoInst *addr, *store, *load;
4927 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4929 /* the bounds check is already done by the callers */
4930 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: *elem_addr = *args [2]; a reference store needs a GC write barrier */
4932 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4933 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4934 if (mini_type_is_reference (&eklass->byval_arg))
4935 emit_write_barrier (cfg, addr, load);
/* Get: *args [2] = *elem_addr */
4937 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4938 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is a reference type (CFG is unused in the visible body). */
4945 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4947 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: store sp [2] into array sp [0] at index sp [1].
 * With SAFETY_CHECKS and a non-null reference element, route through the
 * virtual-stelemref helper (which performs the array covariance check);
 * otherwise store directly, with a fast path for constant indexes.
 * NOTE(review): lines are missing from this extract.
 */
4951 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4953 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4954 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4955 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4956 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4957 MonoInst *iargs [3];
/* Ensure the helper has a vtable slot assigned before the virtual call */
4960 mono_class_setup_vtable (obj_array);
4961 g_assert (helper->slot);
4963 if (sp [0]->type != STACK_OBJ)
4965 if (sp [2]->type != STACK_OBJ)
4972 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use a value-type store */
4976 if (mini_is_gsharedvt_variable_klass (klass)) {
4979 // FIXME-VT: OP_ICONST optimization
4980 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4981 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4982 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset into the store instruction */
4983 } else if (sp [1]->opcode == OP_ICONST) {
4984 int array_reg = sp [0]->dreg;
4985 int index_reg = sp [1]->dreg;
4986 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4988 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4989 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4992 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4993 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, add GC write barrier for refs */
4995 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4996 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4997 if (generic_class_is_reference_type (cfg, klass))
4998 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: element load/store with bounds and
 * covariance checks disabled. IS_SET selects store vs. load; the element
 * class comes from the signature (param [2] for stores, return for loads).
 */
5005 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5010 eklass = mono_class_from_mono_type (fsig->params [2]);
5012 eklass = mono_class_from_mono_type (fsig->ret);
/* safety_checks == FALSE: skip the stelemref covariance path */
5015 return emit_array_store (cfg, eklass, args, FALSE);
5017 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5018 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic may be implemented as
 * a plain register move from PARAM_KLASS to RETURN_KLASS: both must be
 * reference types, or blittable value types of compatible size/category.
 * Fix: repaired mojibake — "&param_klass" had been corrupted to "¶m_klass"
 * (the "&para" sequence collapsed into the '¶' character) in four places.
 * NOTE(review): some lines (braces/returns) are missing from this extract;
 * comments cover only the visible checks.
 */
5024 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5027 int param_size, return_size;
/* Strip enum/generic wrappers down to the underlying types */
5029 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5030 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5032 if (cfg->verbose_level > 3)
5033 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5035 //Don't allow mixing reference types with value types
5036 if (param_klass->valuetype != return_klass->valuetype) {
5037 if (cfg->verbose_level > 3)
5038 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
/* Two reference types: always movable as-is */
5042 if (!param_klass->valuetype) {
5043 if (cfg->verbose_level > 3)
5044 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Value types holding GC references cannot be reinterpreted */
5049 if (param_klass->has_references || return_klass->has_references)
5052 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5053 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5054 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5055 if (cfg->verbose_level > 3)
5056 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floating point uses different registers; a plain move would be wrong */
5060 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5061 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5062 if (cfg->verbose_level > 3)
5063 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5067 param_size = mono_class_value_size (param_klass, &align);
5068 return_size = mono_class_value_size (return_klass, &align);
5070 //We can do it if sizes match
5071 if (param_size == return_size) {
5072 if (cfg->verbose_level > 3)
5073 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5077 //No simple way to handle struct if sizes don't match
5078 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5079 if (cfg->verbose_level > 3)
5080 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5085 * Same reg size category.
5086 * A quick note on why we don't require widening here.
5087 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5089 * Since the source value comes from a function argument, the JIT will already have
5090 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5092 if (param_size <= 4 && return_size <= 4) {
5093 if (cfg->verbose_level > 3)
5094 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Try to inline Array.UnsafeMov<S,R> as a register move; bail out for
 * gsharedvt return types. Accepts compatible scalar/struct pairs and
 * single-rank arrays with compatible element classes.
 * NOTE(review): the fallback/return lines are missing from this extract.
 */
5102 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5104 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5105 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5107 if (mini_is_gsharedvt_variable_type (fsig->ret))
5110 //Valuetypes that are semantically equivalent or numbers than can be widened to
5111 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5114 //Arrays of valuetypes that are semantically equivalent
5115 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic: SIMD ctor
 * intrinsics first (when MONO_OPT_SIMD is enabled), then native-type
 * intrinsics. Returns NULL-or-instruction per the usual intrinsic contract.
 */
5122 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5124 #ifdef MONO_ARCH_SIMD_INTRINSICS
5125 MonoInst *ins = NULL;
5127 if (cfg->opt & MONO_OPT_SIMD) {
5128 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5134 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (a MONO_MEMORY_BARRIER_*
 * value) to the current basic block.
 */
5138 emit_memory_barrier (MonoCompile *cfg, int kind)
5140 MonoInst *ins = NULL;
5141 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5142 MONO_ADD_INS (cfg->cbb, ins);
5143 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend-specific method intrinsics: Math.Sin/Cos/Sqrt/Abs map to
 * single-operand R8 opcodes, and Math.Min/Max map to integer min/max opcodes
 * when MONO_OPT_CMOV is enabled. Returns the emitted instruction or (in the
 * missing fallthrough) NULL.
 * NOTE(review): lines (opcode assignments, braces) are missing from this
 * extract; comments cover only the visible code.
 */
5149 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5151 MonoInst *ins = NULL;
5154 /* The LLVM backend supports these intrinsics */
5155 if (cmethod->klass == mono_defaults.math_class) {
5156 if (strcmp (cmethod->name, "Sin") == 0) {
5158 } else if (strcmp (cmethod->name, "Cos") == 0) {
5160 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for the double overload */
5162 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary R8 intrinsic: one source register, fresh R8 destination */
5166 if (opcode && fsig->param_count == 1) {
5167 MONO_INST_NEW (cfg, ins, opcode);
5168 ins->type = STACK_R8;
5169 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5170 ins->sreg1 = args [0]->dreg;
5171 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max become conditional-move opcodes, selected by operand type */
5175 if (cfg->opt & MONO_OPT_CMOV) {
5176 if (strcmp (cmethod->name, "Min") == 0) {
5177 if (fsig->params [0]->type == MONO_TYPE_I4)
5179 if (fsig->params [0]->type == MONO_TYPE_U4)
5180 opcode = OP_IMIN_UN;
5181 else if (fsig->params [0]->type == MONO_TYPE_I8)
5183 else if (fsig->params [0]->type == MONO_TYPE_U8)
5184 opcode = OP_LMIN_UN;
5185 } else if (strcmp (cmethod->name, "Max") == 0) {
5186 if (fsig->params [0]->type == MONO_TYPE_I4)
5188 if (fsig->params [0]->type == MONO_TYPE_U4)
5189 opcode = OP_IMAX_UN;
5190 else if (fsig->params [0]->type == MONO_TYPE_I8)
5192 else if (fsig->params [0]->type == MONO_TYPE_U8)
5193 opcode = OP_LMAX_UN;
/* Binary min/max: result stack type follows the operand width */
5197 if (opcode && fsig->param_count == 2) {
5198 MONO_INST_NEW (cfg, ins, opcode);
5199 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5200 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5201 ins->sreg1 = args [0]->dreg;
5202 ins->sreg2 = args [1]->dreg;
5203 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that remain valid in shared (generic-sharing) code: the
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5211 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5213 if (cmethod->klass == mono_defaults.array_class) {
5214 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5215 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5216 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5217 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5218 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5219 return emit_array_unsafe_mov (cfg, fsig, args);
5226 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5228 MonoInst *ins = NULL;
5229 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5231 if (cmethod->klass == mono_defaults.string_class) {
5232 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5233 int dreg = alloc_ireg (cfg);
5234 int index_reg = alloc_preg (cfg);
5235 int add_reg = alloc_preg (cfg);
5237 #if SIZEOF_REGISTER == 8
5238 if (COMPILE_LLVM (cfg)) {
5239 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5241 /* The array reg is 64 bits but the index reg is only 32 */
5242 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5245 index_reg = args [1]->dreg;
5247 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5249 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5250 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5251 add_reg = ins->dreg;
5252 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5255 int mult_reg = alloc_preg (cfg);
5256 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5257 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5258 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5259 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5261 type_from_op (cfg, ins, NULL, NULL);
5263 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5264 int dreg = alloc_ireg (cfg);
5265 /* Decompose later to allow more optimizations */
5266 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5267 ins->type = STACK_I4;
5268 ins->flags |= MONO_INST_FAULT;
5269 cfg->cbb->has_array_access = TRUE;
5270 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5275 } else if (cmethod->klass == mono_defaults.object_class) {
5276 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5277 int dreg = alloc_ireg_ref (cfg);
5278 int vt_reg = alloc_preg (cfg);
5279 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5280 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5281 type_from_op (cfg, ins, NULL, NULL);
5284 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5285 int dreg = alloc_ireg (cfg);
5286 int t1 = alloc_ireg (cfg);
5288 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5289 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5290 ins->type = STACK_I4;
5293 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5294 MONO_INST_NEW (cfg, ins, OP_NOP);
5295 MONO_ADD_INS (cfg->cbb, ins);
5299 } else if (cmethod->klass == mono_defaults.array_class) {
5300 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5301 return emit_array_generic_access (cfg, fsig, args, FALSE);
5302 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5303 return emit_array_generic_access (cfg, fsig, args, TRUE);
5305 #ifndef MONO_BIG_ARRAYS
5307 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5310 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5311 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5312 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5313 int dreg = alloc_ireg (cfg);
5314 int bounds_reg = alloc_ireg_mp (cfg);
5315 MonoBasicBlock *end_bb, *szarray_bb;
5316 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5318 NEW_BBLOCK (cfg, end_bb);
5319 NEW_BBLOCK (cfg, szarray_bb);
5321 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5322 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5324 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5325 /* Non-szarray case */
5327 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5328 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5330 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5331 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5332 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5333 MONO_START_BB (cfg, szarray_bb);
5336 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5337 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5339 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5340 MONO_START_BB (cfg, end_bb);
5342 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5343 ins->type = STACK_I4;
5349 if (cmethod->name [0] != 'g')
5352 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5353 int dreg = alloc_ireg (cfg);
5354 int vtable_reg = alloc_preg (cfg);
5355 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5356 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5357 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5358 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5359 type_from_op (cfg, ins, NULL, NULL);
5362 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5363 int dreg = alloc_ireg (cfg);
5365 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5366 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5367 type_from_op (cfg, ins, NULL, NULL);
5372 } else if (cmethod->klass == runtime_helpers_class) {
5373 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5374 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5376 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5377 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5379 g_assert (ctx->method_inst);
5380 g_assert (ctx->method_inst->type_argc == 1);
5381 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5382 MonoClass *klass = mono_class_from_mono_type (t);
5386 mono_class_init (klass);
5387 if (MONO_TYPE_IS_REFERENCE (t))
5388 EMIT_NEW_ICONST (cfg, ins, 1);
5389 else if (MONO_TYPE_IS_PRIMITIVE (t))
5390 EMIT_NEW_ICONST (cfg, ins, 0);
5391 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5392 EMIT_NEW_ICONST (cfg, ins, 1);
5393 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5394 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5396 g_assert (cfg->gshared);
5398 int context_used = mini_class_check_context_used (cfg, klass);
5400 /* This returns 1 or 2 */
5401 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5402 int dreg = alloc_ireg (cfg);
5403 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5409 } else if (cmethod->klass == mono_defaults.monitor_class) {
5410 gboolean is_enter = FALSE;
5411 gboolean is_v4 = FALSE;
5413 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5417 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5422 * To make async stack traces work, icalls which can block should have a wrapper.
5423 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5425 MonoBasicBlock *end_bb;
5427 NEW_BBLOCK (cfg, end_bb);
5429 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5431 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5432 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5433 MONO_START_BB (cfg, end_bb);
5436 } else if (cmethod->klass == mono_defaults.thread_class) {
5437 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5438 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5439 MONO_ADD_INS (cfg->cbb, ins);
5441 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5442 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5443 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5445 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5447 if (fsig->params [0]->type == MONO_TYPE_I1)
5448 opcode = OP_LOADI1_MEMBASE;
5449 else if (fsig->params [0]->type == MONO_TYPE_U1)
5450 opcode = OP_LOADU1_MEMBASE;
5451 else if (fsig->params [0]->type == MONO_TYPE_I2)
5452 opcode = OP_LOADI2_MEMBASE;
5453 else if (fsig->params [0]->type == MONO_TYPE_U2)
5454 opcode = OP_LOADU2_MEMBASE;
5455 else if (fsig->params [0]->type == MONO_TYPE_I4)
5456 opcode = OP_LOADI4_MEMBASE;
5457 else if (fsig->params [0]->type == MONO_TYPE_U4)
5458 opcode = OP_LOADU4_MEMBASE;
5459 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5460 opcode = OP_LOADI8_MEMBASE;
5461 else if (fsig->params [0]->type == MONO_TYPE_R4)
5462 opcode = OP_LOADR4_MEMBASE;
5463 else if (fsig->params [0]->type == MONO_TYPE_R8)
5464 opcode = OP_LOADR8_MEMBASE;
5465 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5466 opcode = OP_LOAD_MEMBASE;
5469 MONO_INST_NEW (cfg, ins, opcode);
5470 ins->inst_basereg = args [0]->dreg;
5471 ins->inst_offset = 0;
5472 MONO_ADD_INS (cfg->cbb, ins);
5474 switch (fsig->params [0]->type) {
5481 ins->dreg = mono_alloc_ireg (cfg);
5482 ins->type = STACK_I4;
5486 ins->dreg = mono_alloc_lreg (cfg);
5487 ins->type = STACK_I8;
5491 ins->dreg = mono_alloc_ireg (cfg);
5492 #if SIZEOF_REGISTER == 8
5493 ins->type = STACK_I8;
5495 ins->type = STACK_I4;
5500 ins->dreg = mono_alloc_freg (cfg);
5501 ins->type = STACK_R8;
5504 g_assert (mini_type_is_reference (fsig->params [0]));
5505 ins->dreg = mono_alloc_ireg_ref (cfg);
5506 ins->type = STACK_OBJ;
5510 if (opcode == OP_LOADI8_MEMBASE)
5511 ins = mono_decompose_opcode (cfg, ins);
5513 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5517 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5519 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5521 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5522 opcode = OP_STOREI1_MEMBASE_REG;
5523 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5524 opcode = OP_STOREI2_MEMBASE_REG;
5525 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5526 opcode = OP_STOREI4_MEMBASE_REG;
5527 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5528 opcode = OP_STOREI8_MEMBASE_REG;
5529 else if (fsig->params [0]->type == MONO_TYPE_R4)
5530 opcode = OP_STORER4_MEMBASE_REG;
5531 else if (fsig->params [0]->type == MONO_TYPE_R8)
5532 opcode = OP_STORER8_MEMBASE_REG;
5533 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5534 opcode = OP_STORE_MEMBASE_REG;
5537 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5539 MONO_INST_NEW (cfg, ins, opcode);
5540 ins->sreg1 = args [1]->dreg;
5541 ins->inst_destbasereg = args [0]->dreg;
5542 ins->inst_offset = 0;
5543 MONO_ADD_INS (cfg->cbb, ins);
5545 if (opcode == OP_STOREI8_MEMBASE_REG)
5546 ins = mono_decompose_opcode (cfg, ins);
5551 } else if (cmethod->klass->image == mono_defaults.corlib &&
5552 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5553 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5556 #if SIZEOF_REGISTER == 8
5557 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5558 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5559 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5560 ins->dreg = mono_alloc_preg (cfg);
5561 ins->sreg1 = args [0]->dreg;
5562 ins->type = STACK_I8;
5563 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5564 MONO_ADD_INS (cfg->cbb, ins);
5568 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5570 /* 64 bit reads are already atomic */
5571 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5572 load_ins->dreg = mono_alloc_preg (cfg);
5573 load_ins->inst_basereg = args [0]->dreg;
5574 load_ins->inst_offset = 0;
5575 load_ins->type = STACK_I8;
5576 MONO_ADD_INS (cfg->cbb, load_ins);
5578 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5585 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5586 MonoInst *ins_iconst;
5589 if (fsig->params [0]->type == MONO_TYPE_I4) {
5590 opcode = OP_ATOMIC_ADD_I4;
5591 cfg->has_atomic_add_i4 = TRUE;
5593 #if SIZEOF_REGISTER == 8
5594 else if (fsig->params [0]->type == MONO_TYPE_I8)
5595 opcode = OP_ATOMIC_ADD_I8;
5598 if (!mono_arch_opcode_supported (opcode))
5600 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5601 ins_iconst->inst_c0 = 1;
5602 ins_iconst->dreg = mono_alloc_ireg (cfg);
5603 MONO_ADD_INS (cfg->cbb, ins_iconst);
5605 MONO_INST_NEW (cfg, ins, opcode);
5606 ins->dreg = mono_alloc_ireg (cfg);
5607 ins->inst_basereg = args [0]->dreg;
5608 ins->inst_offset = 0;
5609 ins->sreg2 = ins_iconst->dreg;
5610 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5611 MONO_ADD_INS (cfg->cbb, ins);
5613 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5614 MonoInst *ins_iconst;
5617 if (fsig->params [0]->type == MONO_TYPE_I4) {
5618 opcode = OP_ATOMIC_ADD_I4;
5619 cfg->has_atomic_add_i4 = TRUE;
5621 #if SIZEOF_REGISTER == 8
5622 else if (fsig->params [0]->type == MONO_TYPE_I8)
5623 opcode = OP_ATOMIC_ADD_I8;
5626 if (!mono_arch_opcode_supported (opcode))
5628 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5629 ins_iconst->inst_c0 = -1;
5630 ins_iconst->dreg = mono_alloc_ireg (cfg);
5631 MONO_ADD_INS (cfg->cbb, ins_iconst);
5633 MONO_INST_NEW (cfg, ins, opcode);
5634 ins->dreg = mono_alloc_ireg (cfg);
5635 ins->inst_basereg = args [0]->dreg;
5636 ins->inst_offset = 0;
5637 ins->sreg2 = ins_iconst->dreg;
5638 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5639 MONO_ADD_INS (cfg->cbb, ins);
5641 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5644 if (fsig->params [0]->type == MONO_TYPE_I4) {
5645 opcode = OP_ATOMIC_ADD_I4;
5646 cfg->has_atomic_add_i4 = TRUE;
5648 #if SIZEOF_REGISTER == 8
5649 else if (fsig->params [0]->type == MONO_TYPE_I8)
5650 opcode = OP_ATOMIC_ADD_I8;
5653 if (!mono_arch_opcode_supported (opcode))
5655 MONO_INST_NEW (cfg, ins, opcode);
5656 ins->dreg = mono_alloc_ireg (cfg);
5657 ins->inst_basereg = args [0]->dreg;
5658 ins->inst_offset = 0;
5659 ins->sreg2 = args [1]->dreg;
5660 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5661 MONO_ADD_INS (cfg->cbb, ins);
5664 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5665 MonoInst *f2i = NULL, *i2f;
5666 guint32 opcode, f2i_opcode, i2f_opcode;
5667 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5668 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5670 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5671 fsig->params [0]->type == MONO_TYPE_R4) {
5672 opcode = OP_ATOMIC_EXCHANGE_I4;
5673 f2i_opcode = OP_MOVE_F_TO_I4;
5674 i2f_opcode = OP_MOVE_I4_TO_F;
5675 cfg->has_atomic_exchange_i4 = TRUE;
5677 #if SIZEOF_REGISTER == 8
5679 fsig->params [0]->type == MONO_TYPE_I8 ||
5680 fsig->params [0]->type == MONO_TYPE_R8 ||
5681 fsig->params [0]->type == MONO_TYPE_I) {
5682 opcode = OP_ATOMIC_EXCHANGE_I8;
5683 f2i_opcode = OP_MOVE_F_TO_I8;
5684 i2f_opcode = OP_MOVE_I8_TO_F;
5687 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5688 opcode = OP_ATOMIC_EXCHANGE_I4;
5689 cfg->has_atomic_exchange_i4 = TRUE;
5695 if (!mono_arch_opcode_supported (opcode))
5699 /* TODO: Decompose these opcodes instead of bailing here. */
5700 if (COMPILE_SOFT_FLOAT (cfg))
5703 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5704 f2i->dreg = mono_alloc_ireg (cfg);
5705 f2i->sreg1 = args [1]->dreg;
5706 if (f2i_opcode == OP_MOVE_F_TO_I4)
5707 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5708 MONO_ADD_INS (cfg->cbb, f2i);
5711 MONO_INST_NEW (cfg, ins, opcode);
5712 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5713 ins->inst_basereg = args [0]->dreg;
5714 ins->inst_offset = 0;
5715 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5716 MONO_ADD_INS (cfg->cbb, ins);
5718 switch (fsig->params [0]->type) {
5720 ins->type = STACK_I4;
5723 ins->type = STACK_I8;
5726 #if SIZEOF_REGISTER == 8
5727 ins->type = STACK_I8;
5729 ins->type = STACK_I4;
5734 ins->type = STACK_R8;
5737 g_assert (mini_type_is_reference (fsig->params [0]));
5738 ins->type = STACK_OBJ;
5743 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5744 i2f->dreg = mono_alloc_freg (cfg);
5745 i2f->sreg1 = ins->dreg;
5746 i2f->type = STACK_R8;
5747 if (i2f_opcode == OP_MOVE_I4_TO_F)
5748 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5749 MONO_ADD_INS (cfg->cbb, i2f);
5754 if (cfg->gen_write_barriers && is_ref)
5755 emit_write_barrier (cfg, args [0], args [1]);
5757 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5758 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5759 guint32 opcode, f2i_opcode, i2f_opcode;
5760 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5761 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5763 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5764 fsig->params [1]->type == MONO_TYPE_R4) {
5765 opcode = OP_ATOMIC_CAS_I4;
5766 f2i_opcode = OP_MOVE_F_TO_I4;
5767 i2f_opcode = OP_MOVE_I4_TO_F;
5768 cfg->has_atomic_cas_i4 = TRUE;
5770 #if SIZEOF_REGISTER == 8
5772 fsig->params [1]->type == MONO_TYPE_I8 ||
5773 fsig->params [1]->type == MONO_TYPE_R8 ||
5774 fsig->params [1]->type == MONO_TYPE_I) {
5775 opcode = OP_ATOMIC_CAS_I8;
5776 f2i_opcode = OP_MOVE_F_TO_I8;
5777 i2f_opcode = OP_MOVE_I8_TO_F;
5780 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5781 opcode = OP_ATOMIC_CAS_I4;
5782 cfg->has_atomic_cas_i4 = TRUE;
5788 if (!mono_arch_opcode_supported (opcode))
5792 /* TODO: Decompose these opcodes instead of bailing here. */
5793 if (COMPILE_SOFT_FLOAT (cfg))
5796 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5797 f2i_new->dreg = mono_alloc_ireg (cfg);
5798 f2i_new->sreg1 = args [1]->dreg;
5799 if (f2i_opcode == OP_MOVE_F_TO_I4)
5800 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5801 MONO_ADD_INS (cfg->cbb, f2i_new);
5803 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5804 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5805 f2i_cmp->sreg1 = args [2]->dreg;
5806 if (f2i_opcode == OP_MOVE_F_TO_I4)
5807 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5808 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5811 MONO_INST_NEW (cfg, ins, opcode);
5812 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5813 ins->sreg1 = args [0]->dreg;
5814 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5815 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5816 MONO_ADD_INS (cfg->cbb, ins);
5818 switch (fsig->params [1]->type) {
5820 ins->type = STACK_I4;
5823 ins->type = STACK_I8;
5826 #if SIZEOF_REGISTER == 8
5827 ins->type = STACK_I8;
5829 ins->type = STACK_I4;
5833 ins->type = cfg->r4_stack_type;
5836 ins->type = STACK_R8;
5839 g_assert (mini_type_is_reference (fsig->params [1]));
5840 ins->type = STACK_OBJ;
5845 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5846 i2f->dreg = mono_alloc_freg (cfg);
5847 i2f->sreg1 = ins->dreg;
5848 i2f->type = STACK_R8;
5849 if (i2f_opcode == OP_MOVE_I4_TO_F)
5850 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5851 MONO_ADD_INS (cfg->cbb, i2f);
5856 if (cfg->gen_write_barriers && is_ref)
5857 emit_write_barrier (cfg, args [0], args [1]);
5859 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5860 fsig->params [1]->type == MONO_TYPE_I4) {
5861 MonoInst *cmp, *ceq;
5863 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5866 /* int32 r = CAS (location, value, comparand); */
5867 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5868 ins->dreg = alloc_ireg (cfg);
5869 ins->sreg1 = args [0]->dreg;
5870 ins->sreg2 = args [1]->dreg;
5871 ins->sreg3 = args [2]->dreg;
5872 ins->type = STACK_I4;
5873 MONO_ADD_INS (cfg->cbb, ins);
5875 /* bool result = r == comparand; */
5876 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5877 cmp->sreg1 = ins->dreg;
5878 cmp->sreg2 = args [2]->dreg;
5879 cmp->type = STACK_I4;
5880 MONO_ADD_INS (cfg->cbb, cmp);
5882 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5883 ceq->dreg = alloc_ireg (cfg);
5884 ceq->type = STACK_I4;
5885 MONO_ADD_INS (cfg->cbb, ceq);
5887 /* *success = result; */
5888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5890 cfg->has_atomic_cas_i4 = TRUE;
5892 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5893 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5897 } else if (cmethod->klass->image == mono_defaults.corlib &&
5898 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5899 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5902 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5904 MonoType *t = fsig->params [0];
5906 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5908 g_assert (t->byref);
5909 /* t is a byref type, so the reference check is more complicated */
5910 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5911 if (t->type == MONO_TYPE_I1)
5912 opcode = OP_ATOMIC_LOAD_I1;
5913 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5914 opcode = OP_ATOMIC_LOAD_U1;
5915 else if (t->type == MONO_TYPE_I2)
5916 opcode = OP_ATOMIC_LOAD_I2;
5917 else if (t->type == MONO_TYPE_U2)
5918 opcode = OP_ATOMIC_LOAD_U2;
5919 else if (t->type == MONO_TYPE_I4)
5920 opcode = OP_ATOMIC_LOAD_I4;
5921 else if (t->type == MONO_TYPE_U4)
5922 opcode = OP_ATOMIC_LOAD_U4;
5923 else if (t->type == MONO_TYPE_R4)
5924 opcode = OP_ATOMIC_LOAD_R4;
5925 else if (t->type == MONO_TYPE_R8)
5926 opcode = OP_ATOMIC_LOAD_R8;
5927 #if SIZEOF_REGISTER == 8
5928 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5929 opcode = OP_ATOMIC_LOAD_I8;
5930 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5931 opcode = OP_ATOMIC_LOAD_U8;
5933 else if (t->type == MONO_TYPE_I)
5934 opcode = OP_ATOMIC_LOAD_I4;
5935 else if (is_ref || t->type == MONO_TYPE_U)
5936 opcode = OP_ATOMIC_LOAD_U4;
5940 if (!mono_arch_opcode_supported (opcode))
5943 MONO_INST_NEW (cfg, ins, opcode);
5944 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5945 ins->sreg1 = args [0]->dreg;
5946 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5947 MONO_ADD_INS (cfg->cbb, ins);
5950 case MONO_TYPE_BOOLEAN:
5957 ins->type = STACK_I4;
5961 ins->type = STACK_I8;
5965 #if SIZEOF_REGISTER == 8
5966 ins->type = STACK_I8;
5968 ins->type = STACK_I4;
5972 ins->type = cfg->r4_stack_type;
5975 ins->type = STACK_R8;
5979 ins->type = STACK_OBJ;
5985 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5987 MonoType *t = fsig->params [0];
5990 g_assert (t->byref);
5991 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5992 if (t->type == MONO_TYPE_I1)
5993 opcode = OP_ATOMIC_STORE_I1;
5994 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5995 opcode = OP_ATOMIC_STORE_U1;
5996 else if (t->type == MONO_TYPE_I2)
5997 opcode = OP_ATOMIC_STORE_I2;
5998 else if (t->type == MONO_TYPE_U2)
5999 opcode = OP_ATOMIC_STORE_U2;
6000 else if (t->type == MONO_TYPE_I4)
6001 opcode = OP_ATOMIC_STORE_I4;
6002 else if (t->type == MONO_TYPE_U4)
6003 opcode = OP_ATOMIC_STORE_U4;
6004 else if (t->type == MONO_TYPE_R4)
6005 opcode = OP_ATOMIC_STORE_R4;
6006 else if (t->type == MONO_TYPE_R8)
6007 opcode = OP_ATOMIC_STORE_R8;
6008 #if SIZEOF_REGISTER == 8
6009 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6010 opcode = OP_ATOMIC_STORE_I8;
6011 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6012 opcode = OP_ATOMIC_STORE_U8;
6014 else if (t->type == MONO_TYPE_I)
6015 opcode = OP_ATOMIC_STORE_I4;
6016 else if (is_ref || t->type == MONO_TYPE_U)
6017 opcode = OP_ATOMIC_STORE_U4;
6021 if (!mono_arch_opcode_supported (opcode))
6024 MONO_INST_NEW (cfg, ins, opcode);
6025 ins->dreg = args [0]->dreg;
6026 ins->sreg1 = args [1]->dreg;
6027 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6028 MONO_ADD_INS (cfg->cbb, ins);
6030 if (cfg->gen_write_barriers && is_ref)
6031 emit_write_barrier (cfg, args [0], args [1]);
6037 } else if (cmethod->klass->image == mono_defaults.corlib &&
6038 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6039 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6040 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6041 if (should_insert_brekpoint (cfg->method)) {
6042 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6044 MONO_INST_NEW (cfg, ins, OP_NOP);
6045 MONO_ADD_INS (cfg->cbb, ins);
6049 } else if (cmethod->klass->image == mono_defaults.corlib &&
6050 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6051 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6052 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6054 EMIT_NEW_ICONST (cfg, ins, 1);
6056 EMIT_NEW_ICONST (cfg, ins, 0);
6059 } else if (cmethod->klass->image == mono_defaults.corlib &&
6060 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6061 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6062 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6063 /* No stack walks are currently available, so implement this as an intrinsic */
6064 MonoInst *assembly_ins;
6066 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6067 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6070 } else if (cmethod->klass->image == mono_defaults.corlib &&
6071 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6072 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6073 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6074 /* No stack walks are currently available, so implement this as an intrinsic */
6075 MonoInst *method_ins;
6076 MonoMethod *declaring = cfg->method;
6078 /* This returns the declaring generic method */
6079 if (declaring->is_inflated)
6080 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6081 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6082 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6083 cfg->no_inline = TRUE;
6084 if (cfg->method != cfg->current_method)
6085 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6088 } else if (cmethod->klass == mono_defaults.math_class) {
6090 * There is general branchless code for Min/Max, but it does not work for
6092 * http://everything2.com/?node_id=1051618
6094 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6095 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6096 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6097 ins->dreg = alloc_preg (cfg);
6098 ins->type = STACK_I4;
6099 MONO_ADD_INS (cfg->cbb, ins);
6101 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6102 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6103 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6104 !strcmp (cmethod->klass->name, "Selector")) ||
6105 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6106 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6107 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6108 !strcmp (cmethod->klass->name, "Selector"))
6110 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6111 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6112 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6115 MonoJumpInfoToken *ji;
6118 if (args [0]->opcode == OP_GOT_ENTRY) {
6119 pi = (MonoInst *)args [0]->inst_p1;
6120 g_assert (pi->opcode == OP_PATCH_INFO);
6121 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6122 ji = (MonoJumpInfoToken *)pi->inst_p0;
6124 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6125 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6128 NULLIFY_INS (args [0]);
6130 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6131 return_val_if_nok (&cfg->error, NULL);
6133 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6134 ins->dreg = mono_alloc_ireg (cfg);
6137 MONO_ADD_INS (cfg->cbb, ins);
6142 #ifdef MONO_ARCH_SIMD_INTRINSICS
6143 if (cfg->opt & MONO_OPT_SIMD) {
6144 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6150 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6154 if (COMPILE_LLVM (cfg)) {
6155 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6160 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6164 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected method calls to JIT-internal replacements.  The only
 * case handled here is String.InternalAllocateStr: when allocation profiling
 * and MONO_OPT_SHARED are both off, the call is rewritten to go through the
 * GC's managed string allocator instead of the runtime icall.
 * NOTE(review): several interior lines (the !managed_alloc bail-out and the
 * final `return NULL`) are elided in this view — confirm against the full file.
 */
6167 inline static MonoInst*
6168 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6169 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6171 if (method->klass == mono_defaults.string_class) {
6172 /* managed string allocation support */
6173 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6174 MonoInst *iargs [2];
6175 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6176 MonoMethod *managed_alloc = NULL;
6178 g_assert (vtable); /* Should not fail since it is System.String */
6179 #ifndef MONO_CROSS_COMPILE
6180 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* iargs [0] = vtable const, iargs [1] = requested length (the original arg) */
6184 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6185 iargs [1] = args [0];
6186 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   When inlining, copy the call arguments from the evaluation stack SP into
 * freshly created local variables so the inlined body can address them through
 * cfg->args.  For an instance call the implicit `this` (i == 0 with hasthis)
 * takes its type from the stack entry, since sig->params does not include it.
 */
6193 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6195 MonoInst *store, *temp;
6198 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6199 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6202 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6203 * would be different than the MonoInst's used to represent arguments, and
6204 * the ldelema implementation can't deal with that.
6205 * Solution: When ldelema is used on an inline argument, create a var for
6206 * it, emit ldelema on that var, and emit the saving code below in
6207 * inline_method () if needed.
6209 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6210 cfg->args [i] = temp;
6211 /* This uses cfg->args [i] which is set by the preceding line */
6212 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6213 store->cil_code = sp [0]->cil_code;
6218 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6219 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6221 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only permit inlining of callees whose full name starts with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The limit is read once and cached in a function-local static
 * (not thread-safe — acceptable for a debugging knob).
 */
6223 check_inline_called_method_name_limit (MonoMethod *called_method)
6226 static const char *limit = NULL;
6228 if (limit == NULL) {
6229 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6231 if (limit_string != NULL)
6232 limit = limit_string;
/* empty limit string means "no restriction" (handled in elided else arm) */
6237 if (limit [0] != '\0') {
6238 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match: inline only when the callee's full name begins with `limit` */
6240 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6241 g_free (called_method_name);
6243 //return (strncmp_result <= 0);
6244 return (strncmp_result == 0);
6251 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid, mirror of check_inline_called_method_name_limit (): only
 * permit inlining when the CALLER's full name starts with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.  The limit is
 * read once and cached in a function-local static.
 */
6253 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6256 static const char *limit = NULL;
6258 if (limit == NULL) {
6259 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6260 if (limit_string != NULL) {
6261 limit = limit_string;
/* empty limit string means "no restriction" (handled in elided else arm) */
6267 if (limit [0] != '\0') {
6268 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match against the caller's full name */
6270 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6271 g_free (caller_method_name);
6273 //return (strncmp_result <= 0);
6274 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG, holding a value of type RTYPE,
 * to that type's zero value: NULL for pointer/reference types, 0 for
 * integral types, 0.0 for R4/R8 (loaded from the static r4_0/r8_0 storage
 * below), and a VZERO for value types and value-type generic parameters.
 */
6282 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zero constants: OP_R4CONST/OP_R8CONST reference them by address. */
6284 static double r8_0 = 0.0;
6285 static float r4_0 = 0.0;
/* Strip custom modifiers / enums down to the underlying type first. */
6289 rtype = mini_get_underlying_type (rtype);
6293 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6294 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6295 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6296 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6297 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 values stay in single precision; otherwise fall through
 * to the R8 constant below. */
6298 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6299 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6300 ins->type = STACK_R4;
6301 ins->inst_p0 = (void*)&r4_0;
6303 MONO_ADD_INS (cfg->cbb, ins);
6304 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6305 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6306 ins->type = STACK_R8;
6307 ins->inst_p0 = (void*)&r8_0;
6309 MONO_ADD_INS (cfg->cbb, ins);
6310 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6311 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6312 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6313 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6314 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (reference types, pointers) is zeroed as a pointer. */
6316 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder initializations
 * that keep the IR/SSA form valid without generating real stores.  Used when
 * locals don't need actual zero-initialization (init_locals off).  Falls back
 * to a real emit_init_rvar () for types with no dummy opcode.
 */
6321 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6325 rtype = mini_get_underlying_type (rtype);
6329 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6330 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6331 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6332 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6333 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6334 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6335 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6336 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6337 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6338 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6339 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6340 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6341 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6342 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero initialization. */
6344 emit_init_rvar (cfg, dreg, rtype);
6348 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float the zero
 * value is materialized into a scratch register first and then stored to the
 * local, so the decompose passes see a proper store; otherwise the local's
 * dreg is initialized directly (for real with emit_init_rvar () when INIT,
 * or with dummy opcodes via emit_dummy_init_rvar () otherwise).
 */
6350 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6352 MonoInst *var = cfg->locals [local];
6353 if (COMPILE_SOFT_FLOAT (cfg)) {
6355 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6356 emit_init_rvar (cfg, reg, type);
/* cbb->last_ins is the constant just emitted by emit_init_rvar () above */
6357 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6360 emit_init_rvar (cfg, var->dreg, type);
6362 emit_dummy_init_rvar (cfg, var->dreg, type);
/* Thin public wrapper over the static inline_method () below. */
6367 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6369 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6375 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  The heavy lifting
 * is a recursive call into mono_method_to_ir () with freshly allocated
 * locals and start/end bblocks; all per-method compile state (locals, args,
 * cil offset tables, current method, generic context, ...) is saved before
 * and restored after, so an aborted inline leaves CFG intact.  On success the
 * begin/end bblocks are merged away where possible and the return value (if
 * any) is loaded from RVAR.
 */
6378 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6379 guchar *ip, guint real_offset, gboolean inline_always)
6382 MonoInst *ins, *rvar = NULL;
6383 MonoMethodHeader *cheader;
6384 MonoBasicBlock *ebblock, *sbblock;
6386 MonoMethod *prev_inlined_method;
6387 MonoInst **prev_locals, **prev_args;
6388 MonoType **prev_arg_types;
6389 guint prev_real_offset;
6390 GHashTable *prev_cbb_hash;
6391 MonoBasicBlock **prev_cil_offset_to_bb;
6392 MonoBasicBlock *prev_cbb;
6393 const unsigned char *prev_ip;
6394 unsigned char *prev_cil_start;
6395 guint32 prev_cil_offset_to_bb_len;
6396 MonoMethod *prev_current_method;
6397 MonoGenericContext *prev_generic_context;
6398 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6400 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see checks above). */
6402 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6403 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6406 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6407 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6412 fsig = mono_method_signature (cmethod);
6414 if (cfg->verbose_level > 2)
6415 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6417 if (!cmethod->inline_info) {
6418 cfg->stat_inlineable_methods++;
6419 cmethod->inline_info = 1;
6422 /* allocate local variables */
6423 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: propagate the error only for mandatory inlines. */
6425 if (inline_always) {
6426 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6427 mono_error_move (&cfg->error, &error);
6429 mono_error_cleanup (&error);
6434 /*Must verify before creating locals as it can cause the JIT to assert.*/
6435 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6436 mono_metadata_free_mh (cheader);
6440 /* allocate space to store the return value */
6441 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6442 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6445 prev_locals = cfg->locals;
6446 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6447 for (i = 0; i < cheader->num_locals; ++i)
6448 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6450 /* allocate start and end blocks */
6451 /* This is needed so if the inline is aborted, we can clean up */
6452 NEW_BBLOCK (cfg, sbblock);
6453 sbblock->real_offset = real_offset;
6455 NEW_BBLOCK (cfg, ebblock);
6456 ebblock->block_num = cfg->num_bblocks++;
6457 ebblock->real_offset = real_offset;
/* --- save the per-method compile state; restored below after the
 * recursive mono_method_to_ir () call --- */
6459 prev_args = cfg->args;
6460 prev_arg_types = cfg->arg_types;
6461 prev_inlined_method = cfg->inlined_method;
6462 cfg->inlined_method = cmethod;
6463 cfg->ret_var_set = FALSE;
6464 cfg->inline_depth ++;
6465 prev_real_offset = cfg->real_offset;
6466 prev_cbb_hash = cfg->cbb_hash;
6467 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6468 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6469 prev_cil_start = cfg->cil_start;
6471 prev_cbb = cfg->cbb;
6472 prev_current_method = cfg->current_method;
6473 prev_generic_context = cfg->generic_context;
6474 prev_ret_var_set = cfg->ret_var_set;
6475 prev_disable_inline = cfg->disable_inline;
/* Inlined callvirt on an instance method still needs a null check. */
6477 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6480 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6482 ret_var_set = cfg->ret_var_set;
/* --- restore the saved compile state --- */
6484 cfg->inlined_method = prev_inlined_method;
6485 cfg->real_offset = prev_real_offset;
6486 cfg->cbb_hash = prev_cbb_hash;
6487 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6488 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6489 cfg->cil_start = prev_cil_start;
6491 cfg->locals = prev_locals;
6492 cfg->args = prev_args;
6493 cfg->arg_types = prev_arg_types;
6494 cfg->current_method = prev_current_method;
6495 cfg->generic_context = prev_generic_context;
6496 cfg->ret_var_set = prev_ret_var_set;
6497 cfg->disable_inline = prev_disable_inline;
6498 cfg->inline_depth --;
/* Accept the inline when the body was cheap enough, the inline is
 * mandatory, or the callee is marked AggressiveInlining. */
6500 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6501 if (cfg->verbose_level > 2)
6502 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6504 cfg->stat_inlined_methods++;
6506 /* always add some code to avoid block split failures */
6507 MONO_INST_NEW (cfg, ins, OP_NOP);
6508 MONO_ADD_INS (prev_cbb, ins);
6510 prev_cbb->next_bb = sbblock;
6511 link_bblock (cfg, prev_cbb, sbblock);
6514 * Get rid of the begin and end bblocks if possible to aid local
6517 if (prev_cbb->out_count == 1)
6518 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6520 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6521 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6523 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6524 MonoBasicBlock *prev = ebblock->in_bb [0];
6526 if (prev->next_bb == ebblock) {
6527 mono_merge_basic_blocks (cfg, prev, ebblock);
6529 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6530 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6531 cfg->cbb = prev_cbb;
6534 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6539 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Give rvar a dummy value on paths ending in OP_NOT_REACHED so it is
 * defined on every predecessor of the end block. */
6545 for (i = 0; i < ebblock->in_count; ++i) {
6546 bb = ebblock->in_bb [i];
6548 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6551 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6561 * If the inlined method contains only a throw, then the ret var is not
6562 * set, so set it to a dummy value.
6565 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6567 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6570 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: clear any pending exception and drop the new bblocks. */
6573 if (cfg->verbose_level > 2)
6574 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6575 cfg->exception_type = MONO_EXCEPTION_NONE;
6577 /* This gets rid of the newly added bblocks */
6578 cfg->cbb = prev_cbb;
6580 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6585 * Some of these comments may well be out-of-date.
6586 * Design decisions: we do a single pass over the IL code (and we do bblock
6587 * splitting/merging in the few cases when it's required: a back jump to an IL
6588 * address that was not already seen as bblock starting point).
6589 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6590 * Complex operations are decomposed in simpler ones right away. We need to let the
6591 * arch-specific code peek and poke inside this process somehow (except when the
6592 * optimizations can take advantage of the full semantic info of coarse opcodes).
6593 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6594 * MonoInst->opcode initially is the IL opcode or some simplification of that
6595 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6596 * opcode with value bigger than OP_LAST.
6597 * At this point the IR can be handed over to an interpreter, a dumb code generator
6598 * or to the optimizing code generator that will translate it to SSA form.
6600 * Profiling directed optimizations.
6601 * We may compile by default with few or no optimizations and instrument the code
6602 * or the user may indicate what methods to optimize the most either in a config file
6603 * or through repeated runs where the compiler applies offline the optimizations to
6604 * each method and then decides if it was worth it.
6607 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6608 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6609 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6610 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6611 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6612 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6613 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6614 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6616 /* offset from br.s -> br like opcodes */
6617 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the CIL address IP can be treated as part of bblock BB:
 * TRUE when no bblock starts at IP (b == NULL) or the bblock there is BB
 * itself.
 */
6620 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6622 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6624 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the CIL stream [START, END): decode each opcode and
 * create basic blocks (via GET_BBLOCK) at every branch target, at the
 * instruction following a branch, and at every switch target.  Also marks
 * the bblock containing a `throw` as out-of-line so it can be laid out cold.
 * NOTE(review): the per-argument `ip` advancement and several `break`s are
 * elided in this view — confirm opcode sizes against the full file.
 */
6628 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6630 unsigned char *ip = start;
6631 unsigned char *target;
6634 MonoBasicBlock *bblock;
6635 const MonoOpcode *opcode;
6638 cli_addr = ip - start;
6639 i = mono_opcode_value ((const guint8 **)&ip, end);
6642 opcode = &mono_opcodes [i];
/* dispatch on the operand kind to find the instruction size / targets */
6643 switch (opcode->argument) {
6644 case MonoInlineNone:
6647 case MonoInlineString:
6648 case MonoInlineType:
6649 case MonoInlineField:
6650 case MonoInlineMethod:
6653 case MonoShortInlineR:
6660 case MonoShortInlineVar:
6661 case MonoShortInlineI:
6664 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
6665 target = start + cli_addr + 2 + (signed char)ip [1];
6666 GET_BBLOCK (cfg, bblock, target);
/* fall-through point after the branch also starts a bblock */
6669 GET_BBLOCK (cfg, bblock, ip);
6671 case MonoInlineBrTarget:
/* 4-byte branch displacement, relative to the next instruction */
6672 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6673 GET_BBLOCK (cfg, bblock, target);
6676 GET_BBLOCK (cfg, bblock, ip);
6678 case MonoInlineSwitch: {
6679 guint32 n = read32 (ip + 1);
/* default target is the instruction after the whole switch */
6682 cli_addr += 5 + 4 * n;
6683 target = start + cli_addr;
6684 GET_BBLOCK (cfg, bblock, target);
6686 for (j = 0; j < n; ++j) {
6687 target = start + cli_addr + (gint32)read32 (ip);
6688 GET_BBLOCK (cfg, bblock, target);
6698 g_assert_not_reached ();
6701 if (i == CEE_THROW) {
6702 unsigned char *bb_start = ip - 1;
6704 /* Find the start of the bblock containing the throw */
6706 while ((bb_start >= start) && !bblock) {
6707 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* throw blocks are cold: move them out of the hot code path */
6711 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the scope of M.  For wrapper methods
 * the method is read from the wrapper's data and (when a generic CONTEXT is
 * given) inflated; otherwise it is loaded from metadata.  Open constructed
 * types are allowed — the caller must check if that matters.
 */
6721 static inline MonoMethod *
6722 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6728 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6729 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6731 method = mono_class_inflate_generic_method_checked (method, context, error);
6734 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   CFG-aware wrapper over mini_get_method_allow_open (): errors go into
 * cfg->error when CFG is given (a local MonoError otherwise), and a method
 * on an open constructed type is rejected unless compiling gshared code.
 */
6740 static inline MonoMethod *
6741 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6744 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6746 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6747 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
/* without a CFG there is nowhere to store the error, so drop it */
6751 if (!method && !cfg)
6752 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the scope of METHOD, inflating with
 * CONTEXT where needed (wrapper data for wrappers, typespec otherwise), and
 * initialize the class before returning it.  Resolution errors are currently
 * swallowed (see FIXMEs).
 */
6757 static inline MonoClass*
6758 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6763 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6764 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6766 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6767 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6770 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6771 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6774 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve the method signature referenced by TOKEN in METHOD, inflating it
 * with CONTEXT. For wrappers the token indexes the wrapper data blob.
 * Returns NULL and sets ERROR on failure.
 */
6778 static inline MonoMethodSignature*
6779 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6781 MonoMethodSignature *fsig;
6784 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6785 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6787 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6788 return_val_if_nok (error, NULL);
6791 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return the cached SecurityManager.ThrowException (exception) method,
 * looking it up lazily on first use. Used to emit managed throws for
 * CoreCLR security violations.
 */
6797 throw_exception (void)
6799 static MonoMethod *method = NULL;
6802 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6803 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-constructed exception object EX by calling
 * SecurityManager.ThrowException with EX as its single argument.
 */
6810 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6812 MonoMethod *thrower = throw_exception ();
6815 EMIT_NEW_PCONST (cfg, args [0], ex);
6816 mono_emit_method_call (cfg, thrower, args, NULL);
6820 * Return the original method if a wrapper is specified. We can only access
6821 * the custom attributes from the original method.
6824 get_original_method (MonoMethod *method)
/* non-wrappers already carry their own custom attributes */
6826 if (method->wrapper_type == MONO_WRAPPER_NONE)
6829 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6830 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6833 /* in other cases we need to find the original method */
6834 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped) may not access FIELD,
 * emit IR that throws the resulting security exception.
 */
6838 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6840 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6841 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6843 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped) may not call CALLEE,
 * emit IR that throws the resulting security exception.
 */
6847 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6849 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6850 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6852 emit_throw_exception (cfg, ex);
6856 * Check that the IL instructions at ip are the array initialization
6857 * sequence and return the pointer to the data and the size.
/*
 * Pattern-matches the dup/ldtoken/call RuntimeHelpers::InitializeArray idiom
 * emitted by compilers for array-literal initialization, so the JIT can
 * replace it with a direct copy from the field's RVA data.
 * On success *out_field_token receives the ldtoken field token; the return
 * value is the raw data pointer (or the RVA, for AOT).
 * NOTE(review): several lines (element-size switch cases, returns) are
 * elided from this view.
 */
6860 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6863 * newarr[System.Int32]
6865 * ldtoken field valuetype ...
6866 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's token-table byte (Field) — TODO confirm */
6868 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6870 guint32 token = read32 (ip + 7);
6871 guint32 field_token = read32 (ip + 2);
6872 guint32 field_index = field_token & 0xffffff;
6874 const char *data_ptr;
6876 MonoMethod *cmethod;
6877 MonoClass *dummy_class;
6878 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6882 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6886 *out_field_token = field_token;
6888 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the call target must really be corlib's RuntimeHelpers.InitializeArray */
6891 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6893 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6894 case MONO_TYPE_BOOLEAN:
6898 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6899 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6900 case MONO_TYPE_CHAR:
/* the blob must be large enough for len elements of the computed size */
6917 if (size > mono_type_size (field->type, &dummy_align))
6920 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6921 if (!image_is_dynamic (method->klass->image)) {
6922 field_index = read32 (ip + 2) & 0xffffff;
6923 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6924 data_ptr = mono_image_rva_map (method->klass->image, rva);
6925 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6926 /* for aot code we do the lookup on load */
6927 if (aot && data_ptr)
6928 return (const char *)GUINT_TO_POINTER (rva);
6930 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6932 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Mark CFG as failed with an InvalidProgramException describing the invalid
 * IL at IP in METHOD; includes the disassembled offending instruction when
 * the header is parseable. The header is queued for later freeing on the
 * cfg mempool.
 */
6940 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6943 char *method_fname = mono_method_full_name (method, TRUE);
6945 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6948 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6949 mono_error_cleanup (&error);
6950 } else if (header->code_size == 0)
6951 method_code = g_strdup ("method body is empty.");
6953 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6954 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6955 g_free (method_fname);
6956 g_free (method_code);
6957 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local variable N. When the store would be a
 * plain reg-reg move of a just-emitted constant, retarget the constant's
 * dreg instead of emitting a separate move.
 */
6961 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6964 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6965 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6966 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6967 /* Optimize reg-reg moves away */
6969 * Can't optimize other opcodes, since sp[0] might point to
6970 * the last ins of a decomposed opcode.
6972 sp [0]->dreg = (cfg)->locals [n]->dreg;
6974 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6979 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Recognizes the ldloca + initobj pattern within a basic block and replaces
 * it with a direct local initialization, returning the IP past the consumed
 * instructions (NULL/unchanged otherwise — elided from this view).
 * SIZE selects the 2-byte vs 4-byte ldloca encoding for the operand read.
 */
6982 static inline unsigned char *
6983 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6993 local = read16 (ip + 2);
/* only fold when initobj follows in the same bblock */
6997 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6998 /* From the INITOBJ case */
6999 token = read32 (ip + 2);
7000 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7001 CHECK_TYPELOAD (klass);
7002 type = mini_get_underlying_type (&klass->byval_arg);
7003 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call for llvm-only mode, where vtable and imt
 * slots hold function descriptors (addr + arg pairs) rather than raw code
 * addresses. Handles, in order: plain virtual calls, simple interface calls
 * through the IMT, generic-virtual/variant-interface calls with a slowpath
 * for uninitialized thunks, and finally the unoptimized gsharedvt fallback
 * through a runtime resolver icall.
 * NOTE(review): many lines are elided from this view; comments below only
 * annotate what is visible.
 */
7011 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7013 MonoInst *icall_args [16];
7014 MonoInst *call_target, *ins, *vtable_ins;
7015 int arg_reg, this_reg, vtable_reg;
7016 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7017 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7018 gboolean variant_iface = FALSE;
7021 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7024 * In llvm-only mode, vtables contain function descriptors instead of
7025 * method addresses/trampolines.
7027 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* imt slot for interface methods, vtable index otherwise */
7030 slot = mono_method_get_imt_slot (cmethod);
7032 slot = mono_method_get_vtable_index (cmethod);
7034 this_reg = sp [0]->dreg;
7036 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7037 variant_iface = TRUE;
7039 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7041 * The simplest case, a normal virtual call.
7043 int slot_reg = alloc_preg (cfg);
7044 int addr_reg = alloc_preg (cfg);
7045 int arg_reg = alloc_preg (cfg);
7046 MonoBasicBlock *non_null_bb;
7048 vtable_reg = alloc_preg (cfg);
7049 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7050 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7052 /* Load the vtable slot, which contains a function descriptor. */
7053 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7055 NEW_BBLOCK (cfg, non_null_bb);
/* fast path: slot already initialized (non-NULL) */
7057 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7058 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7059 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7062 // FIXME: Make the wrapper use the preserveall cconv
7063 // FIXME: Use one icall per slot for small slot numbers ?
7064 icall_args [0] = vtable_ins;
7065 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7066 /* Make the icall return the vtable slot value to save some code space */
7067 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7068 ins->dreg = slot_reg;
7069 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7072 MONO_START_BB (cfg, non_null_bb);
7073 /* Load the address + arg from the vtable slot */
7074 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7077 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7080 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7082 * A simple interface call
7084 * We make a call through an imt slot to obtain the function descriptor we need to call.
7085 * The imt slot contains a function descriptor for a runtime function + arg.
7087 int slot_reg = alloc_preg (cfg);
7088 int addr_reg = alloc_preg (cfg);
7089 int arg_reg = alloc_preg (cfg);
7090 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7092 vtable_reg = alloc_preg (cfg);
7093 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* imt entries live below the vtable proper, hence the negative offset */
7094 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7097 * The slot is already initialized when the vtable is created so there is no need
7101 /* Load the imt slot, which contains a function descriptor. */
7102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7104 /* Load the address + arg of the imt thunk from the imt slot */
7105 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7106 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7108 * IMT thunks in llvm-only mode are C functions which take an info argument
7109 * plus the imt method and return the ftndesc to call.
7111 icall_args [0] = thunk_arg_ins;
7112 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7113 cmethod, MONO_RGCTX_INFO_METHOD);
7114 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7116 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7119 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7121 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7122 * dynamically extended as more instantiations are discovered.
7123 * This handles generic virtual methods both on classes and interfaces.
7125 int slot_reg = alloc_preg (cfg);
7126 int addr_reg = alloc_preg (cfg);
7127 int arg_reg = alloc_preg (cfg);
7128 int ftndesc_reg = alloc_preg (cfg);
7129 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7130 MonoBasicBlock *slowpath_bb, *end_bb;
7132 NEW_BBLOCK (cfg, slowpath_bb);
7133 NEW_BBLOCK (cfg, end_bb);
7135 vtable_reg = alloc_preg (cfg);
7136 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7138 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7140 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7142 /* Load the slot, which contains a function descriptor. */
7143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7145 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7146 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7151 /* Same as with iface calls */
7152 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7153 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7154 icall_args [0] = thunk_arg_ins;
7155 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7156 cmethod, MONO_RGCTX_INFO_METHOD);
7157 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7158 ftndesc_ins->dreg = ftndesc_reg;
7160 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7161 * they don't know about yet. Fall back to the slowpath in that case.
7163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7164 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slowpath: resolve through a runtime icall */
7169 MONO_START_BB (cfg, slowpath_bb);
7170 icall_args [0] = vtable_ins;
7171 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7172 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7173 cmethod, MONO_RGCTX_INFO_METHOD);
7175 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7177 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7178 ftndesc_ins->dreg = ftndesc_reg;
7179 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7182 MONO_START_BB (cfg, end_bb);
7183 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7187 * Non-optimized cases
7189 icall_args [0] = sp [0];
7190 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7192 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7193 cmethod, MONO_RGCTX_INFO_METHOD);
/* out-parameter: the resolver writes the extra call argument here */
7195 arg_reg = alloc_preg (cfg);
7196 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7197 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7199 g_assert (is_gsharedvt);
7201 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7203 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7206 * Pass the extra argument even if the callee doesn't receive it, most
7207 * calling conventions allow this.
7209 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, walking
 * the parent chain.
 */
7213 is_exception_class (MonoClass *klass)
7216 if (klass == mono_defaults.exception_class)
7218 klass = klass->parent;
7224 * is_jit_optimizer_disabled:
7226 * Determine whether M's assembly has a DebuggableAttribute with the
7227 * IsJITOptimizerDisabled flag set.
7230 is_jit_optimizer_disabled (MonoMethod *m)
7233 MonoAssembly *ass = m->klass->image->assembly;
7234 MonoCustomAttrInfo* attrs;
7237 gboolean val = FALSE;
/* fast path: result is cached per assembly */
7240 if (ass->jit_optimizer_disabled_inited)
7241 return ass->jit_optimizer_disabled;
7243 klass = mono_class_try_get_debuggable_attribute_class ();
/* no DebuggableAttribute type available: cache FALSE */
7247 ass->jit_optimizer_disabled = FALSE;
/* barrier orders the value store before the inited flag store */
7248 mono_memory_barrier ();
7249 ass->jit_optimizer_disabled_inited = TRUE;
7253 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7254 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7256 for (i = 0; i < attrs->num_attrs; ++i) {
7257 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7259 MonoMethodSignature *sig;
7261 if (!attr->ctor || attr->ctor->klass != klass)
7263 /* Decode the attribute. See reflection.c */
7264 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
7265 g_assert (read16 (p) == 0x0001);
7268 // FIXME: Support named parameters
7269 sig = mono_method_signature (attr->ctor);
7270 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7272 /* Two boolean arguments */
7276 mono_custom_attrs_free (attrs);
7279 ass->jit_optimizer_disabled = val;
7280 mono_memory_barrier ();
7281 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call to CMETHOD (signature FSIG, made via CALL_OPCODE
 * from METHOD) can be compiled as a tail call. Starts from the arch-specific
 * check, then vetoes cases where arguments may point into the caller's
 * stack frame, pinvokes, LMF-saving methods, most wrappers, and non-CALL
 * opcodes.
 */
7287 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7289 gboolean supported_tail_call;
7292 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7294 for (i = 0; i < fsig->param_count; ++i) {
7295 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7296 /* These can point to the current method's stack */
7297 supported_tail_call = FALSE;
7299 if (fsig->hasthis && cmethod->klass->valuetype)
7300 /* this might point to the current method's stack */
7301 supported_tail_call = FALSE;
7302 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7303 supported_tail_call = FALSE;
7304 if (cfg->method->save_lmf)
7305 supported_tail_call = FALSE;
7306 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7307 supported_tail_call = FALSE;
7308 if (call_opcode != CEE_CALL)
7309 supported_tail_call = FALSE;
7311 /* Debugging support */
7313 if (supported_tail_call) {
7314 if (!mono_debug_count ())
7315 supported_tail_call = FALSE;
7319 return supported_tail_call;
7325 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * Emits the constructor invocation for a NEWOBJ: computes the vtable/mrgctx
 * argument for shared generic valuetype ctors, then picks one of: an
 * intrinsic, inlining, a gsharedvt indirect call, a context-dependent
 * indirect call, or a plain direct call.
 * NOTE(review): several lines are elided from this view.
 */
7328 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7329 MonoInst **sp, guint8 *ip, int *inline_costs)
7331 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* shared generic valuetype ctors need an explicit rgctx/vtable argument */
7333 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7334 mono_method_is_generic_sharable (cmethod, TRUE)) {
7335 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7336 mono_class_vtable (cfg->domain, cmethod->klass);
7337 CHECK_TYPELOAD (cmethod->klass);
7339 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7340 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7343 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7344 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7346 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7348 CHECK_TYPELOAD (cmethod->klass);
7349 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7354 /* Avoid virtual calls to ctors if possible */
7355 if (mono_class_is_marshalbyref (cmethod->klass))
7356 callvirt_this_arg = sp [0];
7358 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7359 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7360 CHECK_CFG_EXCEPTION;
7361 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7362 mono_method_check_inlining (cfg, cmethod) &&
7363 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7366 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 = size of the NEWOBJ opcode + token the inline replaces */
7367 cfg->real_offset += 5;
7369 *inline_costs += costs - 5;
7371 INLINE_FAILURE ("inline failure");
7372 // FIXME-VT: Clean this up
7373 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7374 GSHAREDVT_FAILURE(*ip);
7375 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7377 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7380 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7382 if (cfg->llvm_only) {
7383 // FIXME: Avoid initializing vtable_arg
7384 emit_llvmonly_calli (cfg, fsig, sp, addr);
7386 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7388 } else if (context_used &&
7389 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7390 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7391 MonoInst *cmethod_addr;
7393 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7395 if (cfg->llvm_only) {
7396 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7397 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7398 emit_llvmonly_calli (cfg, fsig, sp, addr);
7400 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7401 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7403 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7406 INLINE_FAILURE ("ctor call");
7407 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7408 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the store of VAL into the current method's return location:
 * valuetype returns go through the return variable or vret address,
 * soft-float R4 returns are converted via an icall, and everything else
 * uses the arch-specific setret.
 */
7415 emit_setret (MonoCompile *cfg, MonoInst *val)
7417 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* STOBJ return: the value is returned through memory, not a register */
7420 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7423 if (!cfg->vret_addr) {
7424 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7426 EMIT_NEW_RETLOADA (cfg, ret_addr);
7428 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7429 ins->klass = mono_class_from_mono_type (ret_type);
7432 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7433 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7434 MonoInst *iargs [1];
7438 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7439 mono_arch_emit_setret (cfg, cfg->method, conv);
7441 mono_arch_emit_setret (cfg, cfg->method, val);
7444 mono_arch_emit_setret (cfg, cfg->method, val);
7450 * mono_method_to_ir:
7452 * Translate the .net IL into linear IR.
7454 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7455 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7456 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7457 * @inline_args: if not NULL, contains the arguments to the inline call
7458 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7459 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7461 * This method is used to turn ECMA IL into Mono's internal Linear IR
7462 * representation. It is used both for entire methods, as well as
7463 * inlining existing methods. In the former case, the @start_bblock,
7464 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7465 * inline_offset is set to zero.
7467 * Returns: the inline cost, or -1 if there was an error processing this method.
7470 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7471 MonoInst *return_var, MonoInst **inline_args,
7472 guint inline_offset, gboolean is_virtual_call)
7475 MonoInst *ins, **sp, **stack_start;
7476 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7477 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7478 MonoMethod *cmethod, *method_definition;
7479 MonoInst **arg_array;
7480 MonoMethodHeader *header;
7482 guint32 token, ins_flag;
7484 MonoClass *constrained_class = NULL;
7485 unsigned char *ip, *end, *target, *err_pos;
7486 MonoMethodSignature *sig;
7487 MonoGenericContext *generic_context = NULL;
7488 MonoGenericContainer *generic_container = NULL;
7489 MonoType **param_types;
7490 int i, n, start_new_bblock, dreg;
7491 int num_calls = 0, inline_costs = 0;
7492 int breakpoint_id = 0;
7494 GSList *class_inits = NULL;
7495 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7497 gboolean init_locals, seq_points, skip_dead_blocks;
7498 gboolean sym_seq_points = FALSE;
7499 MonoDebugMethodInfo *minfo;
7500 MonoBitSet *seq_point_locs = NULL;
7501 MonoBitSet *seq_point_set_locs = NULL;
7503 cfg->disable_inline = is_jit_optimizer_disabled (method);
7505 /* serialization and xdomain stuff may need access to private fields and methods */
7506 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7507 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7508 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7509 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7510 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7511 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7513 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7514 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7515 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7516 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7517 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7519 image = method->klass->image;
7520 header = mono_method_get_header_checked (method, &cfg->error);
7522 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7523 goto exception_exit;
7525 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7528 generic_container = mono_method_get_generic_container (method);
7529 sig = mono_method_signature (method);
7530 num_args = sig->hasthis + sig->param_count;
7531 ip = (unsigned char*)header->code;
7532 cfg->cil_start = ip;
7533 end = ip + header->code_size;
7534 cfg->stat_cil_code_size += header->code_size;
7536 seq_points = cfg->gen_seq_points && cfg->method == method;
7538 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7539 /* We could hit a seq point before attaching to the JIT (#8338) */
7543 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7544 minfo = mono_debug_lookup_method (method);
7546 MonoSymSeqPoint *sps;
7547 int i, n_il_offsets;
7549 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7550 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7551 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7552 sym_seq_points = TRUE;
7553 for (i = 0; i < n_il_offsets; ++i) {
7554 if (sps [i].il_offset < header->code_size)
7555 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7559 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7561 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7563 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7564 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7566 mono_debug_free_method_async_debug_info (asyncMethod);
7568 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7569 /* Methods without line number info like auto-generated property accessors */
7570 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7571 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7572 sym_seq_points = TRUE;
7577 * Methods without init_locals set could cause asserts in various passes
7578 * (#497220). To work around this, we emit dummy initialization opcodes
7579 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7580 * on some platforms.
7582 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7583 init_locals = header->init_locals;
7587 method_definition = method;
7588 while (method_definition->is_inflated) {
7589 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7590 method_definition = imethod->declaring;
7593 /* SkipVerification is not allowed if core-clr is enabled */
7594 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7596 dont_verify_stloc = TRUE;
7599 if (sig->is_inflated)
7600 generic_context = mono_method_get_context (method);
7601 else if (generic_container)
7602 generic_context = &generic_container->context;
7603 cfg->generic_context = generic_context;
7606 g_assert (!sig->has_type_parameters);
7608 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7609 g_assert (method->is_inflated);
7610 g_assert (mono_method_get_context (method)->method_inst);
7612 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7613 g_assert (sig->generic_param_count);
7615 if (cfg->method == method) {
7616 cfg->real_offset = 0;
7618 cfg->real_offset = inline_offset;
7621 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7622 cfg->cil_offset_to_bb_len = header->code_size;
7624 cfg->current_method = method;
7626 if (cfg->verbose_level > 2)
7627 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7629 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7631 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7632 for (n = 0; n < sig->param_count; ++n)
7633 param_types [n + sig->hasthis] = sig->params [n];
7634 cfg->arg_types = param_types;
7636 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7637 if (cfg->method == method) {
7639 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7640 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7643 NEW_BBLOCK (cfg, start_bblock);
7644 cfg->bb_entry = start_bblock;
7645 start_bblock->cil_code = NULL;
7646 start_bblock->cil_length = 0;
7649 NEW_BBLOCK (cfg, end_bblock);
7650 cfg->bb_exit = end_bblock;
7651 end_bblock->cil_code = NULL;
7652 end_bblock->cil_length = 0;
7653 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7654 g_assert (cfg->num_bblocks == 2);
7656 arg_array = cfg->args;
7658 if (header->num_clauses) {
7659 cfg->spvars = g_hash_table_new (NULL, NULL);
7660 cfg->exvars = g_hash_table_new (NULL, NULL);
7662 /* handle exception clauses */
7663 for (i = 0; i < header->num_clauses; ++i) {
7664 MonoBasicBlock *try_bb;
7665 MonoExceptionClause *clause = &header->clauses [i];
7666 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7668 try_bb->real_offset = clause->try_offset;
7669 try_bb->try_start = TRUE;
7670 try_bb->region = ((i + 1) << 8) | clause->flags;
7671 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7672 tblock->real_offset = clause->handler_offset;
7673 tblock->flags |= BB_EXCEPTION_HANDLER;
7676 * Linking the try block with the EH block hinders inlining as we won't be able to
7677 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7679 if (COMPILE_LLVM (cfg))
7680 link_bblock (cfg, try_bb, tblock);
7682 if (*(ip + clause->handler_offset) == CEE_POP)
7683 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7685 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7686 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7687 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7688 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7689 MONO_ADD_INS (tblock, ins);
7691 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7692 /* finally clauses already have a seq point */
7693 /* seq points for filter clauses are emitted below */
7694 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7695 MONO_ADD_INS (tblock, ins);
7698 /* todo: is a fault block unsafe to optimize? */
7699 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7700 tblock->flags |= BB_EXCEPTION_UNSAFE;
7703 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7705 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7707 /* catch and filter blocks get the exception object on the stack */
7708 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7709 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7711 /* mostly like handle_stack_args (), but just sets the input args */
7712 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7713 tblock->in_scount = 1;
7714 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7715 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7719 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7720 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7721 if (!cfg->compile_llvm) {
7722 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7723 ins->dreg = tblock->in_stack [0]->dreg;
7724 MONO_ADD_INS (tblock, ins);
7727 MonoInst *dummy_use;
7730 * Add a dummy use for the exvar so its liveness info will be
7733 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7736 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7737 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7738 MONO_ADD_INS (tblock, ins);
7741 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7742 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7743 tblock->flags |= BB_EXCEPTION_HANDLER;
7744 tblock->real_offset = clause->data.filter_offset;
7745 tblock->in_scount = 1;
7746 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7747 /* The filter block shares the exvar with the handler block */
7748 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7749 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7750 MONO_ADD_INS (tblock, ins);
7754 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7755 clause->data.catch_class &&
7757 mono_class_check_context_used (clause->data.catch_class)) {
7759 * In shared generic code with catch
7760 * clauses containing type variables
7761 * the exception handling code has to
7762 * be able to get to the rgctx.
7763 * Therefore we have to make sure that
7764 * the vtable/mrgctx argument (for
7765 * static or generic methods) or the
7766 * "this" argument (for non-static
7767 * methods) are live.
7769 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7770 mini_method_get_context (method)->method_inst ||
7771 method->klass->valuetype) {
7772 mono_get_vtable_var (cfg);
7774 MonoInst *dummy_use;
7776 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7781 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7782 cfg->cbb = start_bblock;
7783 cfg->args = arg_array;
7784 mono_save_args (cfg, sig, inline_args);
7787 /* FIRST CODE BLOCK */
7788 NEW_BBLOCK (cfg, tblock);
7789 tblock->cil_code = ip;
7793 ADD_BBLOCK (cfg, tblock);
7795 if (cfg->method == method) {
7796 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7797 if (breakpoint_id) {
7798 MONO_INST_NEW (cfg, ins, OP_BREAK);
7799 MONO_ADD_INS (cfg->cbb, ins);
7803 /* we use a separate basic block for the initialization code */
7804 NEW_BBLOCK (cfg, init_localsbb);
7805 if (cfg->method == method)
7806 cfg->bb_init = init_localsbb;
7807 init_localsbb->real_offset = cfg->real_offset;
7808 start_bblock->next_bb = init_localsbb;
7809 init_localsbb->next_bb = cfg->cbb;
7810 link_bblock (cfg, start_bblock, init_localsbb);
7811 link_bblock (cfg, init_localsbb, cfg->cbb);
7813 cfg->cbb = init_localsbb;
7815 if (cfg->gsharedvt && cfg->method == method) {
7816 MonoGSharedVtMethodInfo *info;
7817 MonoInst *var, *locals_var;
7820 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7821 info->method = cfg->method;
7822 info->count_entries = 16;
7823 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7824 cfg->gsharedvt_info = info;
7826 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7827 /* prevent it from being register allocated */
7828 //var->flags |= MONO_INST_VOLATILE;
7829 cfg->gsharedvt_info_var = var;
7831 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7832 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7834 /* Allocate locals */
7835 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7836 /* prevent it from being register allocated */
7837 //locals_var->flags |= MONO_INST_VOLATILE;
7838 cfg->gsharedvt_locals_var = locals_var;
7840 dreg = alloc_ireg (cfg);
7841 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7843 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7844 ins->dreg = locals_var->dreg;
7846 MONO_ADD_INS (cfg->cbb, ins);
7847 cfg->gsharedvt_locals_var_ins = ins;
7849 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7852 ins->flags |= MONO_INST_INIT;
7856 if (mono_security_core_clr_enabled ()) {
7857 /* check if this is native code, e.g. an icall or a p/invoke */
7858 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7859 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7861 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7862 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7864 /* if this is a native call then it can only be JITted from platform code */
7865 if ((icall || pinvk) && method->klass && method->klass->image) {
7866 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7867 MonoException *ex = icall ? mono_get_exception_security () :
7868 mono_get_exception_method_access ();
7869 emit_throw_exception (cfg, ex);
7876 CHECK_CFG_EXCEPTION;
7878 if (header->code_size == 0)
7881 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7886 if (cfg->method == method)
7887 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7889 for (n = 0; n < header->num_locals; ++n) {
7890 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7895 /* We force the vtable variable here for all shared methods
7896 for the possibility that they might show up in a stack
7897 trace where their exact instantiation is needed. */
7898 if (cfg->gshared && method == cfg->method) {
7899 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7900 mini_method_get_context (method)->method_inst ||
7901 method->klass->valuetype) {
7902 mono_get_vtable_var (cfg);
7904 /* FIXME: Is there a better way to do this?
7905 We need the variable live for the duration
7906 of the whole method. */
7907 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7911 /* add a check for this != NULL to inlined methods */
7912 if (is_virtual_call) {
7915 NEW_ARGLOAD (cfg, arg_ins, 0);
7916 MONO_ADD_INS (cfg->cbb, arg_ins);
7917 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7920 skip_dead_blocks = !dont_verify;
7921 if (skip_dead_blocks) {
7922 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7927 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7928 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7931 start_new_bblock = 0;
7933 if (cfg->method == method)
7934 cfg->real_offset = ip - header->code;
7936 cfg->real_offset = inline_offset;
7941 if (start_new_bblock) {
7942 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7943 if (start_new_bblock == 2) {
7944 g_assert (ip == tblock->cil_code);
7946 GET_BBLOCK (cfg, tblock, ip);
7948 cfg->cbb->next_bb = tblock;
7950 start_new_bblock = 0;
7951 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7952 if (cfg->verbose_level > 3)
7953 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7954 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7958 g_slist_free (class_inits);
7961 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7962 link_bblock (cfg, cfg->cbb, tblock);
7963 if (sp != stack_start) {
7964 handle_stack_args (cfg, stack_start, sp - stack_start);
7966 CHECK_UNVERIFIABLE (cfg);
7968 cfg->cbb->next_bb = tblock;
7970 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7971 if (cfg->verbose_level > 3)
7972 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7973 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7976 g_slist_free (class_inits);
7981 if (skip_dead_blocks) {
7982 int ip_offset = ip - header->code;
7984 if (ip_offset == bb->end)
7988 int op_size = mono_opcode_size (ip, end);
7989 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7991 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7993 if (ip_offset + op_size == bb->end) {
7994 MONO_INST_NEW (cfg, ins, OP_NOP);
7995 MONO_ADD_INS (cfg->cbb, ins);
7996 start_new_bblock = 1;
8004 * Sequence points are points where the debugger can place a breakpoint.
8005 * Currently, we generate these automatically at points where the IL
8008 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8010 * Make methods interruptible at the beginning, and at the targets of
8011 * backward branches.
8012 * Also, do this at the start of every bblock in methods with clauses too,
8013 * to be able to handle instructions with imprecise control flow like
8015 * Backward branches are handled at the end of method-to-ir ().
8017 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8018 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8020 /* Avoid sequence points on empty IL like .volatile */
8021 // FIXME: Enable this
8022 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8023 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8024 if ((sp != stack_start) && !sym_seq_point)
8025 ins->flags |= MONO_INST_NONEMPTY_STACK;
8026 MONO_ADD_INS (cfg->cbb, ins);
8029 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8032 cfg->cbb->real_offset = cfg->real_offset;
8034 if ((cfg->method == method) && cfg->coverage_info) {
8035 guint32 cil_offset = ip - header->code;
8036 cfg->coverage_info->data [cil_offset].cil_code = ip;
8038 /* TODO: Use an increment here */
8039 #if defined(TARGET_X86)
8040 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8041 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8043 MONO_ADD_INS (cfg->cbb, ins);
8045 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8046 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8050 if (cfg->verbose_level > 3)
8051 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8055 if (seq_points && !sym_seq_points && sp != stack_start) {
8057 * The C# compiler uses these nops to notify the JIT that it should
8058 * insert seq points.
8060 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8061 MONO_ADD_INS (cfg->cbb, ins);
8063 if (cfg->keep_cil_nops)
8064 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8066 MONO_INST_NEW (cfg, ins, OP_NOP);
8068 MONO_ADD_INS (cfg->cbb, ins);
8071 if (should_insert_brekpoint (cfg->method)) {
8072 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8074 MONO_INST_NEW (cfg, ins, OP_NOP);
8077 MONO_ADD_INS (cfg->cbb, ins);
8083 CHECK_STACK_OVF (1);
8084 n = (*ip)-CEE_LDARG_0;
8086 EMIT_NEW_ARGLOAD (cfg, ins, n);
8094 CHECK_STACK_OVF (1);
8095 n = (*ip)-CEE_LDLOC_0;
8097 EMIT_NEW_LOCLOAD (cfg, ins, n);
8106 n = (*ip)-CEE_STLOC_0;
8109 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8111 emit_stloc_ir (cfg, sp, header, n);
8118 CHECK_STACK_OVF (1);
8121 EMIT_NEW_ARGLOAD (cfg, ins, n);
8127 CHECK_STACK_OVF (1);
8130 NEW_ARGLOADA (cfg, ins, n);
8131 MONO_ADD_INS (cfg->cbb, ins);
8141 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8143 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8148 CHECK_STACK_OVF (1);
8151 EMIT_NEW_LOCLOAD (cfg, ins, n);
8155 case CEE_LDLOCA_S: {
8156 unsigned char *tmp_ip;
8158 CHECK_STACK_OVF (1);
8159 CHECK_LOCAL (ip [1]);
8161 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8167 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8176 CHECK_LOCAL (ip [1]);
8177 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8179 emit_stloc_ir (cfg, sp, header, ip [1]);
8184 CHECK_STACK_OVF (1);
8185 EMIT_NEW_PCONST (cfg, ins, NULL);
8186 ins->type = STACK_OBJ;
8191 CHECK_STACK_OVF (1);
8192 EMIT_NEW_ICONST (cfg, ins, -1);
8205 CHECK_STACK_OVF (1);
8206 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8212 CHECK_STACK_OVF (1);
8214 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8220 CHECK_STACK_OVF (1);
8221 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8227 CHECK_STACK_OVF (1);
8228 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8229 ins->type = STACK_I8;
8230 ins->dreg = alloc_dreg (cfg, STACK_I8);
8232 ins->inst_l = (gint64)read64 (ip);
8233 MONO_ADD_INS (cfg->cbb, ins);
8239 gboolean use_aotconst = FALSE;
8241 #ifdef TARGET_POWERPC
8242 /* FIXME: Clean this up */
8243 if (cfg->compile_aot)
8244 use_aotconst = TRUE;
8247 /* FIXME: we should really allocate this only late in the compilation process */
8248 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8250 CHECK_STACK_OVF (1);
8256 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8258 dreg = alloc_freg (cfg);
8259 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8260 ins->type = cfg->r4_stack_type;
8262 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8263 ins->type = cfg->r4_stack_type;
8264 ins->dreg = alloc_dreg (cfg, STACK_R8);
8266 MONO_ADD_INS (cfg->cbb, ins);
8276 gboolean use_aotconst = FALSE;
8278 #ifdef TARGET_POWERPC
8279 /* FIXME: Clean this up */
8280 if (cfg->compile_aot)
8281 use_aotconst = TRUE;
8284 /* FIXME: we should really allocate this only late in the compilation process */
8285 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8287 CHECK_STACK_OVF (1);
8293 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8295 dreg = alloc_freg (cfg);
8296 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8297 ins->type = STACK_R8;
8299 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8300 ins->type = STACK_R8;
8301 ins->dreg = alloc_dreg (cfg, STACK_R8);
8303 MONO_ADD_INS (cfg->cbb, ins);
8312 MonoInst *temp, *store;
8314 CHECK_STACK_OVF (1);
8318 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8319 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8321 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8324 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8337 if (sp [0]->type == STACK_R8)
8338 /* we need to pop the value from the x86 FP stack */
8339 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8344 MonoMethodSignature *fsig;
8347 INLINE_FAILURE ("jmp");
8348 GSHAREDVT_FAILURE (*ip);
8351 if (stack_start != sp)
8353 token = read32 (ip + 1);
8354 /* FIXME: check the signature matches */
8355 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8358 if (cfg->gshared && mono_method_check_context_used (cmethod))
8359 GENERIC_SHARING_FAILURE (CEE_JMP);
8361 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8363 fsig = mono_method_signature (cmethod);
8364 n = fsig->param_count + fsig->hasthis;
8365 if (cfg->llvm_only) {
8368 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8369 for (i = 0; i < n; ++i)
8370 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8371 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8373 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8374 * have to emit a normal return since llvm expects it.
8377 emit_setret (cfg, ins);
8378 MONO_INST_NEW (cfg, ins, OP_BR);
8379 ins->inst_target_bb = end_bblock;
8380 MONO_ADD_INS (cfg->cbb, ins);
8381 link_bblock (cfg, cfg->cbb, end_bblock);
8384 } else if (cfg->backend->have_op_tail_call) {
8385 /* Handle tail calls similarly to calls */
8388 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8389 call->method = cmethod;
8390 call->tail_call = TRUE;
8391 call->signature = mono_method_signature (cmethod);
8392 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8393 call->inst.inst_p0 = cmethod;
8394 for (i = 0; i < n; ++i)
8395 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8397 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8398 call->vret_var = cfg->vret_addr;
8400 mono_arch_emit_call (cfg, call);
8401 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8402 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8404 for (i = 0; i < num_args; ++i)
8405 /* Prevent arguments from being optimized away */
8406 arg_array [i]->flags |= MONO_INST_VOLATILE;
8408 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8409 ins = (MonoInst*)call;
8410 ins->inst_p0 = cmethod;
8411 MONO_ADD_INS (cfg->cbb, ins);
8415 start_new_bblock = 1;
8420 MonoMethodSignature *fsig;
8423 token = read32 (ip + 1);
8427 //GSHAREDVT_FAILURE (*ip);
8432 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8435 if (method->dynamic && fsig->pinvoke) {
8439 * This is a call through a function pointer using a pinvoke
8440 * signature. Have to create a wrapper and call that instead.
8441 * FIXME: This is very slow, need to create a wrapper at JIT time
8442 * instead based on the signature.
8444 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8445 EMIT_NEW_PCONST (cfg, args [1], fsig);
8447 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8450 n = fsig->param_count + fsig->hasthis;
8454 //g_assert (!virtual_ || fsig->hasthis);
8458 inline_costs += 10 * num_calls++;
8461 * Making generic calls out of gsharedvt methods.
8462 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8463 * patching gshared method addresses into a gsharedvt method.
8465 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8467 * We pass the address to the gsharedvt trampoline in the rgctx reg
8469 MonoInst *callee = addr;
8471 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8473 GSHAREDVT_FAILURE (*ip);
8477 GSHAREDVT_FAILURE (*ip);
8479 addr = emit_get_rgctx_sig (cfg, context_used,
8480 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8481 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8485 /* Prevent inlining of methods with indirect calls */
8486 INLINE_FAILURE ("indirect call");
8488 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8489 MonoJumpInfoType info_type;
8493 * Instead of emitting an indirect call, emit a direct call
8494 * with the contents of the aotconst as the patch info.
8496 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8497 info_type = (MonoJumpInfoType)addr->inst_c1;
8498 info_data = addr->inst_p0;
8500 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8501 info_data = addr->inst_right->inst_left;
8504 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8505 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8508 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8509 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8514 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8518 /* End of call, INS should contain the result of the call, if any */
8520 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8522 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8525 CHECK_CFG_EXCEPTION;
8529 constrained_class = NULL;
8533 case CEE_CALLVIRT: {
8534 MonoInst *addr = NULL;
8535 MonoMethodSignature *fsig = NULL;
8537 int virtual_ = *ip == CEE_CALLVIRT;
8538 gboolean pass_imt_from_rgctx = FALSE;
8539 MonoInst *imt_arg = NULL;
8540 MonoInst *keep_this_alive = NULL;
8541 gboolean pass_vtable = FALSE;
8542 gboolean pass_mrgctx = FALSE;
8543 MonoInst *vtable_arg = NULL;
8544 gboolean check_this = FALSE;
8545 gboolean supported_tail_call = FALSE;
8546 gboolean tail_call = FALSE;
8547 gboolean need_seq_point = FALSE;
8548 guint32 call_opcode = *ip;
8549 gboolean emit_widen = TRUE;
8550 gboolean push_res = TRUE;
8551 gboolean skip_ret = FALSE;
8552 gboolean delegate_invoke = FALSE;
8553 gboolean direct_icall = FALSE;
8554 gboolean constrained_partial_call = FALSE;
8555 MonoMethod *cil_method;
8558 token = read32 (ip + 1);
8562 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8565 cil_method = cmethod;
8567 if (constrained_class) {
8568 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8569 if (!mini_is_gsharedvt_klass (constrained_class)) {
8570 g_assert (!cmethod->klass->valuetype);
8571 if (!mini_type_is_reference (&constrained_class->byval_arg))
8572 constrained_partial_call = TRUE;
8576 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8577 if (cfg->verbose_level > 2)
8578 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8579 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8580 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8582 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8586 if (cfg->verbose_level > 2)
8587 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8589 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8591 * This is needed since get_method_constrained can't find
8592 * the method in klass representing a type var.
8593 * The type var is guaranteed to be a reference type in this
8596 if (!mini_is_gsharedvt_klass (constrained_class))
8597 g_assert (!cmethod->klass->valuetype);
8599 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8604 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8605 /* Use the corresponding method from the base type to avoid boxing */
8606 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8607 g_assert (base_type);
8608 constrained_class = mono_class_from_mono_type (base_type);
8609 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8614 if (!dont_verify && !cfg->skip_visibility) {
8615 MonoMethod *target_method = cil_method;
8616 if (method->is_inflated) {
8617 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8620 if (!mono_method_can_access_method (method_definition, target_method) &&
8621 !mono_method_can_access_method (method, cil_method))
8622 emit_method_access_failure (cfg, method, cil_method);
8625 if (mono_security_core_clr_enabled ())
8626 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8628 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8629 /* MS.NET seems to silently convert this to a callvirt */
8634 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8635 * converts to a callvirt.
8637 * tests/bug-515884.il is an example of this behavior
8639 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8640 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8641 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8645 if (!cmethod->klass->inited)
8646 if (!mono_class_init (cmethod->klass))
8647 TYPE_LOAD_ERROR (cmethod->klass);
8649 fsig = mono_method_signature (cmethod);
8652 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8653 mini_class_is_system_array (cmethod->klass)) {
8654 array_rank = cmethod->klass->rank;
8655 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8656 direct_icall = TRUE;
8657 } else if (fsig->pinvoke) {
8658 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8659 fsig = mono_method_signature (wrapper);
8660 } else if (constrained_class) {
8662 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8666 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8667 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8669 /* See code below */
8670 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8671 MonoBasicBlock *tbb;
8673 GET_BBLOCK (cfg, tbb, ip + 5);
8674 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8676 * We want to extend the try block to cover the call, but we can't do it if the
8677 * call is made directly since it's followed by an exception check.
8679 direct_icall = FALSE;
8683 mono_save_token_info (cfg, image, token, cil_method);
8685 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8686 need_seq_point = TRUE;
8688 /* Don't support calls made using type arguments for now */
8690 if (cfg->gsharedvt) {
8691 if (mini_is_gsharedvt_signature (fsig))
8692 GSHAREDVT_FAILURE (*ip);
8696 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8697 g_assert_not_reached ();
8699 n = fsig->param_count + fsig->hasthis;
8701 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8705 g_assert (!mono_method_check_context_used (cmethod));
8709 //g_assert (!virtual_ || fsig->hasthis);
8714 * We have the `constrained.' prefix opcode.
8716 if (constrained_class) {
8717 if (mini_is_gsharedvt_klass (constrained_class)) {
8718 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8719 /* The 'Own method' case below */
8720 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8721 /* 'The type parameter is instantiated as a reference type' case below. */
8723 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8724 CHECK_CFG_EXCEPTION;
8730 if (constrained_partial_call) {
8731 gboolean need_box = TRUE;
8734 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8735 * called method is not known at compile time either. The called method could end up being
8736 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8737 * to box the receiver.
8738 * A simple solution would be to box always and make a normal virtual call, but that would
8739 * be bad performance wise.
8741 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8743 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8748 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8749 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8750 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8751 ins->klass = constrained_class;
8752 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8753 CHECK_CFG_EXCEPTION;
8754 } else if (need_box) {
8756 MonoBasicBlock *is_ref_bb, *end_bb;
8757 MonoInst *nonbox_call;
8760 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8762 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8763 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8765 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8767 NEW_BBLOCK (cfg, is_ref_bb);
8768 NEW_BBLOCK (cfg, end_bb);
8770 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8771 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8772 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8775 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8777 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8780 MONO_START_BB (cfg, is_ref_bb);
8781 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8782 ins->klass = constrained_class;
8783 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8784 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8786 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8788 MONO_START_BB (cfg, end_bb);
8791 nonbox_call->dreg = ins->dreg;
8794 g_assert (mono_class_is_interface (cmethod->klass));
8795 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8796 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8799 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8801 * The type parameter is instantiated as a valuetype,
8802 * but that type doesn't override the method we're
8803 * calling, so we need to box `this'.
8805 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8806 ins->klass = constrained_class;
8807 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8808 CHECK_CFG_EXCEPTION;
8809 } else if (!constrained_class->valuetype) {
8810 int dreg = alloc_ireg_ref (cfg);
8813 * The type parameter is instantiated as a reference
8814 * type. We have a managed pointer on the stack, so
8815 * we need to dereference it here.
8817 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8818 ins->type = STACK_OBJ;
8821 if (cmethod->klass->valuetype) {
8824 /* Interface method */
8827 mono_class_setup_vtable (constrained_class);
8828 CHECK_TYPELOAD (constrained_class);
8829 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8831 TYPE_LOAD_ERROR (constrained_class);
8832 slot = mono_method_get_vtable_slot (cmethod);
8834 TYPE_LOAD_ERROR (cmethod->klass);
8835 cmethod = constrained_class->vtable [ioffset + slot];
8837 if (cmethod->klass == mono_defaults.enum_class) {
8838 /* Enum implements some interfaces, so treat this as the first case */
8839 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8840 ins->klass = constrained_class;
8841 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8842 CHECK_CFG_EXCEPTION;
8847 constrained_class = NULL;
8850 if (check_call_signature (cfg, fsig, sp))
8853 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8854 delegate_invoke = TRUE;
8856 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8857 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8858 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8866 * If the callee is a shared method, then its static cctor
8867 * might not get called after the call was patched.
8869 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8870 emit_class_init (cfg, cmethod->klass);
8871 CHECK_TYPELOAD (cmethod->klass);
8874 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8877 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8879 context_used = mini_method_check_context_used (cfg, cmethod);
8881 if (context_used && mono_class_is_interface (cmethod->klass)) {
8882 /* Generic method interface
8883 calls are resolved via a
8884 helper function and don't
8886 if (!cmethod_context || !cmethod_context->method_inst)
8887 pass_imt_from_rgctx = TRUE;
8891 * If a shared method calls another
8892 * shared method then the caller must
8893 * have a generic sharing context
8894 * because the magic trampoline
8895 * requires it. FIXME: We shouldn't
8896 * have to force the vtable/mrgctx
8897 * variable here. Instead there
8898 * should be a flag in the cfg to
8899 * request a generic sharing context.
8902 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8903 mono_get_vtable_var (cfg);
8908 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8910 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8912 CHECK_TYPELOAD (cmethod->klass);
8913 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8918 g_assert (!vtable_arg);
8920 if (!cfg->compile_aot) {
8922 * emit_get_rgctx_method () calls mono_class_vtable () so check
8923 * for type load errors before.
8925 mono_class_setup_vtable (cmethod->klass);
8926 CHECK_TYPELOAD (cmethod->klass);
8929 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8931 /* !marshalbyref is needed to properly handle generic methods + remoting */
8932 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8933 MONO_METHOD_IS_FINAL (cmethod)) &&
8934 !mono_class_is_marshalbyref (cmethod->klass)) {
8941 if (pass_imt_from_rgctx) {
8942 g_assert (!pass_vtable);
8944 imt_arg = emit_get_rgctx_method (cfg, context_used,
8945 cmethod, MONO_RGCTX_INFO_METHOD);
8949 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8951 /* Calling virtual generic methods */
8952 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8953 !(MONO_METHOD_IS_FINAL (cmethod) &&
8954 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8955 fsig->generic_param_count &&
8956 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8958 MonoInst *this_temp, *this_arg_temp, *store;
8959 MonoInst *iargs [4];
8961 g_assert (fsig->is_inflated);
8963 /* Prevent inlining of methods that contain indirect calls */
8964 INLINE_FAILURE ("virtual generic call");
8966 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8967 GSHAREDVT_FAILURE (*ip);
8969 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8970 g_assert (!imt_arg);
8972 g_assert (cmethod->is_inflated);
8973 imt_arg = emit_get_rgctx_method (cfg, context_used,
8974 cmethod, MONO_RGCTX_INFO_METHOD);
8975 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8977 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8978 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8979 MONO_ADD_INS (cfg->cbb, store);
8981 /* FIXME: This should be a managed pointer */
8982 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8984 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8985 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8986 cmethod, MONO_RGCTX_INFO_METHOD);
8987 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8988 addr = mono_emit_jit_icall (cfg,
8989 mono_helper_compile_generic_method, iargs);
8991 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8993 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9000 * Implement a workaround for the inherent races involved in locking:
9006 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9007 * try block, the Exit () won't be executed, see:
9008 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9009 * To work around this, we extend such try blocks to include the last x bytes
9010 * of the Monitor.Enter () call.
9012 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9013 MonoBasicBlock *tbb;
9015 GET_BBLOCK (cfg, tbb, ip + 5);
9017 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9018 * from Monitor.Enter like ArgumentNullException.
9020 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9021 /* Mark this bblock as needing to be extended */
9022 tbb->extend_try_block = TRUE;
9026 /* Conversion to a JIT intrinsic */
9027 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9028 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9029 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9037 if ((cfg->opt & MONO_OPT_INLINE) &&
9038 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9039 mono_method_check_inlining (cfg, cmethod)) {
9041 gboolean always = FALSE;
9043 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9044 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9045 /* Prevent inlining of methods that call wrappers */
9046 INLINE_FAILURE ("wrapper call");
9047 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9051 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9053 cfg->real_offset += 5;
9055 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9056 /* *sp is already set by inline_method */
9061 inline_costs += costs;
9067 /* Tail recursion elimination */
9068 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9069 gboolean has_vtargs = FALSE;
9072 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9073 INLINE_FAILURE ("tail call");
9075 /* keep it simple */
9076 for (i = fsig->param_count - 1; i >= 0; i--) {
9077 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9082 if (need_seq_point) {
9083 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9084 need_seq_point = FALSE;
9086 for (i = 0; i < n; ++i)
9087 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9088 MONO_INST_NEW (cfg, ins, OP_BR);
9089 MONO_ADD_INS (cfg->cbb, ins);
9090 tblock = start_bblock->out_bb [0];
9091 link_bblock (cfg, cfg->cbb, tblock);
9092 ins->inst_target_bb = tblock;
9093 start_new_bblock = 1;
9095 /* skip the CEE_RET, too */
9096 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9103 inline_costs += 10 * num_calls++;
9106 * Synchronized wrappers.
9107 * It's hard to determine where to replace a method with its synchronized
9108 * wrapper without causing an infinite recursion. The current solution is
9109 * to add the synchronized wrapper in the trampolines, and to
9110 * change the called method to a dummy wrapper, and resolve that wrapper
9111 * to the real method in mono_jit_compile_method ().
9113 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9114 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9115 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9116 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9120 * Making generic calls out of gsharedvt methods.
9121 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9122 * patching gshared method addresses into a gsharedvt method.
9124 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9125 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9126 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9127 MonoRgctxInfoType info_type;
9130 //if (mono_class_is_interface (cmethod->klass))
9131 //GSHAREDVT_FAILURE (*ip);
9132 // disable for possible remoting calls
9133 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9134 GSHAREDVT_FAILURE (*ip);
9135 if (fsig->generic_param_count) {
9136 /* virtual generic call */
9137 g_assert (!imt_arg);
9138 /* Same as the virtual generic case above */
9139 imt_arg = emit_get_rgctx_method (cfg, context_used,
9140 cmethod, MONO_RGCTX_INFO_METHOD);
9141 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9143 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9144 /* This can happen when we call a fully instantiated iface method */
9145 imt_arg = emit_get_rgctx_method (cfg, context_used,
9146 cmethod, MONO_RGCTX_INFO_METHOD);
9151 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9152 keep_this_alive = sp [0];
9154 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9155 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9157 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9158 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9160 if (cfg->llvm_only) {
9161 // FIXME: Avoid initializing vtable_arg
9162 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9164 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9169 /* Generic sharing */
9172 * Use this if the callee is gsharedvt sharable too, since
9173 * at runtime we might find an instantiation so the call cannot
9174 * be patched (the 'no_patch' code path in mini-trampolines.c).
9176 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9177 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9178 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9179 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9180 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9181 INLINE_FAILURE ("gshared");
9183 g_assert (cfg->gshared && cmethod);
9187 * We are compiling a call to a
9188 * generic method from shared code,
9189 * which means that we have to look up
9190 * the method in the rgctx and do an
9194 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9196 if (cfg->llvm_only) {
9197 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9198 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9200 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9201 // FIXME: Avoid initializing imt_arg/vtable_arg
9202 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9204 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9205 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9210 /* Direct calls to icalls */
9212 MonoMethod *wrapper;
9215 /* Inline the wrapper */
9216 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9218 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9219 g_assert (costs > 0);
9220 cfg->real_offset += 5;
9222 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9223 /* *sp is already set by inline_method */
9228 inline_costs += costs;
9237 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9238 MonoInst *val = sp [fsig->param_count];
9240 if (val->type == STACK_OBJ) {
9241 MonoInst *iargs [2];
9246 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9249 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9250 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9251 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9252 emit_write_barrier (cfg, addr, val);
9253 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9254 GSHAREDVT_FAILURE (*ip);
9255 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9256 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9258 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9259 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9260 if (!cmethod->klass->element_class->valuetype && !readonly)
9261 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9262 CHECK_TYPELOAD (cmethod->klass);
9265 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9268 g_assert_not_reached ();
9275 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9279 /* Tail prefix / tail call optimization */
9281 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9282 /* FIXME: runtime generic context pointer for jumps? */
9283 /* FIXME: handle this for generic sharing eventually */
9284 if ((ins_flag & MONO_INST_TAILCALL) &&
9285 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9286 supported_tail_call = TRUE;
9288 if (supported_tail_call) {
9291 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9292 INLINE_FAILURE ("tail call");
9294 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9296 if (cfg->backend->have_op_tail_call) {
9297 /* Handle tail calls similarly to normal calls */
9300 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9302 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9303 call->tail_call = TRUE;
9304 call->method = cmethod;
9305 call->signature = mono_method_signature (cmethod);
9308 * We implement tail calls by storing the actual arguments into the
9309 * argument variables, then emitting a CEE_JMP.
9311 for (i = 0; i < n; ++i) {
9312 /* Prevent argument from being register allocated */
9313 arg_array [i]->flags |= MONO_INST_VOLATILE;
9314 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9316 ins = (MonoInst*)call;
9317 ins->inst_p0 = cmethod;
9318 ins->inst_p1 = arg_array [0];
9319 MONO_ADD_INS (cfg->cbb, ins);
9320 link_bblock (cfg, cfg->cbb, end_bblock);
9321 start_new_bblock = 1;
9323 // FIXME: Eliminate unreachable epilogs
9326 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9327 * only reachable from this call.
9329 GET_BBLOCK (cfg, tblock, ip + 5);
9330 if (tblock == cfg->cbb || tblock->in_count == 0)
9339 * Virtual calls in llvm-only mode.
9341 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9342 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9347 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9348 INLINE_FAILURE ("call");
9349 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9350 imt_arg, vtable_arg);
9352 if (tail_call && !cfg->llvm_only) {
9353 link_bblock (cfg, cfg->cbb, end_bblock);
9354 start_new_bblock = 1;
9356 // FIXME: Eliminate unreachable epilogs
9359 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9360 * only reachable from this call.
9362 GET_BBLOCK (cfg, tblock, ip + 5);
9363 if (tblock == cfg->cbb || tblock->in_count == 0)
9370 /* End of call, INS should contain the result of the call, if any */
9372 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9375 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9380 if (keep_this_alive) {
9381 MonoInst *dummy_use;
9383 /* See mono_emit_method_call_full () */
9384 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9387 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9389 * Clang can convert these calls to tail calls which screw up the stack
9390 * walk. This happens even when the -fno-optimize-sibling-calls
9391 * option is passed to clang.
9392 * Work around this by emitting a dummy call.
9394 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9397 CHECK_CFG_EXCEPTION;
9401 g_assert (*ip == CEE_RET);
9405 constrained_class = NULL;
9407 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9411 if (cfg->method != method) {
9412 /* return from inlined method */
9414 * If in_count == 0, that means the ret is unreachable due to
9415 * being preceded by a throw. In that case, inline_method () will
9416 * handle setting the return value
9417 * (test case: test_0_inline_throw ()).
9419 if (return_var && cfg->cbb->in_count) {
9420 MonoType *ret_type = mono_method_signature (method)->ret;
9426 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9429 //g_assert (returnvar != -1);
9430 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9431 cfg->ret_var_set = TRUE;
9434 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9436 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9440 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9442 if (seq_points && !sym_seq_points) {
9444 * Place a seq point here too even though the IL stack is not
9445 * empty, so a step over on
9448 * will work correctly.
9450 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9451 MONO_ADD_INS (cfg->cbb, ins);
9454 g_assert (!return_var);
9458 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9461 emit_setret (cfg, *sp);
9464 if (sp != stack_start)
9466 MONO_INST_NEW (cfg, ins, OP_BR);
9468 ins->inst_target_bb = end_bblock;
9469 MONO_ADD_INS (cfg->cbb, ins);
9470 link_bblock (cfg, cfg->cbb, end_bblock);
9471 start_new_bblock = 1;
9475 MONO_INST_NEW (cfg, ins, OP_BR);
9477 target = ip + 1 + (signed char)(*ip);
9479 GET_BBLOCK (cfg, tblock, target);
9480 link_bblock (cfg, cfg->cbb, tblock);
9481 ins->inst_target_bb = tblock;
9482 if (sp != stack_start) {
9483 handle_stack_args (cfg, stack_start, sp - stack_start);
9485 CHECK_UNVERIFIABLE (cfg);
9487 MONO_ADD_INS (cfg->cbb, ins);
9488 start_new_bblock = 1;
9489 inline_costs += BRANCH_COST;
9503 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9505 target = ip + 1 + *(signed char*)ip;
9511 inline_costs += BRANCH_COST;
9515 MONO_INST_NEW (cfg, ins, OP_BR);
9518 target = ip + 4 + (gint32)read32(ip);
9520 GET_BBLOCK (cfg, tblock, target);
9521 link_bblock (cfg, cfg->cbb, tblock);
9522 ins->inst_target_bb = tblock;
9523 if (sp != stack_start) {
9524 handle_stack_args (cfg, stack_start, sp - stack_start);
9526 CHECK_UNVERIFIABLE (cfg);
9529 MONO_ADD_INS (cfg->cbb, ins);
9531 start_new_bblock = 1;
9532 inline_costs += BRANCH_COST;
9539 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9540 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9541 guint32 opsize = is_short ? 1 : 4;
9543 CHECK_OPSIZE (opsize);
9545 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9548 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9553 GET_BBLOCK (cfg, tblock, target);
9554 link_bblock (cfg, cfg->cbb, tblock);
9555 GET_BBLOCK (cfg, tblock, ip);
9556 link_bblock (cfg, cfg->cbb, tblock);
9558 if (sp != stack_start) {
9559 handle_stack_args (cfg, stack_start, sp - stack_start);
9560 CHECK_UNVERIFIABLE (cfg);
9563 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9564 cmp->sreg1 = sp [0]->dreg;
9565 type_from_op (cfg, cmp, sp [0], NULL);
9568 #if SIZEOF_REGISTER == 4
9569 if (cmp->opcode == OP_LCOMPARE_IMM) {
9570 /* Convert it to OP_LCOMPARE */
9571 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9572 ins->type = STACK_I8;
9573 ins->dreg = alloc_dreg (cfg, STACK_I8);
9575 MONO_ADD_INS (cfg->cbb, ins);
9576 cmp->opcode = OP_LCOMPARE;
9577 cmp->sreg2 = ins->dreg;
9580 MONO_ADD_INS (cfg->cbb, cmp);
9582 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9583 type_from_op (cfg, ins, sp [0], NULL);
9584 MONO_ADD_INS (cfg->cbb, ins);
9585 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9586 GET_BBLOCK (cfg, tblock, target);
9587 ins->inst_true_bb = tblock;
9588 GET_BBLOCK (cfg, tblock, ip);
9589 ins->inst_false_bb = tblock;
9590 start_new_bblock = 2;
9593 inline_costs += BRANCH_COST;
9608 MONO_INST_NEW (cfg, ins, *ip);
9610 target = ip + 4 + (gint32)read32(ip);
9616 inline_costs += BRANCH_COST;
9620 MonoBasicBlock **targets;
9621 MonoBasicBlock *default_bblock;
9622 MonoJumpInfoBBTable *table;
9623 int offset_reg = alloc_preg (cfg);
9624 int target_reg = alloc_preg (cfg);
9625 int table_reg = alloc_preg (cfg);
9626 int sum_reg = alloc_preg (cfg);
9627 gboolean use_op_switch;
9631 n = read32 (ip + 1);
9634 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9638 CHECK_OPSIZE (n * sizeof (guint32));
9639 target = ip + n * sizeof (guint32);
9641 GET_BBLOCK (cfg, default_bblock, target);
9642 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9644 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9645 for (i = 0; i < n; ++i) {
9646 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9647 targets [i] = tblock;
9648 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9652 if (sp != stack_start) {
9654 * Link the current bb with the targets as well, so handle_stack_args
9655 * will set their in_stack correctly.
9657 link_bblock (cfg, cfg->cbb, default_bblock);
9658 for (i = 0; i < n; ++i)
9659 link_bblock (cfg, cfg->cbb, targets [i]);
9661 handle_stack_args (cfg, stack_start, sp - stack_start);
9663 CHECK_UNVERIFIABLE (cfg);
9665 /* Undo the links */
9666 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9667 for (i = 0; i < n; ++i)
9668 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9671 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9674 for (i = 0; i < n; ++i)
9675 link_bblock (cfg, cfg->cbb, targets [i]);
9677 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9678 table->table = targets;
9679 table->table_size = n;
9681 use_op_switch = FALSE;
9683 /* ARM implements SWITCH statements differently */
9684 /* FIXME: Make it use the generic implementation */
9685 if (!cfg->compile_aot)
9686 use_op_switch = TRUE;
9689 if (COMPILE_LLVM (cfg))
9690 use_op_switch = TRUE;
9692 cfg->cbb->has_jump_table = 1;
9694 if (use_op_switch) {
9695 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9696 ins->sreg1 = src1->dreg;
9697 ins->inst_p0 = table;
9698 ins->inst_many_bb = targets;
9699 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9700 MONO_ADD_INS (cfg->cbb, ins);
9702 if (sizeof (gpointer) == 8)
9703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9707 #if SIZEOF_REGISTER == 8
9708 /* The upper word might not be zero, and we add it to a 64 bit address later */
9709 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9712 if (cfg->compile_aot) {
9713 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9715 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9716 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9717 ins->inst_p0 = table;
9718 ins->dreg = table_reg;
9719 MONO_ADD_INS (cfg->cbb, ins);
9722 /* FIXME: Use load_memindex */
9723 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9725 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9727 start_new_bblock = 1;
9728 inline_costs += (BRANCH_COST * 2);
9748 dreg = alloc_freg (cfg);
9751 dreg = alloc_lreg (cfg);
9754 dreg = alloc_ireg_ref (cfg);
9757 dreg = alloc_preg (cfg);
9760 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9761 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9762 if (*ip == CEE_LDIND_R4)
9763 ins->type = cfg->r4_stack_type;
9764 ins->flags |= ins_flag;
9765 MONO_ADD_INS (cfg->cbb, ins);
9767 if (ins_flag & MONO_INST_VOLATILE) {
9768 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9769 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9785 if (ins_flag & MONO_INST_VOLATILE) {
9786 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9787 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9790 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9791 ins->flags |= ins_flag;
9794 MONO_ADD_INS (cfg->cbb, ins);
9796 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9797 emit_write_barrier (cfg, sp [0], sp [1]);
9806 MONO_INST_NEW (cfg, ins, (*ip));
9808 ins->sreg1 = sp [0]->dreg;
9809 ins->sreg2 = sp [1]->dreg;
9810 type_from_op (cfg, ins, sp [0], sp [1]);
9812 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9814 /* Use the immediate opcodes if possible */
9815 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9816 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9817 if (imm_opcode != -1) {
9818 ins->opcode = imm_opcode;
9819 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9822 NULLIFY_INS (sp [1]);
9826 MONO_ADD_INS ((cfg)->cbb, (ins));
9828 *sp++ = mono_decompose_opcode (cfg, ins);
9845 MONO_INST_NEW (cfg, ins, (*ip));
9847 ins->sreg1 = sp [0]->dreg;
9848 ins->sreg2 = sp [1]->dreg;
9849 type_from_op (cfg, ins, sp [0], sp [1]);
9851 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9852 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9854 /* FIXME: Pass opcode to is_inst_imm */
9856 /* Use the immediate opcodes if possible */
9857 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9858 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9859 if (imm_opcode != -1) {
9860 ins->opcode = imm_opcode;
9861 if (sp [1]->opcode == OP_I8CONST) {
9862 #if SIZEOF_REGISTER == 8
9863 ins->inst_imm = sp [1]->inst_l;
9865 ins->inst_ls_word = sp [1]->inst_ls_word;
9866 ins->inst_ms_word = sp [1]->inst_ms_word;
9870 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9873 /* Might be followed by an instruction added by add_widen_op */
9874 if (sp [1]->next == NULL)
9875 NULLIFY_INS (sp [1]);
9878 MONO_ADD_INS ((cfg)->cbb, (ins));
9880 *sp++ = mono_decompose_opcode (cfg, ins);
9893 case CEE_CONV_OVF_I8:
9894 case CEE_CONV_OVF_U8:
9898 /* Special case this earlier so we have long constants in the IR */
9899 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9900 int data = sp [-1]->inst_c0;
9901 sp [-1]->opcode = OP_I8CONST;
9902 sp [-1]->type = STACK_I8;
9903 #if SIZEOF_REGISTER == 8
9904 if ((*ip) == CEE_CONV_U8)
9905 sp [-1]->inst_c0 = (guint32)data;
9907 sp [-1]->inst_c0 = data;
9909 sp [-1]->inst_ls_word = data;
9910 if ((*ip) == CEE_CONV_U8)
9911 sp [-1]->inst_ms_word = 0;
9913 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9915 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9922 case CEE_CONV_OVF_I4:
9923 case CEE_CONV_OVF_I1:
9924 case CEE_CONV_OVF_I2:
9925 case CEE_CONV_OVF_I:
9926 case CEE_CONV_OVF_U:
9929 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9930 ADD_UNOP (CEE_CONV_OVF_I8);
9937 case CEE_CONV_OVF_U1:
9938 case CEE_CONV_OVF_U2:
9939 case CEE_CONV_OVF_U4:
9942 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9943 ADD_UNOP (CEE_CONV_OVF_U8);
9950 case CEE_CONV_OVF_I1_UN:
9951 case CEE_CONV_OVF_I2_UN:
9952 case CEE_CONV_OVF_I4_UN:
9953 case CEE_CONV_OVF_I8_UN:
9954 case CEE_CONV_OVF_U1_UN:
9955 case CEE_CONV_OVF_U2_UN:
9956 case CEE_CONV_OVF_U4_UN:
9957 case CEE_CONV_OVF_U8_UN:
9958 case CEE_CONV_OVF_I_UN:
9959 case CEE_CONV_OVF_U_UN:
9966 CHECK_CFG_EXCEPTION;
9970 case CEE_ADD_OVF_UN:
9972 case CEE_MUL_OVF_UN:
9974 case CEE_SUB_OVF_UN:
9980 GSHAREDVT_FAILURE (*ip);
9983 token = read32 (ip + 1);
9984 klass = mini_get_class (method, token, generic_context);
9985 CHECK_TYPELOAD (klass);
9987 if (generic_class_is_reference_type (cfg, klass)) {
9988 MonoInst *store, *load;
9989 int dreg = alloc_ireg_ref (cfg);
9991 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9992 load->flags |= ins_flag;
9993 MONO_ADD_INS (cfg->cbb, load);
9995 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9996 store->flags |= ins_flag;
9997 MONO_ADD_INS (cfg->cbb, store);
9999 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10000 emit_write_barrier (cfg, sp [0], sp [1]);
10002 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10008 int loc_index = -1;
10014 token = read32 (ip + 1);
10015 klass = mini_get_class (method, token, generic_context);
10016 CHECK_TYPELOAD (klass);
10018 /* Optimize the common ldobj+stloc combination */
10021 loc_index = ip [6];
10028 loc_index = ip [5] - CEE_STLOC_0;
10035 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10036 CHECK_LOCAL (loc_index);
10038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10039 ins->dreg = cfg->locals [loc_index]->dreg;
10040 ins->flags |= ins_flag;
10043 if (ins_flag & MONO_INST_VOLATILE) {
10044 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10045 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10051 /* Optimize the ldobj+stobj combination */
10052 /* The reference case ends up being a load+store anyway */
10053 /* Skip this if the operation is volatile. */
10054 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10059 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10066 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10067 ins->flags |= ins_flag;
10070 if (ins_flag & MONO_INST_VOLATILE) {
10071 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10072 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10081 CHECK_STACK_OVF (1);
10083 n = read32 (ip + 1);
10085 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10086 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10087 ins->type = STACK_OBJ;
10090 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10091 MonoInst *iargs [1];
10092 char *str = (char *)mono_method_get_wrapper_data (method, n);
10094 if (cfg->compile_aot)
10095 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10097 EMIT_NEW_PCONST (cfg, iargs [0], str);
10098 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10100 if (cfg->opt & MONO_OPT_SHARED) {
10101 MonoInst *iargs [3];
10103 if (cfg->compile_aot) {
10104 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10106 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10107 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10108 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10109 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10110 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10113 if (cfg->cbb->out_of_line) {
10114 MonoInst *iargs [2];
10116 if (image == mono_defaults.corlib) {
10118 * Avoid relocations in AOT and save some space by using a
10119 * version of helper_ldstr specialized to mscorlib.
10121 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10122 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10124 /* Avoid creating the string object */
10125 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10126 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10127 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10131 if (cfg->compile_aot) {
10132 NEW_LDSTRCONST (cfg, ins, image, n);
10134 MONO_ADD_INS (cfg->cbb, ins);
10137 NEW_PCONST (cfg, ins, NULL);
10138 ins->type = STACK_OBJ;
10139 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10143 OUT_OF_MEMORY_FAILURE;
10146 MONO_ADD_INS (cfg->cbb, ins);
10155 MonoInst *iargs [2];
10156 MonoMethodSignature *fsig;
10159 MonoInst *vtable_arg = NULL;
10162 token = read32 (ip + 1);
10163 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10166 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10169 mono_save_token_info (cfg, image, token, cmethod);
10171 if (!mono_class_init (cmethod->klass))
10172 TYPE_LOAD_ERROR (cmethod->klass);
10174 context_used = mini_method_check_context_used (cfg, cmethod);
10176 if (mono_security_core_clr_enabled ())
10177 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10179 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10180 emit_class_init (cfg, cmethod->klass);
10181 CHECK_TYPELOAD (cmethod->klass);
10185 if (cfg->gsharedvt) {
10186 if (mini_is_gsharedvt_variable_signature (sig))
10187 GSHAREDVT_FAILURE (*ip);
10191 n = fsig->param_count;
10195 * Generate smaller code for the common newobj <exception> instruction in
10196 * argument checking code.
10198 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10199 is_exception_class (cmethod->klass) && n <= 2 &&
10200 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10201 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10202 MonoInst *iargs [3];
10206 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10209 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10212 iargs [1] = sp [0];
10213 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10216 iargs [1] = sp [0];
10217 iargs [2] = sp [1];
10218 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10221 g_assert_not_reached ();
10229 /* move the args to allow room for 'this' in the first position */
10235 /* check_call_signature () requires sp[0] to be set */
10236 this_ins.type = STACK_OBJ;
10237 sp [0] = &this_ins;
10238 if (check_call_signature (cfg, fsig, sp))
10243 if (mini_class_is_system_array (cmethod->klass)) {
10244 *sp = emit_get_rgctx_method (cfg, context_used,
10245 cmethod, MONO_RGCTX_INFO_METHOD);
10247 /* Avoid varargs in the common case */
10248 if (fsig->param_count == 1)
10249 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10250 else if (fsig->param_count == 2)
10251 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10252 else if (fsig->param_count == 3)
10253 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10254 else if (fsig->param_count == 4)
10255 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10257 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10258 } else if (cmethod->string_ctor) {
10259 g_assert (!context_used);
10260 g_assert (!vtable_arg);
10261 /* we simply pass a null pointer */
10262 EMIT_NEW_PCONST (cfg, *sp, NULL);
10263 /* now call the string ctor */
10264 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10266 if (cmethod->klass->valuetype) {
10267 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10268 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10269 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10274 * The code generated by mini_emit_virtual_call () expects
10275 * iargs [0] to be a boxed instance, but luckily the vcall
10276 * will be transformed into a normal call there.
10278 } else if (context_used) {
10279 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10282 MonoVTable *vtable = NULL;
10284 if (!cfg->compile_aot)
10285 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10286 CHECK_TYPELOAD (cmethod->klass);
10289 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10290 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10291 * As a workaround, we call class cctors before allocating objects.
10293 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10294 emit_class_init (cfg, cmethod->klass);
10295 if (cfg->verbose_level > 2)
10296 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10297 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10300 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10303 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10306 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10308 /* Now call the actual ctor */
10309 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10310 CHECK_CFG_EXCEPTION;
10313 if (alloc == NULL) {
10315 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10316 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10324 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10325 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10328 case CEE_CASTCLASS:
10333 token = read32 (ip + 1);
10334 klass = mini_get_class (method, token, generic_context);
10335 CHECK_TYPELOAD (klass);
10336 if (sp [0]->type != STACK_OBJ)
10339 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10340 ins->dreg = alloc_preg (cfg);
10341 ins->sreg1 = (*sp)->dreg;
10342 ins->klass = klass;
10343 ins->type = STACK_OBJ;
10344 MONO_ADD_INS (cfg->cbb, ins);
10346 CHECK_CFG_EXCEPTION;
10350 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10353 case CEE_UNBOX_ANY: {
10354 MonoInst *res, *addr;
10359 token = read32 (ip + 1);
10360 klass = mini_get_class (method, token, generic_context);
10361 CHECK_TYPELOAD (klass);
10363 mono_save_token_info (cfg, image, token, klass);
10365 context_used = mini_class_check_context_used (cfg, klass);
10367 if (mini_is_gsharedvt_klass (klass)) {
10368 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10370 } else if (generic_class_is_reference_type (cfg, klass)) {
10371 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10372 EMIT_NEW_PCONST (cfg, res, NULL);
10373 res->type = STACK_OBJ;
10375 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10376 res->dreg = alloc_preg (cfg);
10377 res->sreg1 = (*sp)->dreg;
10378 res->klass = klass;
10379 res->type = STACK_OBJ;
10380 MONO_ADD_INS (cfg->cbb, res);
10381 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10383 } else if (mono_class_is_nullable (klass)) {
10384 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10386 addr = handle_unbox (cfg, klass, sp, context_used);
10388 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10399 MonoClass *enum_class;
10400 MonoMethod *has_flag;
10406 token = read32 (ip + 1);
10407 klass = mini_get_class (method, token, generic_context);
10408 CHECK_TYPELOAD (klass);
10410 mono_save_token_info (cfg, image, token, klass);
10412 context_used = mini_class_check_context_used (cfg, klass);
10414 if (generic_class_is_reference_type (cfg, klass)) {
10420 if (klass == mono_defaults.void_class)
10422 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10424 /* frequent check in generic code: box (struct), brtrue */
10429 * <push int/long ptr>
10432 * constrained. MyFlags
10433 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10435 * If we find this sequence and the operand types on box and constrained
10436 * are equal, we can emit a specialized instruction sequence instead of
10437 * the very slow HasFlag () call.
10439 if ((cfg->opt & MONO_OPT_INTRINS) &&
10440 /* Cheap checks first. */
10441 ip + 5 + 6 + 5 < end &&
10442 ip [5] == CEE_PREFIX1 &&
10443 ip [6] == CEE_CONSTRAINED_ &&
10444 ip [11] == CEE_CALLVIRT &&
10445 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10446 mono_class_is_enum (klass) &&
10447 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10448 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10449 has_flag->klass == mono_defaults.enum_class &&
10450 !strcmp (has_flag->name, "HasFlag") &&
10451 has_flag->signature->hasthis &&
10452 has_flag->signature->param_count == 1) {
10453 CHECK_TYPELOAD (enum_class);
10455 if (enum_class == klass) {
10456 MonoInst *enum_this, *enum_flag;
10461 enum_this = sp [0];
10462 enum_flag = sp [1];
10464 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10469 // FIXME: LLVM can't handle the inconsistent bb linking
10470 if (!mono_class_is_nullable (klass) &&
10471 !mini_is_gsharedvt_klass (klass) &&
10472 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10473 (ip [5] == CEE_BRTRUE ||
10474 ip [5] == CEE_BRTRUE_S ||
10475 ip [5] == CEE_BRFALSE ||
10476 ip [5] == CEE_BRFALSE_S)) {
10477 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10479 MonoBasicBlock *true_bb, *false_bb;
10483 if (cfg->verbose_level > 3) {
10484 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10485 printf ("<box+brtrue opt>\n");
10490 case CEE_BRFALSE_S:
10493 target = ip + 1 + (signed char)(*ip);
10500 target = ip + 4 + (gint)(read32 (ip));
10504 g_assert_not_reached ();
10508 * We need to link both bblocks, since it is needed for handling stack
10509 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10510 * Branching to only one of them would lead to inconsistencies, so
10511 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10513 GET_BBLOCK (cfg, true_bb, target);
10514 GET_BBLOCK (cfg, false_bb, ip);
10516 mono_link_bblock (cfg, cfg->cbb, true_bb);
10517 mono_link_bblock (cfg, cfg->cbb, false_bb);
10519 if (sp != stack_start) {
10520 handle_stack_args (cfg, stack_start, sp - stack_start);
10522 CHECK_UNVERIFIABLE (cfg);
10525 if (COMPILE_LLVM (cfg)) {
10526 dreg = alloc_ireg (cfg);
10527 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10528 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10530 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10532 /* The JIT can't eliminate the iconst+compare */
10533 MONO_INST_NEW (cfg, ins, OP_BR);
10534 ins->inst_target_bb = is_true ? true_bb : false_bb;
10535 MONO_ADD_INS (cfg->cbb, ins);
10538 start_new_bblock = 1;
10542 *sp++ = handle_box (cfg, val, klass, context_used);
10544 CHECK_CFG_EXCEPTION;
10553 token = read32 (ip + 1);
10554 klass = mini_get_class (method, token, generic_context);
10555 CHECK_TYPELOAD (klass);
10557 mono_save_token_info (cfg, image, token, klass);
10559 context_used = mini_class_check_context_used (cfg, klass);
10561 if (mono_class_is_nullable (klass)) {
10564 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10565 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10569 ins = handle_unbox (cfg, klass, sp, context_used);
10582 MonoClassField *field;
10583 #ifndef DISABLE_REMOTING
10587 gboolean is_instance;
10589 gpointer addr = NULL;
10590 gboolean is_special_static;
10592 MonoInst *store_val = NULL;
10593 MonoInst *thread_ins;
10596 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10598 if (op == CEE_STFLD) {
10601 store_val = sp [1];
10606 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10608 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10611 if (op == CEE_STSFLD) {
10614 store_val = sp [0];
10619 token = read32 (ip + 1);
10620 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10621 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10622 klass = field->parent;
10625 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10628 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10629 FIELD_ACCESS_FAILURE (method, field);
10630 mono_class_init (klass);
10632 /* if the class is Critical then transparent code cannot access its fields */
10633 if (!is_instance && mono_security_core_clr_enabled ())
10634 ensure_method_is_allowed_to_access_field (cfg, method, field);
10636 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10637 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10638 if (mono_security_core_clr_enabled ())
10639 ensure_method_is_allowed_to_access_field (cfg, method, field);
10642 ftype = mono_field_get_type (field);
10645 * LDFLD etc. is usable on static fields as well, so convert those cases to
10648 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10660 g_assert_not_reached ();
10662 is_instance = FALSE;
10665 context_used = mini_class_check_context_used (cfg, klass);
10667 /* INSTANCE CASE */
10669 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10670 if (op == CEE_STFLD) {
10671 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10673 #ifndef DISABLE_REMOTING
10674 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10675 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10676 MonoInst *iargs [5];
10678 GSHAREDVT_FAILURE (op);
10680 iargs [0] = sp [0];
10681 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10682 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10683 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10685 iargs [4] = sp [1];
10687 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10688 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10689 iargs, ip, cfg->real_offset, TRUE);
10690 CHECK_CFG_EXCEPTION;
10691 g_assert (costs > 0);
10693 cfg->real_offset += 5;
10695 inline_costs += costs;
10697 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10702 MonoInst *store, *wbarrier_ptr_ins = NULL;
10704 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10706 if (ins_flag & MONO_INST_VOLATILE) {
10707 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10708 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10711 if (mini_is_gsharedvt_klass (klass)) {
10712 MonoInst *offset_ins;
10714 context_used = mini_class_check_context_used (cfg, klass);
10716 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10717 /* The value is offset by 1 */
10718 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10719 dreg = alloc_ireg_mp (cfg);
10720 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10721 wbarrier_ptr_ins = ins;
10722 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10723 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10725 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10727 if (sp [0]->opcode != OP_LDADDR)
10728 store->flags |= MONO_INST_FAULT;
10730 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10731 if (mini_is_gsharedvt_klass (klass)) {
10732 g_assert (wbarrier_ptr_ins);
10733 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10735 /* insert call to write barrier */
10739 dreg = alloc_ireg_mp (cfg);
10740 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10741 emit_write_barrier (cfg, ptr, sp [1]);
10745 store->flags |= ins_flag;
10752 #ifndef DISABLE_REMOTING
10753 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10754 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10755 MonoInst *iargs [4];
10757 GSHAREDVT_FAILURE (op);
10759 iargs [0] = sp [0];
10760 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10761 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10762 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10763 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10764 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10765 iargs, ip, cfg->real_offset, TRUE);
10766 CHECK_CFG_EXCEPTION;
10767 g_assert (costs > 0);
10769 cfg->real_offset += 5;
10773 inline_costs += costs;
10775 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10781 if (sp [0]->type == STACK_VTYPE) {
10784 /* Have to compute the address of the variable */
10786 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10788 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10790 g_assert (var->klass == klass);
10792 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10796 if (op == CEE_LDFLDA) {
10797 if (sp [0]->type == STACK_OBJ) {
10798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10799 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10802 dreg = alloc_ireg_mp (cfg);
10804 if (mini_is_gsharedvt_klass (klass)) {
10805 MonoInst *offset_ins;
10807 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10808 /* The value is offset by 1 */
10809 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10810 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10812 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10814 ins->klass = mono_class_from_mono_type (field->type);
10815 ins->type = STACK_MP;
10820 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10822 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10823 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10832 if (mini_is_gsharedvt_klass (klass)) {
10833 MonoInst *offset_ins;
10835 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10836 /* The value is offset by 1 */
10837 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10838 dreg = alloc_ireg_mp (cfg);
10839 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10840 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10842 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10844 load->flags |= ins_flag;
10845 if (sp [0]->opcode != OP_LDADDR)
10846 load->flags |= MONO_INST_FAULT;
10858 context_used = mini_class_check_context_used (cfg, klass);
10860 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10861 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10865 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10866 * to be called here.
10868 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10869 mono_class_vtable (cfg->domain, klass);
10870 CHECK_TYPELOAD (klass);
10872 mono_domain_lock (cfg->domain);
10873 if (cfg->domain->special_static_fields)
10874 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10875 mono_domain_unlock (cfg->domain);
10877 is_special_static = mono_class_field_is_special_static (field);
10879 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10880 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10884 /* Generate IR to compute the field address */
10885 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10887 * Fast access to TLS data
10888 * Inline version of get_thread_static_data () in
10892 int idx, static_data_reg, array_reg, dreg;
10894 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10895 GSHAREDVT_FAILURE (op);
10897 static_data_reg = alloc_ireg (cfg);
10898 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10900 if (cfg->compile_aot) {
10901 int offset_reg, offset2_reg, idx_reg;
10903 /* For TLS variables, this will return the TLS offset */
10904 EMIT_NEW_SFLDACONST (cfg, ins, field);
10905 offset_reg = ins->dreg;
10906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10907 idx_reg = alloc_ireg (cfg);
10908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10910 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10911 array_reg = alloc_ireg (cfg);
10912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10913 offset2_reg = alloc_ireg (cfg);
10914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10916 dreg = alloc_ireg (cfg);
10917 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10919 offset = (gsize)addr & 0x7fffffff;
10920 idx = offset & 0x3f;
10922 array_reg = alloc_ireg (cfg);
10923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10924 dreg = alloc_ireg (cfg);
10925 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10927 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10928 (cfg->compile_aot && is_special_static) ||
10929 (context_used && is_special_static)) {
10930 MonoInst *iargs [2];
10932 g_assert (field->parent);
10933 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10934 if (context_used) {
10935 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10936 field, MONO_RGCTX_INFO_CLASS_FIELD);
10938 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10940 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10941 } else if (context_used) {
10942 MonoInst *static_data;
10945 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10946 method->klass->name_space, method->klass->name, method->name,
10947 depth, field->offset);
10950 if (mono_class_needs_cctor_run (klass, method))
10951 emit_class_init (cfg, klass);
10954 * The pointer we're computing here is
10956 * super_info.static_data + field->offset
10958 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10959 klass, MONO_RGCTX_INFO_STATIC_DATA);
10961 if (mini_is_gsharedvt_klass (klass)) {
10962 MonoInst *offset_ins;
10964 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10965 /* The value is offset by 1 */
10966 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10967 dreg = alloc_ireg_mp (cfg);
10968 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10969 } else if (field->offset == 0) {
10972 int addr_reg = mono_alloc_preg (cfg);
10973 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10975 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10976 MonoInst *iargs [2];
10978 g_assert (field->parent);
10979 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10980 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10981 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10983 MonoVTable *vtable = NULL;
10985 if (!cfg->compile_aot)
10986 vtable = mono_class_vtable (cfg->domain, klass);
10987 CHECK_TYPELOAD (klass);
10990 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10991 if (!(g_slist_find (class_inits, klass))) {
10992 emit_class_init (cfg, klass);
10993 if (cfg->verbose_level > 2)
10994 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10995 class_inits = g_slist_prepend (class_inits, klass);
10998 if (cfg->run_cctors) {
10999 /* This makes so that inline cannot trigger */
11000 /* .cctors: too many apps depend on them */
11001 /* running with a specific order... */
11003 if (! vtable->initialized)
11004 INLINE_FAILURE ("class init");
11005 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11006 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11007 goto exception_exit;
11011 if (cfg->compile_aot)
11012 EMIT_NEW_SFLDACONST (cfg, ins, field);
11015 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11017 EMIT_NEW_PCONST (cfg, ins, addr);
11020 MonoInst *iargs [1];
11021 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11022 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11026 /* Generate IR to do the actual load/store operation */
11028 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11029 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11030 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11033 if (op == CEE_LDSFLDA) {
11034 ins->klass = mono_class_from_mono_type (ftype);
11035 ins->type = STACK_PTR;
11037 } else if (op == CEE_STSFLD) {
11040 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11041 store->flags |= ins_flag;
11043 gboolean is_const = FALSE;
11044 MonoVTable *vtable = NULL;
11045 gpointer addr = NULL;
11047 if (!context_used) {
11048 vtable = mono_class_vtable (cfg->domain, klass);
11049 CHECK_TYPELOAD (klass);
11051 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11052 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11053 int ro_type = ftype->type;
11055 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11056 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11057 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11060 GSHAREDVT_FAILURE (op);
11062 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11065 case MONO_TYPE_BOOLEAN:
11067 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11071 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11074 case MONO_TYPE_CHAR:
11076 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11080 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11085 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11089 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11094 case MONO_TYPE_PTR:
11095 case MONO_TYPE_FNPTR:
11096 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11097 type_to_eval_stack_type ((cfg), field->type, *sp);
11100 case MONO_TYPE_STRING:
11101 case MONO_TYPE_OBJECT:
11102 case MONO_TYPE_CLASS:
11103 case MONO_TYPE_SZARRAY:
11104 case MONO_TYPE_ARRAY:
11105 if (!mono_gc_is_moving ()) {
11106 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11107 type_to_eval_stack_type ((cfg), field->type, *sp);
11115 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11120 case MONO_TYPE_VALUETYPE:
11130 CHECK_STACK_OVF (1);
11132 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11133 load->flags |= ins_flag;
11139 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11140 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11141 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11152 token = read32 (ip + 1);
11153 klass = mini_get_class (method, token, generic_context);
11154 CHECK_TYPELOAD (klass);
11155 if (ins_flag & MONO_INST_VOLATILE) {
11156 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11157 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11159 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11160 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11161 ins->flags |= ins_flag;
11162 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11163 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11164 /* insert call to write barrier */
11165 emit_write_barrier (cfg, sp [0], sp [1]);
11177 const char *data_ptr;
11179 guint32 field_token;
11185 token = read32 (ip + 1);
11187 klass = mini_get_class (method, token, generic_context);
11188 CHECK_TYPELOAD (klass);
11190 context_used = mini_class_check_context_used (cfg, klass);
11192 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11193 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11194 ins->sreg1 = sp [0]->dreg;
11195 ins->type = STACK_I4;
11196 ins->dreg = alloc_ireg (cfg);
11197 MONO_ADD_INS (cfg->cbb, ins);
11198 *sp = mono_decompose_opcode (cfg, ins);
11201 if (context_used) {
11202 MonoInst *args [3];
11203 MonoClass *array_class = mono_array_class_get (klass, 1);
11204 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11206 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11209 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11210 array_class, MONO_RGCTX_INFO_VTABLE);
11215 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11217 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11219 if (cfg->opt & MONO_OPT_SHARED) {
11220 /* Decompose now to avoid problems with references to the domainvar */
11221 MonoInst *iargs [3];
11223 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11224 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11225 iargs [2] = sp [0];
11227 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11229 /* Decompose later since it is needed by abcrem */
11230 MonoClass *array_type = mono_array_class_get (klass, 1);
11231 mono_class_vtable (cfg->domain, array_type);
11232 CHECK_TYPELOAD (array_type);
11234 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11235 ins->dreg = alloc_ireg_ref (cfg);
11236 ins->sreg1 = sp [0]->dreg;
11237 ins->inst_newa_class = klass;
11238 ins->type = STACK_OBJ;
11239 ins->klass = array_type;
11240 MONO_ADD_INS (cfg->cbb, ins);
11241 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11242 cfg->cbb->has_array_access = TRUE;
11244 /* Needed so mono_emit_load_get_addr () gets called */
11245 mono_get_got_var (cfg);
11255 * we inline/optimize the initialization sequence if possible.
11256 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11257 * for small sizes open code the memcpy
11258 * ensure the rva field is big enough
11260 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11261 MonoMethod *memcpy_method = get_memcpy_method ();
11262 MonoInst *iargs [3];
11263 int add_reg = alloc_ireg_mp (cfg);
11265 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11266 if (cfg->compile_aot) {
11267 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11269 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11271 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11272 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11281 if (sp [0]->type != STACK_OBJ)
11284 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11285 ins->dreg = alloc_preg (cfg);
11286 ins->sreg1 = sp [0]->dreg;
11287 ins->type = STACK_I4;
11288 /* This flag will be inherited by the decomposition */
11289 ins->flags |= MONO_INST_FAULT;
11290 MONO_ADD_INS (cfg->cbb, ins);
11291 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11292 cfg->cbb->has_array_access = TRUE;
11300 if (sp [0]->type != STACK_OBJ)
11303 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11305 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11306 CHECK_TYPELOAD (klass);
11307 /* we need to make sure that this array is exactly the type it needs
11308 * to be for correctness. the wrappers are lax with their usage
11309 * so we need to ignore them here
11311 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11312 MonoClass *array_class = mono_array_class_get (klass, 1);
11313 mini_emit_check_array_type (cfg, sp [0], array_class);
11314 CHECK_TYPELOAD (array_class);
11318 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11323 case CEE_LDELEM_I1:
11324 case CEE_LDELEM_U1:
11325 case CEE_LDELEM_I2:
11326 case CEE_LDELEM_U2:
11327 case CEE_LDELEM_I4:
11328 case CEE_LDELEM_U4:
11329 case CEE_LDELEM_I8:
11331 case CEE_LDELEM_R4:
11332 case CEE_LDELEM_R8:
11333 case CEE_LDELEM_REF: {
11339 if (*ip == CEE_LDELEM) {
11341 token = read32 (ip + 1);
11342 klass = mini_get_class (method, token, generic_context);
11343 CHECK_TYPELOAD (klass);
11344 mono_class_init (klass);
11347 klass = array_access_to_klass (*ip);
11349 if (sp [0]->type != STACK_OBJ)
11352 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11354 if (mini_is_gsharedvt_variable_klass (klass)) {
11355 // FIXME-VT: OP_ICONST optimization
11356 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11357 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11358 ins->opcode = OP_LOADV_MEMBASE;
11359 } else if (sp [1]->opcode == OP_ICONST) {
11360 int array_reg = sp [0]->dreg;
11361 int index_reg = sp [1]->dreg;
11362 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11364 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11365 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11367 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11368 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11370 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11371 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11374 if (*ip == CEE_LDELEM)
11381 case CEE_STELEM_I1:
11382 case CEE_STELEM_I2:
11383 case CEE_STELEM_I4:
11384 case CEE_STELEM_I8:
11385 case CEE_STELEM_R4:
11386 case CEE_STELEM_R8:
11387 case CEE_STELEM_REF:
11392 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11394 if (*ip == CEE_STELEM) {
11396 token = read32 (ip + 1);
11397 klass = mini_get_class (method, token, generic_context);
11398 CHECK_TYPELOAD (klass);
11399 mono_class_init (klass);
11402 klass = array_access_to_klass (*ip);
11404 if (sp [0]->type != STACK_OBJ)
11407 emit_array_store (cfg, klass, sp, TRUE);
11409 if (*ip == CEE_STELEM)
11416 case CEE_CKFINITE: {
11420 if (cfg->llvm_only) {
11421 MonoInst *iargs [1];
11423 iargs [0] = sp [0];
11424 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11426 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11427 ins->sreg1 = sp [0]->dreg;
11428 ins->dreg = alloc_freg (cfg);
11429 ins->type = STACK_R8;
11430 MONO_ADD_INS (cfg->cbb, ins);
11432 *sp++ = mono_decompose_opcode (cfg, ins);
11438 case CEE_REFANYVAL: {
11439 MonoInst *src_var, *src;
11441 int klass_reg = alloc_preg (cfg);
11442 int dreg = alloc_preg (cfg);
11444 GSHAREDVT_FAILURE (*ip);
11447 MONO_INST_NEW (cfg, ins, *ip);
11450 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11451 CHECK_TYPELOAD (klass);
11453 context_used = mini_class_check_context_used (cfg, klass);
11456 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11458 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11459 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11462 if (context_used) {
11463 MonoInst *klass_ins;
11465 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11466 klass, MONO_RGCTX_INFO_KLASS);
11469 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11470 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11472 mini_emit_class_check (cfg, klass_reg, klass);
11474 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11475 ins->type = STACK_MP;
11476 ins->klass = klass;
11481 case CEE_MKREFANY: {
11482 MonoInst *loc, *addr;
11484 GSHAREDVT_FAILURE (*ip);
11487 MONO_INST_NEW (cfg, ins, *ip);
11490 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11491 CHECK_TYPELOAD (klass);
11493 context_used = mini_class_check_context_used (cfg, klass);
11495 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11496 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11498 if (context_used) {
11499 MonoInst *const_ins;
11500 int type_reg = alloc_preg (cfg);
11502 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11503 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11505 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11507 int const_reg = alloc_preg (cfg);
11508 int type_reg = alloc_preg (cfg);
11510 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11511 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11513 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11515 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11517 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11518 ins->type = STACK_VTYPE;
11519 ins->klass = mono_defaults.typed_reference_class;
11524 case CEE_LDTOKEN: {
11526 MonoClass *handle_class;
11528 CHECK_STACK_OVF (1);
11531 n = read32 (ip + 1);
11533 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11534 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11535 handle = mono_method_get_wrapper_data (method, n);
11536 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11537 if (handle_class == mono_defaults.typehandle_class)
11538 handle = &((MonoClass*)handle)->byval_arg;
11541 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11546 mono_class_init (handle_class);
11547 if (cfg->gshared) {
11548 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11549 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11550 /* This case handles ldtoken
11551 of an open type, like for
11554 } else if (handle_class == mono_defaults.typehandle_class) {
11555 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11556 } else if (handle_class == mono_defaults.fieldhandle_class)
11557 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11558 else if (handle_class == mono_defaults.methodhandle_class)
11559 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11561 g_assert_not_reached ();
11564 if ((cfg->opt & MONO_OPT_SHARED) &&
11565 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11566 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11567 MonoInst *addr, *vtvar, *iargs [3];
11568 int method_context_used;
11570 method_context_used = mini_method_check_context_used (cfg, method);
11572 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11574 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11575 EMIT_NEW_ICONST (cfg, iargs [1], n);
11576 if (method_context_used) {
11577 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11578 method, MONO_RGCTX_INFO_METHOD);
11579 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11581 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11582 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11584 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11586 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11588 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11590 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11591 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11592 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11593 (cmethod->klass == mono_defaults.systemtype_class) &&
11594 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11595 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11597 mono_class_init (tclass);
11598 if (context_used) {
11599 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11600 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11601 } else if (cfg->compile_aot) {
11602 if (method->wrapper_type) {
11603 error_init (&error); //got to do it since there are multiple conditionals below
11604 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11605 /* Special case for static synchronized wrappers */
11606 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11608 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11609 /* FIXME: n is not a normal token */
11611 EMIT_NEW_PCONST (cfg, ins, NULL);
11614 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11617 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11619 EMIT_NEW_PCONST (cfg, ins, rt);
11621 ins->type = STACK_OBJ;
11622 ins->klass = cmethod->klass;
11625 MonoInst *addr, *vtvar;
11627 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11629 if (context_used) {
11630 if (handle_class == mono_defaults.typehandle_class) {
11631 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11632 mono_class_from_mono_type ((MonoType *)handle),
11633 MONO_RGCTX_INFO_TYPE);
11634 } else if (handle_class == mono_defaults.methodhandle_class) {
11635 ins = emit_get_rgctx_method (cfg, context_used,
11636 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11637 } else if (handle_class == mono_defaults.fieldhandle_class) {
11638 ins = emit_get_rgctx_field (cfg, context_used,
11639 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11641 g_assert_not_reached ();
11643 } else if (cfg->compile_aot) {
11644 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11646 EMIT_NEW_PCONST (cfg, ins, handle);
11648 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11649 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11650 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11660 if (sp [-1]->type != STACK_OBJ)
11663 MONO_INST_NEW (cfg, ins, OP_THROW);
11665 ins->sreg1 = sp [0]->dreg;
11667 cfg->cbb->out_of_line = TRUE;
11668 MONO_ADD_INS (cfg->cbb, ins);
11669 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11670 MONO_ADD_INS (cfg->cbb, ins);
11673 link_bblock (cfg, cfg->cbb, end_bblock);
11674 start_new_bblock = 1;
11675 /* This can complicate code generation for llvm since the return value might not be defined */
11676 if (COMPILE_LLVM (cfg))
11677 INLINE_FAILURE ("throw");
11679 case CEE_ENDFINALLY:
11680 if (!ip_in_finally_clause (cfg, ip - header->code))
11682 /* mono_save_seq_point_info () depends on this */
11683 if (sp != stack_start)
11684 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11685 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11686 MONO_ADD_INS (cfg->cbb, ins);
11688 start_new_bblock = 1;
11691 * Control will leave the method so empty the stack, otherwise
11692 * the next basic block will start with a nonempty stack.
11694 while (sp != stack_start) {
11699 case CEE_LEAVE_S: {
11702 if (*ip == CEE_LEAVE) {
11704 target = ip + 5 + (gint32)read32(ip + 1);
11707 target = ip + 2 + (signed char)(ip [1]);
11710 /* empty the stack */
11711 while (sp != stack_start) {
11716 * If this leave statement is in a catch block, check for a
11717 * pending exception, and rethrow it if necessary.
11718 * We avoid doing this in runtime invoke wrappers, since those are called
11719 * by native code which expects the wrapper to catch all exceptions.
11721 for (i = 0; i < header->num_clauses; ++i) {
11722 MonoExceptionClause *clause = &header->clauses [i];
11725 * Use <= in the final comparison to handle clauses with multiple
11726 * leave statements, like in bug #78024.
11727 * The ordering of the exception clauses guarantees that we find the
11728 * innermost clause.
11730 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11732 MonoBasicBlock *dont_throw;
11737 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11740 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11742 NEW_BBLOCK (cfg, dont_throw);
11745 * Currently, we always rethrow the abort exception, despite the
11746 * fact that this is not correct. See thread6.cs for an example.
11747 * But propagating the abort exception is more important than
11748 * getting the semantics right.
11750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11751 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11752 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11754 MONO_START_BB (cfg, dont_throw);
11759 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11762 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11764 MonoExceptionClause *clause;
11766 for (tmp = handlers; tmp; tmp = tmp->next) {
11767 clause = (MonoExceptionClause *)tmp->data;
11768 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11770 link_bblock (cfg, cfg->cbb, tblock);
11771 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11772 ins->inst_target_bb = tblock;
11773 ins->inst_eh_block = clause;
11774 MONO_ADD_INS (cfg->cbb, ins);
11775 cfg->cbb->has_call_handler = 1;
11776 if (COMPILE_LLVM (cfg)) {
11777 MonoBasicBlock *target_bb;
11780 * Link the finally bblock with the target, since it will
11781 * conceptually branch there.
11783 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11784 GET_BBLOCK (cfg, target_bb, target);
11785 link_bblock (cfg, tblock, target_bb);
11788 g_list_free (handlers);
11791 MONO_INST_NEW (cfg, ins, OP_BR);
11792 MONO_ADD_INS (cfg->cbb, ins);
11793 GET_BBLOCK (cfg, tblock, target);
11794 link_bblock (cfg, cfg->cbb, tblock);
11795 ins->inst_target_bb = tblock;
11797 start_new_bblock = 1;
11799 if (*ip == CEE_LEAVE)
11808 * Mono specific opcodes
11810 case MONO_CUSTOM_PREFIX: {
11812 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11816 case CEE_MONO_ICALL: {
11818 MonoJitICallInfo *info;
11820 token = read32 (ip + 2);
11821 func = mono_method_get_wrapper_data (method, token);
11822 info = mono_find_jit_icall_by_addr (func);
11824 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11827 CHECK_STACK (info->sig->param_count);
11828 sp -= info->sig->param_count;
11830 ins = mono_emit_jit_icall (cfg, info->func, sp);
11831 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11835 inline_costs += 10 * num_calls++;
11839 case CEE_MONO_LDPTR_CARD_TABLE:
11840 case CEE_MONO_LDPTR_NURSERY_START:
11841 case CEE_MONO_LDPTR_NURSERY_BITS:
11842 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11843 CHECK_STACK_OVF (1);
11846 case CEE_MONO_LDPTR_CARD_TABLE:
11847 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11849 case CEE_MONO_LDPTR_NURSERY_START:
11850 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11852 case CEE_MONO_LDPTR_NURSERY_BITS:
11853 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11855 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11856 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11862 inline_costs += 10 * num_calls++;
11865 case CEE_MONO_LDPTR: {
11868 CHECK_STACK_OVF (1);
11870 token = read32 (ip + 2);
11872 ptr = mono_method_get_wrapper_data (method, token);
11873 EMIT_NEW_PCONST (cfg, ins, ptr);
11876 inline_costs += 10 * num_calls++;
11877 /* Can't embed random pointers into AOT code */
11881 case CEE_MONO_JIT_ICALL_ADDR: {
11882 MonoJitICallInfo *callinfo;
11885 CHECK_STACK_OVF (1);
11887 token = read32 (ip + 2);
11889 ptr = mono_method_get_wrapper_data (method, token);
11890 callinfo = mono_find_jit_icall_by_addr (ptr);
11891 g_assert (callinfo);
11892 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11895 inline_costs += 10 * num_calls++;
11898 case CEE_MONO_ICALL_ADDR: {
11899 MonoMethod *cmethod;
11902 CHECK_STACK_OVF (1);
11904 token = read32 (ip + 2);
11906 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11908 if (cfg->compile_aot) {
11909 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11911 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11912 * before the call, it's not needed when using direct pinvoke.
11913 * This is not an optimization, but it's used to avoid looking up pinvokes
11914 * on platforms which don't support dlopen ().
11916 EMIT_NEW_PCONST (cfg, ins, NULL);
11918 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11921 ptr = mono_lookup_internal_call (cmethod);
11923 EMIT_NEW_PCONST (cfg, ins, ptr);
11929 case CEE_MONO_VTADDR: {
11930 MonoInst *src_var, *src;
11936 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11937 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11942 case CEE_MONO_NEWOBJ: {
11943 MonoInst *iargs [2];
11945 CHECK_STACK_OVF (1);
11947 token = read32 (ip + 2);
11948 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11949 mono_class_init (klass);
11950 NEW_DOMAINCONST (cfg, iargs [0]);
11951 MONO_ADD_INS (cfg->cbb, iargs [0]);
11952 NEW_CLASSCONST (cfg, iargs [1], klass);
11953 MONO_ADD_INS (cfg->cbb, iargs [1]);
11954 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11956 inline_costs += 10 * num_calls++;
11959 case CEE_MONO_OBJADDR:
11962 MONO_INST_NEW (cfg, ins, OP_MOVE);
11963 ins->dreg = alloc_ireg_mp (cfg);
11964 ins->sreg1 = sp [0]->dreg;
11965 ins->type = STACK_MP;
11966 MONO_ADD_INS (cfg->cbb, ins);
11970 case CEE_MONO_LDNATIVEOBJ:
11972 * Similar to LDOBJ, but instead load the unmanaged
11973 * representation of the vtype to the stack.
11978 token = read32 (ip + 2);
11979 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11980 g_assert (klass->valuetype);
11981 mono_class_init (klass);
11984 MonoInst *src, *dest, *temp;
11987 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11988 temp->backend.is_pinvoke = 1;
11989 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11990 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11992 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11993 dest->type = STACK_VTYPE;
11994 dest->klass = klass;
12000 case CEE_MONO_RETOBJ: {
12002 * Same as RET, but return the native representation of a vtype
12005 g_assert (cfg->ret);
12006 g_assert (mono_method_signature (method)->pinvoke);
12011 token = read32 (ip + 2);
12012 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12014 if (!cfg->vret_addr) {
12015 g_assert (cfg->ret_var_is_local);
12017 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12019 EMIT_NEW_RETLOADA (cfg, ins);
12021 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12023 if (sp != stack_start)
12026 MONO_INST_NEW (cfg, ins, OP_BR);
12027 ins->inst_target_bb = end_bblock;
12028 MONO_ADD_INS (cfg->cbb, ins);
12029 link_bblock (cfg, cfg->cbb, end_bblock);
12030 start_new_bblock = 1;
12034 case CEE_MONO_SAVE_LMF:
12035 case CEE_MONO_RESTORE_LMF:
12038 case CEE_MONO_CLASSCONST:
12039 CHECK_STACK_OVF (1);
12041 token = read32 (ip + 2);
12042 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12045 inline_costs += 10 * num_calls++;
12047 case CEE_MONO_NOT_TAKEN:
12048 cfg->cbb->out_of_line = TRUE;
12051 case CEE_MONO_TLS: {
12054 CHECK_STACK_OVF (1);
12056 key = (MonoTlsKey)read32 (ip + 2);
12057 g_assert (key < TLS_KEY_NUM);
12059 ins = mono_create_tls_get (cfg, key);
12061 ins->type = STACK_PTR;
12066 case CEE_MONO_DYN_CALL: {
12067 MonoCallInst *call;
12069 /* It would be easier to call a trampoline, but that would put an
12070 * extra frame on the stack, confusing exception handling. So
12071 * implement it inline using an opcode for now.
12074 if (!cfg->dyn_call_var) {
12075 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12076 /* prevent it from being register allocated */
12077 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12080 /* Has to use a call inst since local regalloc expects it */
12081 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12082 ins = (MonoInst*)call;
12084 ins->sreg1 = sp [0]->dreg;
12085 ins->sreg2 = sp [1]->dreg;
12086 MONO_ADD_INS (cfg->cbb, ins);
12088 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12091 inline_costs += 10 * num_calls++;
12095 case CEE_MONO_MEMORY_BARRIER: {
12097 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12101 case CEE_MONO_ATOMIC_STORE_I4: {
12102 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12108 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12109 ins->dreg = sp [0]->dreg;
12110 ins->sreg1 = sp [1]->dreg;
12111 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12112 MONO_ADD_INS (cfg->cbb, ins);
12117 case CEE_MONO_JIT_ATTACH: {
12118 MonoInst *args [16], *domain_ins;
12119 MonoInst *ad_ins, *jit_tls_ins;
12120 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12122 g_assert (!mono_threads_is_coop_enabled ());
12124 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12126 EMIT_NEW_PCONST (cfg, ins, NULL);
12127 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12129 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12130 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12132 if (ad_ins && jit_tls_ins) {
12133 NEW_BBLOCK (cfg, next_bb);
12134 NEW_BBLOCK (cfg, call_bb);
12136 if (cfg->compile_aot) {
12137 /* AOT code is only used in the root domain */
12138 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12140 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12142 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12149 MONO_START_BB (cfg, call_bb);
12152 /* AOT code is only used in the root domain */
12153 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12154 if (cfg->compile_aot) {
12158 * This is called on unattached threads, so it cannot go through the trampoline
12159 * infrastructure. Use an indirect call through a got slot initialized at load time
12162 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12163 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12165 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12167 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12170 MONO_START_BB (cfg, next_bb);
12175 case CEE_MONO_JIT_DETACH: {
12176 MonoInst *args [16];
12178 /* Restore the original domain */
12179 dreg = alloc_ireg (cfg);
12180 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12181 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12185 case CEE_MONO_CALLI_EXTRA_ARG: {
12187 MonoMethodSignature *fsig;
12191 * This is the same as CEE_CALLI, but passes an additional argument
12192 * to the called method in llvmonly mode.
12193 * This is only used by delegate invoke wrappers to call the
12194 * actual delegate method.
12196 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12199 token = read32 (ip + 2);
12207 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12210 if (cfg->llvm_only)
12211 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12213 n = fsig->param_count + fsig->hasthis + 1;
12220 if (cfg->llvm_only) {
12222 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12223 * cconv. This is set by mono_init_delegate ().
12225 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12226 MonoInst *callee = addr;
12227 MonoInst *call, *localloc_ins;
12228 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12229 int low_bit_reg = alloc_preg (cfg);
12231 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12232 NEW_BBLOCK (cfg, end_bb);
12234 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12235 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12236 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12238 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12239 addr = emit_get_rgctx_sig (cfg, context_used,
12240 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12242 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12244 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12245 ins->dreg = alloc_preg (cfg);
12246 ins->inst_imm = 2 * SIZEOF_VOID_P;
12247 MONO_ADD_INS (cfg->cbb, ins);
12248 localloc_ins = ins;
12249 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12250 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12251 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12253 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12254 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12256 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12257 MONO_START_BB (cfg, is_gsharedvt_bb);
12258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12259 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12260 ins->dreg = call->dreg;
12262 MONO_START_BB (cfg, end_bb);
12264 /* Caller uses a normal calling conv */
12266 MonoInst *callee = addr;
12267 MonoInst *call, *localloc_ins;
12268 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12269 int low_bit_reg = alloc_preg (cfg);
12271 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12272 NEW_BBLOCK (cfg, end_bb);
12274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12278 /* Normal case: callee uses a normal cconv, no conversion is needed */
12279 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12280 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12281 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12282 MONO_START_BB (cfg, is_gsharedvt_bb);
12283 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12284 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12285 MONO_ADD_INS (cfg->cbb, addr);
12287 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12289 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12290 ins->dreg = alloc_preg (cfg);
12291 ins->inst_imm = 2 * SIZEOF_VOID_P;
12292 MONO_ADD_INS (cfg->cbb, ins);
12293 localloc_ins = ins;
12294 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12298 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12299 ins->dreg = call->dreg;
12300 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12302 MONO_START_BB (cfg, end_bb);
12305 /* Same as CEE_CALLI */
12306 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12308 * We pass the address to the gsharedvt trampoline in the rgctx reg
12310 MonoInst *callee = addr;
12312 addr = emit_get_rgctx_sig (cfg, context_used,
12313 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12314 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12316 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12320 if (!MONO_TYPE_IS_VOID (fsig->ret))
12321 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12323 CHECK_CFG_EXCEPTION;
12327 constrained_class = NULL;
12330 case CEE_MONO_LDDOMAIN:
12331 CHECK_STACK_OVF (1);
12332 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12336 case CEE_MONO_GET_LAST_ERROR:
12338 CHECK_STACK_OVF (1);
12340 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12341 ins->dreg = alloc_dreg (cfg, STACK_I4);
12342 ins->type = STACK_I4;
12343 MONO_ADD_INS (cfg->cbb, ins);
12349 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12355 case CEE_PREFIX1: {
12358 case CEE_ARGLIST: {
12359 /* somewhat similar to LDTOKEN */
12360 MonoInst *addr, *vtvar;
12361 CHECK_STACK_OVF (1);
12362 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12364 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12365 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12367 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12368 ins->type = STACK_VTYPE;
12369 ins->klass = mono_defaults.argumenthandle_class;
12379 MonoInst *cmp, *arg1, *arg2;
12387 * The following transforms:
12388 * CEE_CEQ into OP_CEQ
12389 * CEE_CGT into OP_CGT
12390 * CEE_CGT_UN into OP_CGT_UN
12391 * CEE_CLT into OP_CLT
12392 * CEE_CLT_UN into OP_CLT_UN
12394 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12396 MONO_INST_NEW (cfg, ins, cmp->opcode);
12397 cmp->sreg1 = arg1->dreg;
12398 cmp->sreg2 = arg2->dreg;
12399 type_from_op (cfg, cmp, arg1, arg2);
12401 add_widen_op (cfg, cmp, &arg1, &arg2);
12402 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12403 cmp->opcode = OP_LCOMPARE;
12404 else if (arg1->type == STACK_R4)
12405 cmp->opcode = OP_RCOMPARE;
12406 else if (arg1->type == STACK_R8)
12407 cmp->opcode = OP_FCOMPARE;
12409 cmp->opcode = OP_ICOMPARE;
12410 MONO_ADD_INS (cfg->cbb, cmp);
12411 ins->type = STACK_I4;
12412 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12413 type_from_op (cfg, ins, arg1, arg2);
12415 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12417 * The backends expect the fceq opcodes to do the
12420 ins->sreg1 = cmp->sreg1;
12421 ins->sreg2 = cmp->sreg2;
12424 MONO_ADD_INS (cfg->cbb, ins);
12430 MonoInst *argconst;
12431 MonoMethod *cil_method;
12433 CHECK_STACK_OVF (1);
12435 n = read32 (ip + 2);
12436 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12439 mono_class_init (cmethod->klass);
12441 mono_save_token_info (cfg, image, n, cmethod);
12443 context_used = mini_method_check_context_used (cfg, cmethod);
12445 cil_method = cmethod;
12446 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12447 emit_method_access_failure (cfg, method, cil_method);
12449 if (mono_security_core_clr_enabled ())
12450 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12453 * Optimize the common case of ldftn+delegate creation
12455 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12456 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12457 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12458 MonoInst *target_ins, *handle_ins;
12459 MonoMethod *invoke;
12460 int invoke_context_used;
12462 invoke = mono_get_delegate_invoke (ctor_method->klass);
12463 if (!invoke || !mono_method_signature (invoke))
12466 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12468 target_ins = sp [-1];
12470 if (mono_security_core_clr_enabled ())
12471 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12473 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12474 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12475 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12476 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12477 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12481 /* FIXME: SGEN support */
12482 if (invoke_context_used == 0 || cfg->llvm_only) {
12484 if (cfg->verbose_level > 3)
12485 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12486 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12489 CHECK_CFG_EXCEPTION;
12499 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12500 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12504 inline_costs += 10 * num_calls++;
12507 case CEE_LDVIRTFTN: {
12508 MonoInst *args [2];
12512 n = read32 (ip + 2);
12513 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12516 mono_class_init (cmethod->klass);
12518 context_used = mini_method_check_context_used (cfg, cmethod);
12520 if (mono_security_core_clr_enabled ())
12521 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12524 * Optimize the common case of ldvirtftn+delegate creation
12526 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12527 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12528 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12529 MonoInst *target_ins, *handle_ins;
12530 MonoMethod *invoke;
12531 int invoke_context_used;
12532 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12534 invoke = mono_get_delegate_invoke (ctor_method->klass);
12535 if (!invoke || !mono_method_signature (invoke))
12538 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12540 target_ins = sp [-1];
12542 if (mono_security_core_clr_enabled ())
12543 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12545 /* FIXME: SGEN support */
12546 if (invoke_context_used == 0 || cfg->llvm_only) {
12548 if (cfg->verbose_level > 3)
12549 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12550 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12553 CHECK_CFG_EXCEPTION;
12566 args [1] = emit_get_rgctx_method (cfg, context_used,
12567 cmethod, MONO_RGCTX_INFO_METHOD);
12570 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12572 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12575 inline_costs += 10 * num_calls++;
12579 CHECK_STACK_OVF (1);
12581 n = read16 (ip + 2);
12583 EMIT_NEW_ARGLOAD (cfg, ins, n);
12588 CHECK_STACK_OVF (1);
12590 n = read16 (ip + 2);
12592 NEW_ARGLOADA (cfg, ins, n);
12593 MONO_ADD_INS (cfg->cbb, ins);
12601 n = read16 (ip + 2);
12603 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12605 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12609 CHECK_STACK_OVF (1);
12611 n = read16 (ip + 2);
12613 EMIT_NEW_LOCLOAD (cfg, ins, n);
12618 unsigned char *tmp_ip;
12619 CHECK_STACK_OVF (1);
12621 n = read16 (ip + 2);
12624 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12630 EMIT_NEW_LOCLOADA (cfg, ins, n);
12639 n = read16 (ip + 2);
12641 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12643 emit_stloc_ir (cfg, sp, header, n);
12647 case CEE_LOCALLOC: {
12649 MonoBasicBlock *non_zero_bb, *end_bb;
12650 int alloc_ptr = alloc_preg (cfg);
12652 if (sp != stack_start)
12654 if (cfg->method != method)
12656 * Inlining this into a loop in a parent could lead to
12657 * stack overflows which is different behavior than the
12658 * non-inlined case, thus disable inlining in this case.
12660 INLINE_FAILURE("localloc");
12662 NEW_BBLOCK (cfg, non_zero_bb);
12663 NEW_BBLOCK (cfg, end_bb);
12665 /* if size != zero */
12666 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12667 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12669 //size is zero, so result is NULL
12670 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12673 MONO_START_BB (cfg, non_zero_bb);
12674 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12675 ins->dreg = alloc_ptr;
12676 ins->sreg1 = sp [0]->dreg;
12677 ins->type = STACK_PTR;
12678 MONO_ADD_INS (cfg->cbb, ins);
12680 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12682 ins->flags |= MONO_INST_INIT;
12684 MONO_START_BB (cfg, end_bb);
12685 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12686 ins->type = STACK_PTR;
12692 case CEE_ENDFILTER: {
12693 MonoExceptionClause *clause, *nearest;
12698 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12700 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12701 ins->sreg1 = (*sp)->dreg;
12702 MONO_ADD_INS (cfg->cbb, ins);
12703 start_new_bblock = 1;
12707 for (cc = 0; cc < header->num_clauses; ++cc) {
12708 clause = &header->clauses [cc];
12709 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12710 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12711 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12714 g_assert (nearest);
12715 if ((ip - header->code) != nearest->handler_offset)
12720 case CEE_UNALIGNED_:
12721 ins_flag |= MONO_INST_UNALIGNED;
12722 /* FIXME: record alignment? we can assume 1 for now */
12726 case CEE_VOLATILE_:
12727 ins_flag |= MONO_INST_VOLATILE;
12731 ins_flag |= MONO_INST_TAILCALL;
12732 cfg->flags |= MONO_CFG_HAS_TAIL;
12733 /* Can't inline tail calls at this time */
12734 inline_costs += 100000;
12741 token = read32 (ip + 2);
12742 klass = mini_get_class (method, token, generic_context);
12743 CHECK_TYPELOAD (klass);
12744 if (generic_class_is_reference_type (cfg, klass))
12745 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12747 mini_emit_initobj (cfg, *sp, NULL, klass);
12751 case CEE_CONSTRAINED_:
12753 token = read32 (ip + 2);
12754 constrained_class = mini_get_class (method, token, generic_context);
12755 CHECK_TYPELOAD (constrained_class);
12759 case CEE_INITBLK: {
12760 MonoInst *iargs [3];
12764 /* Skip optimized paths for volatile operations. */
12765 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12766 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12767 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12768 /* emit_memset only works when val == 0 */
12769 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12772 iargs [0] = sp [0];
12773 iargs [1] = sp [1];
12774 iargs [2] = sp [2];
12775 if (ip [1] == CEE_CPBLK) {
12777 * FIXME: It's unclear whether we should be emitting both the acquire
12778 * and release barriers for cpblk. It is technically both a load and
12779 * store operation, so it seems like that's the sensible thing to do.
12781 * FIXME: We emit full barriers on both sides of the operation for
12782 * simplicity. We should have a separate atomic memcpy method instead.
12784 MonoMethod *memcpy_method = get_memcpy_method ();
12786 if (ins_flag & MONO_INST_VOLATILE)
12787 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12789 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12790 call->flags |= ins_flag;
12792 if (ins_flag & MONO_INST_VOLATILE)
12793 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12795 MonoMethod *memset_method = get_memset_method ();
12796 if (ins_flag & MONO_INST_VOLATILE) {
12797 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12798 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12800 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12801 call->flags |= ins_flag;
12812 ins_flag |= MONO_INST_NOTYPECHECK;
12814 ins_flag |= MONO_INST_NORANGECHECK;
12815 /* we ignore the no-nullcheck for now since we
12816 * really do it explicitly only when doing callvirt->call
12820 case CEE_RETHROW: {
12822 int handler_offset = -1;
12824 for (i = 0; i < header->num_clauses; ++i) {
12825 MonoExceptionClause *clause = &header->clauses [i];
12826 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12827 handler_offset = clause->handler_offset;
12832 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12834 if (handler_offset == -1)
12837 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12838 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12839 ins->sreg1 = load->dreg;
12840 MONO_ADD_INS (cfg->cbb, ins);
12842 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12843 MONO_ADD_INS (cfg->cbb, ins);
12846 link_bblock (cfg, cfg->cbb, end_bblock);
12847 start_new_bblock = 1;
12855 CHECK_STACK_OVF (1);
12857 token = read32 (ip + 2);
12858 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12859 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12862 val = mono_type_size (type, &ialign);
12864 MonoClass *klass = mini_get_class (method, token, generic_context);
12865 CHECK_TYPELOAD (klass);
12867 val = mono_type_size (&klass->byval_arg, &ialign);
12869 if (mini_is_gsharedvt_klass (klass))
12870 GSHAREDVT_FAILURE (*ip);
12872 EMIT_NEW_ICONST (cfg, ins, val);
12877 case CEE_REFANYTYPE: {
12878 MonoInst *src_var, *src;
12880 GSHAREDVT_FAILURE (*ip);
12886 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12888 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12889 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12890 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12895 case CEE_READONLY_:
12908 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12918 g_warning ("opcode 0x%02x not handled", *ip);
12922 if (start_new_bblock != 1)
12925 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12926 if (cfg->cbb->next_bb) {
12927 /* This could already be set because of inlining, #693905 */
12928 MonoBasicBlock *bb = cfg->cbb;
12930 while (bb->next_bb)
12932 bb->next_bb = end_bblock;
12934 cfg->cbb->next_bb = end_bblock;
12937 if (cfg->method == method && cfg->domainvar) {
12939 MonoInst *get_domain;
12941 cfg->cbb = init_localsbb;
12943 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12944 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12945 MONO_ADD_INS (cfg->cbb, store);
12948 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12949 if (cfg->compile_aot)
12950 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12951 mono_get_got_var (cfg);
12954 if (cfg->method == method && cfg->got_var)
12955 mono_emit_load_got_addr (cfg);
12957 if (init_localsbb) {
12958 cfg->cbb = init_localsbb;
12960 for (i = 0; i < header->num_locals; ++i) {
12961 emit_init_local (cfg, i, header->locals [i], init_locals);
12965 if (cfg->init_ref_vars && cfg->method == method) {
12966 /* Emit initialization for ref vars */
12967 // FIXME: Avoid duplication initialization for IL locals.
12968 for (i = 0; i < cfg->num_varinfo; ++i) {
12969 MonoInst *ins = cfg->varinfo [i];
12971 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12972 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12976 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12977 cfg->cbb = init_localsbb;
12978 emit_push_lmf (cfg);
12981 cfg->cbb = init_localsbb;
12982 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12985 MonoBasicBlock *bb;
12988 * Make seq points at backward branch targets interruptable.
12990 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12991 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12992 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12995 /* Add a sequence point for method entry/exit events */
12996 if (seq_points && cfg->gen_sdb_seq_points) {
12997 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12998 MONO_ADD_INS (init_localsbb, ins);
12999 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13000 MONO_ADD_INS (cfg->bb_exit, ins);
13004 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13005 * the code they refer to was dead (#11880).
13007 if (sym_seq_points) {
13008 for (i = 0; i < header->code_size; ++i) {
13009 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13012 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13013 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13020 if (cfg->method == method) {
13021 MonoBasicBlock *bb;
13022 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13023 if (bb == cfg->bb_init)
13026 bb->region = mono_find_block_region (cfg, bb->real_offset);
13028 mono_create_spvar_for_region (cfg, bb->region);
13029 if (cfg->verbose_level > 2)
13030 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13033 MonoBasicBlock *bb;
13034 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13035 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13036 bb->real_offset = inline_offset;
13040 if (inline_costs < 0) {
13043 /* Method is too large */
13044 mname = mono_method_full_name (method, TRUE);
13045 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13049 if ((cfg->verbose_level > 2) && (cfg->method == method))
13050 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13055 g_assert (!mono_error_ok (&cfg->error));
13059 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13063 set_exception_type_from_invalid_il (cfg, method, ip);
13067 g_slist_free (class_inits);
13068 mono_basic_block_free (original_bb);
13069 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13070 if (cfg->exception_type)
13073 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-from-register membase opcode to its store-immediate
 * counterpart, so a store whose source register is known to hold a
 * constant can be encoded as a single store-immediate instruction.
 * Asserts if OPCODE has no immediate form (caller must only pass the
 * handled STORE*_MEMBASE_REG opcodes).
 */
13077 store_membase_reg_to_store_membase_imm (int opcode)
13080 case OP_STORE_MEMBASE_REG:
13081 return OP_STORE_MEMBASE_IMM;
13082 case OP_STOREI1_MEMBASE_REG:
13083 return OP_STOREI1_MEMBASE_IMM;
13084 case OP_STOREI2_MEMBASE_REG:
13085 return OP_STOREI2_MEMBASE_IMM;
13086 case OP_STOREI4_MEMBASE_REG:
13087 return OP_STOREI4_MEMBASE_IMM;
13088 case OP_STOREI8_MEMBASE_REG:
13089 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode reaching here is a caller bug. */
13091 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to its register+immediate variant, used
 * when constant folding/propagation proves one operand is a constant.
 * The visible mappings cover 32-bit integer ALU ops, 64-bit long ALU
 * ops, compares, register stores, some x86/amd64-specific forms,
 * indirect calls and LOCALLOC.
 * NOTE(review): the case labels and the no-match return path are not
 * visible in this listing — presumably the unmatched default returns a
 * sentinel (-1); confirm against the full source.
 */
13098 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops */
13102 return OP_IADD_IMM;
13104 return OP_ISUB_IMM;
13106 return OP_IDIV_IMM;
13108 return OP_IDIV_UN_IMM;
13110 return OP_IREM_IMM;
13112 return OP_IREM_UN_IMM;
13114 return OP_IMUL_IMM;
13116 return OP_IAND_IMM;
13120 return OP_IXOR_IMM;
13122 return OP_ISHL_IMM;
13124 return OP_ISHR_IMM;
13126 return OP_ISHR_UN_IMM;
/* 64-bit long ALU ops */
13129 return OP_LADD_IMM;
13131 return OP_LSUB_IMM;
13133 return OP_LAND_IMM;
13137 return OP_LXOR_IMM;
13139 return OP_LSHL_IMM;
13141 return OP_LSHR_IMM;
13143 return OP_LSHR_UN_IMM;
/* Long remainder has an immediate form only on 64-bit registers. */
13144 #if SIZEOF_REGISTER == 8
13146 return OP_LREM_IMM;
/* Compares */
13150 return OP_COMPARE_IMM;
13152 return OP_ICOMPARE_IMM;
13154 return OP_LCOMPARE_IMM;
/* Stores of a (now constant) register */
13156 case OP_STORE_MEMBASE_REG:
13157 return OP_STORE_MEMBASE_IMM;
13158 case OP_STOREI1_MEMBASE_REG:
13159 return OP_STOREI1_MEMBASE_IMM;
13160 case OP_STOREI2_MEMBASE_REG:
13161 return OP_STOREI2_MEMBASE_IMM;
13162 case OP_STOREI4_MEMBASE_REG:
13163 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only encodings */
13165 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13167 return OP_X86_PUSH_IMM;
13168 case OP_X86_COMPARE_MEMBASE_REG:
13169 return OP_X86_COMPARE_MEMBASE_IMM;
13171 #if defined(TARGET_AMD64)
13172 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13173 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Indirect call through a constant address becomes a direct call. */
13175 case OP_VOIDCALL_REG:
13176 return OP_VOIDCALL;
/* Constant-size localloc */
13184 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Convert a CEE_LDIND_* (load-indirect) CIL opcode into the matching
 * OP_LOAD*_MEMBASE IR opcode (load from base register + offset).
 * Asserts on any opcode outside the LDIND family.
 */
13191 ldind_to_load_membase (int opcode)
13195 return OP_LOADI1_MEMBASE;
13197 return OP_LOADU1_MEMBASE;
13199 return OP_LOADI2_MEMBASE;
13201 return OP_LOADU2_MEMBASE;
13203 return OP_LOADI4_MEMBASE;
13205 return OP_LOADU4_MEMBASE;
/* Native-int and object-reference loads share the pointer-sized load. */
13207 return OP_LOAD_MEMBASE;
13208 case CEE_LDIND_REF:
13209 return OP_LOAD_MEMBASE;
13211 return OP_LOADI8_MEMBASE;
13213 return OP_LOADR4_MEMBASE;
13215 return OP_LOADR8_MEMBASE;
13217 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Convert a CEE_STIND_* (store-indirect) CIL opcode into the matching
 * OP_STORE*_MEMBASE_REG IR opcode (store register to base + offset).
 * Asserts on any opcode outside the STIND family.
 */
13224 stind_to_store_membase (int opcode)
13228 return OP_STOREI1_MEMBASE_REG;
13230 return OP_STOREI2_MEMBASE_REG;
13232 return OP_STOREI4_MEMBASE_REG;
/* Object-reference stores use the pointer-sized store. */
13234 case CEE_STIND_REF:
13235 return OP_STORE_MEMBASE_REG;
13237 return OP_STOREI8_MEMBASE_REG;
13239 return OP_STORER4_MEMBASE_REG;
13241 return OP_STORER8_MEMBASE_REG;
13243 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * load form (OP_LOAD*_MEM), for targets that can encode a load from a
 * constant address directly. Only implemented for x86/amd64; 8-byte
 * loads only when registers are 64-bit wide.
 */
13250 mono_load_membase_to_load_mem (int opcode)
13252 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13253 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13255 case OP_LOAD_MEMBASE:
13256 return OP_LOAD_MEM;
13257 case OP_LOADU1_MEMBASE:
13258 return OP_LOADU1_MEM;
13259 case OP_LOADU2_MEMBASE:
13260 return OP_LOADU2_MEM;
13261 case OP_LOADI4_MEMBASE:
13262 return OP_LOADI4_MEM;
13263 case OP_LOADU4_MEMBASE:
13264 return OP_LOADU4_MEM;
13265 #if SIZEOF_REGISTER == 8
13266 case OP_LOADI8_MEMBASE:
13267 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Peephole helper for x86/amd64: given an ALU opcode whose result is
 * immediately stored back to memory with STORE_OPCODE, return the
 * memory-destination (read-modify-write) form of that ALU opcode so the
 * load/op/store triple collapses into one instruction. Only valid when
 * the store is a full-width register store of the matching size.
 */
13276 op_to_op_dest_membase (int store_opcode, int opcode)
13278 #if defined(TARGET_X86)
/* x86: the store must be pointer- or 32-bit-sized. */
13279 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* reg-to-memory forms */
13284 return OP_X86_ADD_MEMBASE_REG;
13286 return OP_X86_SUB_MEMBASE_REG;
13288 return OP_X86_AND_MEMBASE_REG;
13290 return OP_X86_OR_MEMBASE_REG;
13292 return OP_X86_XOR_MEMBASE_REG;
/* immediate-to-memory forms */
13295 return OP_X86_ADD_MEMBASE_IMM;
13298 return OP_X86_SUB_MEMBASE_IMM;
13301 return OP_X86_AND_MEMBASE_IMM;
13304 return OP_X86_OR_MEMBASE_IMM;
13307 return OP_X86_XOR_MEMBASE_IMM;
13313 #if defined(TARGET_AMD64)
/* amd64: 64-bit stores are accepted too. */
13314 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit reg-to-memory forms (shared X86_* encodings) */
13319 return OP_X86_ADD_MEMBASE_REG;
13321 return OP_X86_SUB_MEMBASE_REG;
13323 return OP_X86_AND_MEMBASE_REG;
13325 return OP_X86_OR_MEMBASE_REG;
13327 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit immediate-to-memory forms */
13329 return OP_X86_ADD_MEMBASE_IMM;
13331 return OP_X86_SUB_MEMBASE_IMM;
13333 return OP_X86_AND_MEMBASE_IMM;
13335 return OP_X86_OR_MEMBASE_IMM;
13337 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit reg-to-memory forms */
13339 return OP_AMD64_ADD_MEMBASE_REG;
13341 return OP_AMD64_SUB_MEMBASE_REG;
13343 return OP_AMD64_AND_MEMBASE_REG;
13345 return OP_AMD64_OR_MEMBASE_REG;
13347 return OP_AMD64_XOR_MEMBASE_REG;
/* 64-bit immediate-to-memory forms */
13350 return OP_AMD64_ADD_MEMBASE_IMM;
13353 return OP_AMD64_SUB_MEMBASE_IMM;
13356 return OP_AMD64_AND_MEMBASE_IMM;
13359 return OP_AMD64_OR_MEMBASE_IMM;
13362 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Peephole helper for x86/amd64: fold a condition-materializing
 * opcode whose byte result is stored with STORE_OPCODE into a single
 * SETcc-to-memory instruction. Only applies to 1-byte stores, since
 * SETcc writes exactly one byte.
 */
13372 op_to_op_store_membase (int store_opcode, int opcode)
13374 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13377 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13378 return OP_X86_SETEQ_MEMBASE;
13380 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13381 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Peephole helper for x86/amd64: given that sreg1 of OPCODE is
 * produced by a load (LOAD_OPCODE), return a variant of OPCODE that
 * reads its first operand directly from memory, eliminating the
 * separate load. The load width must match the operation width
 * (ilp32-aware on amd64).
 */
13389 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13392 /* FIXME: This has sign extension issues */
/* Byte compare against an immediate can use the 8-bit memory form. */
13394 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13395 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/32-bit loads can be folded. */
13398 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13403 return OP_X86_PUSH_MEMBASE;
13404 case OP_COMPARE_IMM:
13405 case OP_ICOMPARE_IMM:
13406 return OP_X86_COMPARE_MEMBASE_IMM;
13409 return OP_X86_COMPARE_MEMBASE_REG;
13413 #ifdef TARGET_AMD64
13414 /* FIXME: This has sign extension issues */
13416 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13417 return OP_X86_COMPARE_MEMBASE8_IMM;
/* PUSH folds only a full 64-bit (or non-ilp32 pointer) load. */
13422 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13423 return OP_X86_PUSH_MEMBASE;
13425 /* FIXME: This only works for 32 bit immediates
13426 case OP_COMPARE_IMM:
13427 case OP_LCOMPARE_IMM:
13428 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13429 return OP_AMD64_COMPARE_MEMBASE_IMM;
13431 case OP_ICOMPARE_IMM:
13432 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13433 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32- vs 64-bit form from the load width. */
13437 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13438 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13439 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13440 return OP_AMD64_COMPARE_MEMBASE_REG;
13443 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13444 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Peephole helper for x86/amd64: given that sreg2 of OPCODE is
 * produced by a load (LOAD_OPCODE), return a reg,memory form of
 * OPCODE that reads its second operand from memory directly,
 * eliminating the separate load. Operand width is chosen from the
 * load width (ilp32-aware on amd64).
 */
13453 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer/32-bit loads can be folded. */
13456 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13462 return OP_X86_COMPARE_REG_MEMBASE;
13464 return OP_X86_ADD_REG_MEMBASE;
13466 return OP_X86_SUB_REG_MEMBASE;
13468 return OP_X86_AND_REG_MEMBASE;
13470 return OP_X86_OR_REG_MEMBASE;
13472 return OP_X86_XOR_REG_MEMBASE;
13476 #ifdef TARGET_AMD64
/* 32-bit-wide loads (incl. pointer loads under ilp32/x32) */
13477 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13480 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13482 return OP_X86_ADD_REG_MEMBASE;
13484 return OP_X86_SUB_REG_MEMBASE;
13486 return OP_X86_AND_REG_MEMBASE;
13488 return OP_X86_OR_REG_MEMBASE;
13490 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit-wide loads */
13492 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13496 return OP_AMD64_COMPARE_REG_MEMBASE;
13498 return OP_AMD64_ADD_REG_MEMBASE;
13500 return OP_AMD64_SUB_REG_MEMBASE;
13502 return OP_AMD64_AND_REG_MEMBASE;
13504 return OP_AMD64_OR_REG_MEMBASE;
13506 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that are
 * emulated in software on the current configuration (long shifts on
 * 32-bit registers, and mul/div/rem where the arch emulates them),
 * since the emulation helpers only accept register operands.
 * NOTE(review): the case labels under each #if and their return path
 * are not visible in this listing — presumably they bail out before
 * the fall-through to mono_op_to_op_imm (); confirm against the full
 * source.
 */
13515 mono_op_to_op_imm_noemul (int opcode)
13518 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13524 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13531 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13536 return mono_op_to_op_imm (opcode);
13541 * mono_handle_global_vregs:
13543 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure: (1) scan every instruction in every bblock, tracking
 * for each vreg the first bblock that used it; a vreg seen in a second
 * bblock is promoted to a global variable (a MonoInst created via
 * mono_compile_create_var_for_vreg). (2) demote variables used in only
 * one bblock back to plain local vregs. (3) compact the varinfo/vars
 * tables, dropping entries marked dead.
 */
13547 mono_handle_global_vregs (MonoCompile *cfg)
13549 gint32 *vreg_to_bb;
13550 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num + 1 = seen in exactly
 * that bblock, -1 = seen in more than one bblock.
 * NOTE(review): the size expression uses sizeof (gint32*) (pointer
 * size, not element size) and the "+ 1" binds outside the multiply —
 * presumably sizeof (gint32) * (cfg->next_vreg + 1) was intended.
 * Over-allocation is harmless but worth confirming/cleaning upstream.
 */
13553 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13555 #ifdef MONO_ARCH_SIMD_INTRINSICS
13556 if (cfg->uses_simd_intrinsics)
13557 mono_simd_simplify_indirection (cfg);
13560 /* Find local vregs used in more than one bb */
13561 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13562 MonoInst *ins = bb->code;
13563 int block_num = bb->block_num;
13565 if (cfg->verbose_level > 2)
13566 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13569 for (; ins; ins = ins->next) {
13570 const char *spec = INS_INFO (ins->opcode);
13571 int regtype = 0, regindex;
13574 if (G_UNLIKELY (cfg->verbose_level > 2))
13575 mono_print_ins (ins);
/* CIL opcodes must already have been lowered to machine IR here. */
13577 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn; ' ' in the spec means
 * the slot is unused by this opcode. */
13579 for (regindex = 0; regindex < 4; regindex ++) {
13582 if (regindex == 0) {
13583 regtype = spec [MONO_INST_DEST];
13584 if (regtype == ' ')
13587 } else if (regindex == 1) {
13588 regtype = spec [MONO_INST_SRC1];
13589 if (regtype == ' ')
13592 } else if (regindex == 2) {
13593 regtype = spec [MONO_INST_SRC2];
13594 if (regtype == ' ')
13597 } else if (regindex == 3) {
13598 regtype = spec [MONO_INST_SRC3];
13599 if (regtype == ' ')
13604 #if SIZEOF_REGISTER == 4
13605 /* In the LLVM case, the long opcodes are not decomposed */
13606 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13608 * Since some instructions reference the original long vreg,
13609 * and some reference the two component vregs, it is quite hard
13610 * to determine when it needs to be global. So be conservative.
13612 if (!get_vreg_to_inst (cfg, vreg)) {
13613 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13615 if (cfg->verbose_level > 2)
13616 printf ("LONG VREG R%d made global.\n", vreg);
13620 * Make the component vregs volatile since the optimizations can
13621 * get confused otherwise.
13623 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13624 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13628 g_assert (vreg != -1);
13630 prev_bb = vreg_to_bb [vreg];
13631 if (prev_bb == 0) {
13632 /* 0 is a valid block num */
13633 vreg_to_bb [vreg] = block_num + 1;
13634 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never promoted. */
13635 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13638 if (!get_vreg_to_inst (cfg, vreg)) {
13639 if (G_UNLIKELY (cfg->verbose_level > 2))
13640 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the managed type for the new global var from regtype. */
13644 if (vreg_is_ref (cfg, vreg))
13645 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13647 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13650 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13653 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13657 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13660 g_assert_not_reached ();
13664 /* Flag as having been used in more than one bb */
13665 vreg_to_bb [vreg] = -1;
13671 /* If a variable is used in only one bblock, convert it into a local vreg */
13672 for (i = 0; i < cfg->num_varinfo; i++) {
13673 MonoInst *var = cfg->varinfo [i];
13674 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13676 switch (var->type) {
13682 #if SIZEOF_REGISTER == 8
13685 #if !defined(TARGET_X86)
13686 /* Enabling this screws up the fp stack on x86 */
13689 if (mono_arch_is_soft_float ())
13693 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13697 /* Arguments are implicitly global */
13698 /* Putting R4 vars into registers doesn't work currently */
13699 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13700 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13702 * Make that the variable's liveness interval doesn't contain a call, since
13703 * that would cause the lvreg to be spilled, making the whole optimization
13706 /* This is too slow for JIT compilation */
/* NOTE(review): vreg_to_bb holds gint32 tags (block_num+1 / -1), yet
 * the loop below dereferences vreg_to_bb [var->dreg]->code — this
 * section looks like dead/disabled code (possibly inside an #if 0 not
 * visible in this listing); confirm against the full source. */
13708 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13710 int def_index, call_index, ins_index;
13711 gboolean spilled = FALSE;
13716 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13717 const char *spec = INS_INFO (ins->opcode);
13719 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13720 def_index = ins_index;
/* NOTE(review): the second disjunct duplicates the first (SRC1/sreg1
 * twice); presumably SRC2/sreg2 was intended, so uses through sreg2
 * are missed here — confirm before fixing, see note above about this
 * section possibly being disabled. */
13722 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13723 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13724 if (call_index > def_index) {
13730 if (MONO_IS_CALL (ins))
13731 call_index = ins_index;
13741 if (G_UNLIKELY (cfg->verbose_level > 2))
13742 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13743 var->flags |= MONO_INST_IS_DEAD;
13744 cfg->vreg_to_inst [var->dreg] = NULL;
13751 * Compress the varinfo and vars tables so the liveness computation is faster and
13752 * takes up less space.
13755 for (i = 0; i < cfg->num_varinfo; ++i) {
13756 MonoInst *var = cfg->varinfo [i];
13757 if (pos < i && cfg->locals_start == i)
13758 cfg->locals_start = pos;
13759 if (!(var->flags & MONO_INST_IS_DEAD)) {
13761 cfg->varinfo [pos] = cfg->varinfo [i];
13762 cfg->varinfo [pos]->inst_c0 = pos;
13763 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13764 cfg->vars [pos].idx = pos;
13765 #if SIZEOF_REGISTER == 4
13766 if (cfg->varinfo [pos]->type == STACK_I8) {
13767 /* Modify the two component vars too */
13770 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13771 var1->inst_c0 = pos;
13772 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13773 var1->inst_c0 = pos;
13780 cfg->num_varinfo = pos;
13781 if (cfg->locals_start > cfg->num_varinfo)
13782 cfg->locals_start = cfg->num_varinfo;
13786 * mono_allocate_gsharedvt_vars:
13788 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13789 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13792 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* Per-vreg mapping; entry semantics: 0 = not gsharedvt,
 * idx + 1 = local with runtime-info slot idx, -1 = gsharedvt argument. */
13796 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13798 for (i = 0; i < cfg->num_varinfo; ++i) {
13799 MonoInst *ins = cfg->varinfo [i];
13802 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* varinfo entries at/after locals_start are locals; earlier ones
 * are arguments. */
13803 if (i >= cfg->locals_start) {
/* Locals get a slot in MonoGSharedVtMethodRuntimeInfo.entries
 * holding their frame offset, resolved at runtime. */
13805 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13806 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13807 ins->opcode = OP_GSHAREDVT_LOCAL;
13808 ins->inst_imm = idx;
/* Arguments are addressed relative to their register/stack slot. */
13811 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13812 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13819 * mono_spill_global_vars:
13821 * Generate spill code for variables which are not allocated to registers,
13822 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13823 * code is generated which could be optimized by the local optimization passes.
13826 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13828 MonoBasicBlock *bb;
13830 int orig_next_vreg;
13831 guint32 *vreg_to_lvreg;
13833 guint32 i, lvregs_len, lvregs_size;
13834 gboolean dest_has_lvreg = FALSE;
13835 MonoStackType stacktypes [128];
13836 MonoInst **live_range_start, **live_range_end;
13837 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13839 *need_local_opts = FALSE;
/* NOTE(review): spec2 is declared outside this view; it is used below as a
 * scratch ins-spec when store opcodes are temporarily rewritten. Clear it up front. */
13841 memset (spec2, 0, sizeof (spec2));
13843 /* FIXME: Move this function to mini.c */
/* Map ins-spec regtype characters ('i'/'l'/'f'/'x') to stack types for alloc_dreg () */
13844 stacktypes ['i'] = STACK_PTR;
13845 stacktypes ['l'] = STACK_I8;
13846 stacktypes ['f'] = STACK_R8;
13847 #ifdef MONO_ARCH_SIMD_INTRINSICS
13848 stacktypes ['x'] = STACK_VTYPE;
13851 #if SIZEOF_REGISTER == 4
13852 /* Create MonoInsts for longs */
13853 for (i = 0; i < cfg->num_varinfo; i++) {
13854 MonoInst *ins = cfg->varinfo [i];
13856 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13857 switch (ins->type) {
13862 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13865 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low and high 32-bit halves their own REGOFFSET vars, offset by
 * the machine's word layout (MINI_LS/MS_WORD_OFFSET handle endianness). */
13867 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13869 tree->opcode = OP_REGOFFSET;
13870 tree->inst_basereg = ins->inst_basereg;
13871 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13873 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13875 tree->opcode = OP_REGOFFSET;
13876 tree->inst_basereg = ins->inst_basereg;
13877 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13887 if (cfg->compute_gc_maps) {
13888 /* registers need liveness info even for !non refs */
13889 for (i = 0; i < cfg->num_varinfo; i++) {
13890 MonoInst *ins = cfg->varinfo [i];
13892 if (ins->opcode == OP_REGVAR)
13893 ins->flags |= MONO_INST_GC_TRACK;
13897 /* FIXME: widening and truncation */
13900 * As an optimization, when a variable allocated to the stack is first loaded into
13901 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13902 * the variable again.
13904 orig_next_vreg = cfg->next_vreg;
13905 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* lvregs records which vreg_to_lvreg entries are live, so they can be
 * invalidated quickly (at bblock starts and after calls) without a full sweep. */
13906 lvregs_size = 1024;
13907 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13911 * These arrays contain the first and last instructions accessing a given
13913 * Since we emit bblocks in the same order we process them here, and we
13914 * don't split live ranges, these will precisely describe the live range of
13915 * the variable, i.e. the instruction range where a valid value can be found
13916 * in the variable's location.
13917 * The live range is computed using the liveness info computed by the liveness pass.
13918 * We can't use vmv->range, since that is an abstract live range, and we need
13919 * one which is instruction precise.
13920 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13922 /* FIXME: Only do this if debugging info is requested */
13923 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13924 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13925 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13926 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13928 /* Add spill loads/stores */
13929 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13932 if (cfg->verbose_level > 2)
13933 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13935 /* Clear vreg_to_lvreg array */
13936 for (i = 0; i < lvregs_len; i++)
13937 vreg_to_lvreg [lvregs [i]] = 0;
13941 MONO_BB_FOR_EACH_INS (bb, ins) {
13942 const char *spec = INS_INFO (ins->opcode);
13943 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13944 gboolean store, no_lvreg;
13945 int sregs [MONO_MAX_SRC_REGS];
13947 if (G_UNLIKELY (cfg->verbose_level > 2))
13948 mono_print_ins (ins);
13950 if (ins->opcode == OP_NOP)
13954 * We handle LDADDR here as well, since it can only be decomposed
13955 * when variable addresses are known.
13957 if (ins->opcode == OP_LDADDR) {
13958 MonoInst *var = (MonoInst *)ins->inst_p0;
13960 if (var->opcode == OP_VTARG_ADDR) {
13961 /* Happens on SPARC/S390 where vtypes are passed by reference */
13962 MonoInst *vtaddr = var->inst_left;
13963 if (vtaddr->opcode == OP_REGVAR) {
13964 ins->opcode = OP_MOVE;
13965 ins->sreg1 = vtaddr->dreg;
13967 else if (var->inst_left->opcode == OP_REGOFFSET) {
13968 ins->opcode = OP_LOAD_MEMBASE;
13969 ins->inst_basereg = vtaddr->inst_basereg;
13970 ins->inst_offset = vtaddr->inst_offset;
/* gsharedvt_vreg_to_idx: negative marks a by-ref arg, positive is idx+1 into
 * the runtime-computed locals-offset table, zero means not a gsharedvt var. */
13973 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13974 /* gsharedvt arg passed by ref */
13975 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13977 ins->opcode = OP_LOAD_MEMBASE;
13978 ins->inst_basereg = var->inst_basereg;
13979 ins->inst_offset = var->inst_offset;
13980 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13981 MonoInst *load, *load2, *load3;
13982 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13983 int reg1, reg2, reg3;
13984 MonoInst *info_var = cfg->gsharedvt_info_var;
13985 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13989 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13992 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13994 g_assert (info_var);
13995 g_assert (locals_var);
13997 /* Mark the instruction used to compute the locals var as used */
13998 cfg->gsharedvt_locals_var_ins = NULL;
14000 /* Load the offset */
14001 if (info_var->opcode == OP_REGOFFSET) {
14002 reg1 = alloc_ireg (cfg);
14003 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14004 } else if (info_var->opcode == OP_REGVAR) {
14006 reg1 = info_var->dreg;
14008 g_assert_not_reached ();
/* reg2 = info->entries [idx], i.e. the offset of this local inside the locals area */
14010 reg2 = alloc_ireg (cfg);
14011 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14012 /* Load the locals area address */
14013 reg3 = alloc_ireg (cfg);
14014 if (locals_var->opcode == OP_REGOFFSET) {
14015 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14016 } else if (locals_var->opcode == OP_REGVAR) {
14017 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14019 g_assert_not_reached ();
14021 /* Compute the address */
14022 ins->opcode = OP_PADD;
/* Insert the helper loads before the (rewritten) LDADDR, in dependency order */
14026 mono_bblock_insert_before_ins (bb, ins, load3);
14027 mono_bblock_insert_before_ins (bb, load3, load2);
14029 mono_bblock_insert_before_ins (bb, load2, load);
14031 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is simply basereg + offset */
14033 ins->opcode = OP_ADD_IMM;
14034 ins->sreg1 = var->inst_basereg;
14035 ins->inst_imm = var->inst_offset;
/* The rewritten instruction may now be foldable by the local optimizer */
14038 *need_local_opts = TRUE;
14039 spec = INS_INFO (ins->opcode);
/* By this point all CIL-level opcodes must have been lowered to machine IR */
14042 if (ins->opcode < MONO_CEE_LAST) {
14043 mono_print_ins (ins);
14044 g_assert_not_reached ();
14048 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below treats
 * the store's base register as a source; swapped back after processing. */
14052 if (MONO_IS_STORE_MEMBASE (ins)) {
14053 tmp_reg = ins->dreg;
14054 ins->dreg = ins->sreg2;
14055 ins->sreg2 = tmp_reg;
/* Build a matching scratch spec: no dest, original dest regtype becomes src2 */
14058 spec2 [MONO_INST_DEST] = ' ';
14059 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14060 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14061 spec2 [MONO_INST_SRC3] = ' ';
14063 } else if (MONO_IS_STORE_MEMINDEX (ins))
14064 g_assert_not_reached ();
14069 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14070 printf ("\t %.3s %d", spec, ins->dreg);
14071 num_sregs = mono_inst_get_src_registers (ins, sregs);
14072 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14073 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
14080 regtype = spec [MONO_INST_DEST];
14081 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
/* Destination writes a global variable: either rename to its hreg, or emit a spill store */
14084 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14085 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14086 MonoInst *store_ins;
14088 MonoInst *def_ins = ins;
14089 int dreg = ins->dreg; /* The original vreg */
14091 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14093 if (var->opcode == OP_REGVAR) {
14094 ins->dreg = var->dreg;
14095 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14097 * Instead of emitting a load+store, use a _membase opcode.
14099 g_assert (var->opcode == OP_REGOFFSET);
14100 if (ins->opcode == OP_MOVE) {
14104 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14105 ins->inst_basereg = var->inst_basereg;
14106 ins->inst_offset = var->inst_offset;
14109 spec = INS_INFO (ins->opcode);
14113 g_assert (var->opcode == OP_REGOFFSET);
14115 prev_dreg = ins->dreg;
14117 /* Invalidate any previous lvreg for this vreg */
14118 vreg_to_lvreg [ins->dreg] = 0;
/* With soft float, R8 values live in integer registers, so store as I8 */
14122 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14124 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh local vreg; the spill store below
 * copies it into the variable's stack slot. */
14127 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14129 #if SIZEOF_REGISTER != 8
14130 if (regtype == 'l') {
/* 32-bit: spill a long as two I4 stores (low and high words) */
14131 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14132 mono_bblock_insert_after_ins (bb, ins, store_ins);
14133 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14134 mono_bblock_insert_after_ins (bb, ins, store_ins);
14135 def_ins = store_ins;
14140 g_assert (store_opcode != OP_STOREV_MEMBASE);
14142 /* Try to fuse the store into the instruction itself */
14143 /* FIXME: Add more instructions */
14144 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14145 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14146 ins->inst_imm = ins->inst_c0;
14147 ins->inst_destbasereg = var->inst_basereg;
14148 ins->inst_offset = var->inst_offset;
14149 spec = INS_INFO (ins->opcode);
14150 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A plain move into the variable becomes a direct store of its source */
14151 ins->opcode = store_opcode;
14152 ins->inst_destbasereg = var->inst_basereg;
14153 ins->inst_offset = var->inst_offset;
/* The fused instruction is now itself a store: swap dreg/sreg2 and use the
 * scratch spec, exactly as for pre-existing store opcodes above. */
14157 tmp_reg = ins->dreg;
14158 ins->dreg = ins->sreg2;
14159 ins->sreg2 = tmp_reg;
14162 spec2 [MONO_INST_DEST] = ' ';
14163 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14164 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14165 spec2 [MONO_INST_SRC3] = ' ';
14167 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14168 // FIXME: The backends expect the base reg to be in inst_basereg
14169 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14171 ins->inst_basereg = var->inst_basereg;
14172 ins->inst_offset = var->inst_offset;
14173 spec = INS_INFO (ins->opcode);
14175 /* printf ("INS: "); mono_print_ins (ins); */
14176 /* Create a store instruction */
14177 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14179 /* Insert it after the instruction */
14180 mono_bblock_insert_after_ins (bb, ins, store_ins);
14182 def_ins = store_ins;
14185 * We can't assign ins->dreg to var->dreg here, since the
14186 * sregs could use it. So set a flag, and do it after
/* Do not cache an lvreg for fp-stack values or volatile/indirect vars */
14189 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14190 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for the live-range info */
14195 if (def_ins && !live_range_start [dreg]) {
14196 live_range_start [dreg] = def_ins;
14197 live_range_start_bb [dreg] = bb;
14200 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14203 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14204 tmp->inst_c1 = dreg;
14205 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/*    SREGS    */
/***************/
14212 num_sregs = mono_inst_get_src_registers (ins, sregs);
14213 for (srcindex = 0; srcindex < 3; ++srcindex) {
14214 regtype = spec [MONO_INST_SRC1 + srcindex];
14215 sreg = sregs [srcindex];
14217 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14218 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14219 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14220 MonoInst *use_ins = ins;
14221 MonoInst *load_ins;
14222 guint32 load_opcode;
14224 if (var->opcode == OP_REGVAR) {
/* Variable allocated to a hard register: just rename the source */
14225 sregs [srcindex] = var->dreg;
14226 //mono_inst_set_src_registers (ins, sregs);
14227 live_range_end [sreg] = use_ins;
14228 live_range_end_bb [sreg] = bb;
14230 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14233 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14234 /* var->dreg is a hreg */
14235 tmp->inst_c1 = sreg;
14236 mono_bblock_insert_after_ins (bb, ins, tmp);
14242 g_assert (var->opcode == OP_REGOFFSET);
14244 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14246 g_assert (load_opcode != OP_LOADV_MEMBASE);
14248 if (vreg_to_lvreg [sreg]) {
14249 g_assert (vreg_to_lvreg [sreg] != -1);
14251 /* The variable is already loaded to an lvreg */
14252 if (G_UNLIKELY (cfg->verbose_level > 2))
14253 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14254 sregs [srcindex] = vreg_to_lvreg [sreg];
14255 //mono_inst_set_src_registers (ins, sregs);
14259 /* Try to fuse the load into the instruction */
14260 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14261 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14262 sregs [0] = var->inst_basereg;
14263 //mono_inst_set_src_registers (ins, sregs);
14264 ins->inst_offset = var->inst_offset;
14265 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14266 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14267 sregs [1] = var->inst_basereg;
14268 //mono_inst_set_src_registers (ins, sregs);
14269 ins->inst_offset = var->inst_offset;
14271 if (MONO_IS_REAL_MOVE (ins)) {
14272 ins->opcode = OP_NOP;
14275 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh local vreg before this instruction */
14277 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded value as this variable's lvreg, unless it is an
 * fp-stack value or the variable is volatile/indirect. */
14279 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14280 if (var->dreg == prev_dreg) {
14282 * sreg refers to the value loaded by the load
14283 * emitted below, but we need to use ins->dreg
14284 * since it refers to the store emitted earlier.
14288 g_assert (sreg != -1);
14289 vreg_to_lvreg [var->dreg] = sreg;
/* Grow lvregs by doubling; the old mempool allocation is simply abandoned */
14290 if (lvregs_len >= lvregs_size) {
14291 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14292 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14293 lvregs = new_lvregs;
14296 lvregs [lvregs_len ++] = var->dreg;
14300 sregs [srcindex] = sreg;
14301 //mono_inst_set_src_registers (ins, sregs);
14303 #if SIZEOF_REGISTER != 8
14304 if (regtype == 'l') {
/* 32-bit: reload a long as two I4 loads (high then low word) */
14305 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14306 mono_bblock_insert_before_ins (bb, ins, load_ins);
14307 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14308 mono_bblock_insert_before_ins (bb, ins, load_ins);
14309 use_ins = load_ins;
14314 #if SIZEOF_REGISTER == 4
14315 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14317 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14318 mono_bblock_insert_before_ins (bb, ins, load_ins);
14319 use_ins = load_ins;
/* Record the last use of this variable for the live-range info */
14323 if (var->dreg < orig_next_vreg) {
14324 live_range_end [var->dreg] = use_ins;
14325 live_range_end_bb [var->dreg] = bb;
14328 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14331 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14332 tmp->inst_c1 = var->dreg;
14333 mono_bblock_insert_after_ins (bb, ins, tmp);
14337 mono_inst_set_src_registers (ins, sregs);
/* Now that the sregs are processed, it is safe to cache the dreg's lvreg
 * (deferred from the dreg handling above via the dest_has_lvreg flag). */
14339 if (dest_has_lvreg) {
14340 g_assert (ins->dreg != -1);
14341 vreg_to_lvreg [prev_dreg] = ins->dreg;
14342 if (lvregs_len >= lvregs_size) {
14343 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14344 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14345 lvregs = new_lvregs;
14348 lvregs [lvregs_len ++] = prev_dreg;
14349 dest_has_lvreg = FALSE;
/* Undo the dreg<->sreg2 swap performed earlier for store opcodes */
14353 tmp_reg = ins->dreg;
14354 ins->dreg = ins->sreg2;
14355 ins->sreg2 = tmp_reg;
14358 if (MONO_IS_CALL (ins)) {
/* Cached lvregs do not survive a call; invalidate them all */
14359 /* Clear vreg_to_lvreg array */
14360 for (i = 0; i < lvregs_len; i++)
14361 vreg_to_lvreg [lvregs [i]] = 0;
14363 } else if (ins->opcode == OP_NOP) {
14365 MONO_INST_NULLIFY_SREGS (ins);
14368 if (cfg->verbose_level > 2)
14369 mono_print_ins_index (1, ins);
14372 /* Extend the live range based on the liveness info */
14373 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14374 for (i = 0; i < cfg->num_varinfo; i ++) {
14375 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14377 if (vreg_is_volatile (cfg, vi->vreg))
14378 /* The liveness info is incomplete */
14381 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14382 /* Live from at least the first ins of this bb */
14383 live_range_start [vi->vreg] = bb->code;
14384 live_range_start_bb [vi->vreg] = bb;
14387 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14388 /* Live at least until the last ins of this bb */
14389 live_range_end [vi->vreg] = bb->last_ins;
14390 live_range_end_bb [vi->vreg] = bb;
14397 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14398 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14400 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14401 for (i = 0; i < cfg->num_varinfo; ++i) {
14402 int vreg = MONO_VARINFO (cfg, i)->vreg;
14405 if (live_range_start [vreg]) {
14406 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14408 ins->inst_c1 = vreg;
14409 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14411 if (live_range_end [vreg]) {
14412 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14414 ins->inst_c1 = vreg;
14415 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14416 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14418 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14423 if (cfg->gsharedvt_locals_var_ins) {
14424 /* Nullify if unused */
14425 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14426 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* Free the live-range bookkeeping arrays (g_new-allocated above) */
14429 g_free (live_range_start);
14430 g_free (live_range_end);
14431 g_free (live_range_start_bb);
14432 g_free (live_range_end_bb);
14438 * - use 'iadd' instead of 'int_add'
14439 * - handling ovf opcodes: decompose in method_to_ir.
14440 * - unify iregs/fregs
14441 * -> partly done, the missing parts are:
14442 * - a more complete unification would involve unifying the hregs as well, so
14443 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14444 * would no longer map to the machine hregs, so the code generators would need to
14445 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14446 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14447 * fp/non-fp branches speeds it up by about 15%.
14448 * - use sext/zext opcodes instead of shifts
14450 * - get rid of TEMPLOADs if possible and use vregs instead
14451 * - clean up usage of OP_P/OP_ opcodes
14452 * - cleanup usage of DUMMY_USE
14453 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14455 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14456 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14457 * - make sure handle_stack_args () is called before the branch is emitted
14458 * - when the new IR is done, get rid of all unused stuff
14459 * - COMPARE/BEQ as separate instructions or unify them ?
14460 * - keeping them separate allows specialized compare instructions like
14461 * compare_imm, compare_membase
14462 * - most back ends unify fp compare+branch, fp compare+ceq
14463 * - integrate mono_save_args into inline_method
14464 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14465 * - handle long shift opts on 32 bit platforms somehow: they require
14466 * 3 sregs (2 for arg1 and 1 for arg2)
14467 * - make byref a 'normal' type.
14468 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14469 * variable if needed.
14470 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14471 * like inline_method.
14472 * - remove inlining restrictions
14473 * - fix LNEG and enable cfold of INEG
14474 * - generalize x86 optimizations like ldelema as a peephole optimization
14475 * - add store_mem_imm for amd64
14476 * - optimize the loading of the interruption flag in the managed->native wrappers
14477 * - avoid special handling of OP_NOP in passes
14478 * - move code inserting instructions into one function/macro.
14479 * - try a coalescing phase after liveness analysis
14480 * - add float -> vreg conversion + local optimizations on !x86
14481 * - figure out how to handle decomposed branches during optimizations, ie.
14482 * compare+branch, op_jump_table+op_br etc.
14483 * - promote RuntimeXHandles to vregs
14484 * - vtype cleanups:
14485 * - add a NEW_VARLOADA_VREG macro
14486 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14487 * accessing vtype fields.
14488 * - get rid of I8CONST on 64 bit platforms
14489 * - dealing with the increase in code size due to branches created during opcode
14491 * - use extended basic blocks
14492 * - all parts of the JIT
14493 * - handle_global_vregs () && local regalloc
14494 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14495 * - sources of increase in code size:
14498 * - isinst and castclass
14499 * - lvregs not allocated to global registers even if used multiple times
14500 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14502 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14503 * - add all micro optimizations from the old JIT
14504 * - put tree optimizations into the deadce pass
14505 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14506 * specific function.
14507 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14508 * fcompare + branchCC.
14509 * - create a helper function for allocating a stack slot, taking into account
14510 * MONO_CFG_HAS_SPILLUP.
14512 * - merge the ia64 switch changes.
14513 * - optimize mono_regstate2_alloc_int/float.
14514 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14515 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14516 * parts of the tree could be separated by other instructions, killing the tree
14517 * arguments, or stores killing loads etc. Also, should we fold loads into other
14518 * instructions if the result of the load is used multiple times ?
14519 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14520 * - LAST MERGE: 108395.
14521 * - when returning vtypes in registers, generate IR and append it to the end of the
14522 * last bb instead of doing it in the epilog.
14523 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14531 - When to decompose opcodes:
14532 - earlier: this makes some optimizations hard to implement, since the low level IR
14533 no longer contains the necessary information. But it is easier to do.
14534 - later: harder to implement, enables more optimizations.
14535 - Branches inside bblocks:
14536 - created when decomposing complex opcodes.
14537 - branches to another bblock: harmless, but not tracked by the branch
14538 optimizations, so need to branch to a label at the start of the bblock.
14539 - branches to inside the same bblock: very problematic, trips up the local
14540 reg allocator. Can be fixed by splitting the current bblock, but that is a
14541 complex operation, since some local vregs can become global vregs etc.
14542 - Local/global vregs:
14543 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14544 local register allocator.
14545 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14546 structure, created by mono_create_var (). Assigned to hregs or the stack by
14547 the global register allocator.
14548 - When to do optimizations like alu->alu_imm:
14549 - earlier -> saves work later on since the IR will be smaller/simpler
14550 - later -> can work on more instructions
14551 - Handling of valuetypes:
14552 - When a vtype is pushed on the stack, a new temporary is created, an
14553 instruction computing its address (LDADDR) is emitted and pushed on
14554 the stack. Need to optimize cases when the vtype is used immediately as in
14555 argument passing, stloc etc.
14556 - Instead of the to_end stuff in the old JIT, simply call the function handling
14557 the values on the stack before emitting the last instruction of the bb.
14560 #else /* !DISABLE_JIT */
/* NOTE(review): DISABLE_JIT build — stub implementations only. The body of
 * this function is not visible in this view; presumably a no-op. Confirm. */
14563 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14567 #endif /* !DISABLE_JIT */