2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/monitor.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
69 #include "jit-icalls.h"
71 #include "debugger-agent.h"
72 #include "seq-points.h"
/*
 * Inliner tuning constants. NOTE(review): their use sites are not visible in
 * this chunk -- presumably BRANCH_COST weights branches in the inline-size
 * heuristic and INLINE_LENGTH_LIMIT caps the IL length of inline candidates;
 * confirm against the inlining code later in the file.
 */
74 #define BRANCH_COST 10
75 #define INLINE_LENGTH_LIMIT 20
77 /* These have 'cfg' as an implicit argument */
/*
 * Compilation-failure macros: each records a failure kind on the implicit
 * 'cfg' (via the helper functions / mono_cfg_set_exception) and then jumps to
 * the enclosing function's 'exception_exit' (or 'mono_error_exit') label, so
 * they can only be used inside functions that define those labels.
 */
78 #define INLINE_FAILURE(msg) do { \
79 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
80 inline_failure (cfg, msg); \
81 goto exception_exit; \
84 #define CHECK_CFG_EXCEPTION do {\
85 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
86 goto exception_exit; \
88 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
89 method_access_failure ((cfg), (method), (cmethod)); \
90 goto exception_exit; \
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
112 #define DISABLE_AOT(cfg) do { \
113 if ((cfg)->verbose_level >= 2) \
114 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
115 (cfg)->disable_aot = TRUE; \
117 #define LOAD_ERROR do { \
118 break_on_unverified (); \
119 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
120 goto exception_exit; \
123 #define TYPE_LOAD_ERROR(klass) do { \
124 cfg->exception_ptr = klass; \
128 #define CHECK_CFG_ERROR do {\
129 if (!mono_error_ok (&cfg->error)) { \
130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
131 goto mono_error_exit; \
135 /* Determine whenever 'ins' represents a load of the 'this' argument */
136 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 /* helper methods signatures */
/* Cached trampoline signatures; filled in by mono_create_helper_signatures (). */
150 static MonoMethodSignature *helper_sig_class_init_trampoline;
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
155 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
158 * Instruction metadata
/*
 * X-macro trick: MINI_OP/MINI_OP3 are (re)defined before each inclusion of
 * "mini-ops.h" so that the same opcode list expands into different per-opcode
 * tables (here: dest/src register kinds, then source-register counts).
 */
166 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
167 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
173 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
178 /* keep in sync with the enum in mini.h */
181 #include "mini-ops.h"
186 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
187 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
189 * This should contain the index of the last sreg + 1. This is not the same
190 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
192 const gint8 ins_sreg_counts[] = {
193 #include "mini-ops.h"
198 #define MONO_INIT_VARINFO(vi,id) do { \
199 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Exported thin wrappers over the vreg allocators (alloc_ireg & friends,
 * defined elsewhere -- not visible in this chunk). One per register class:
 * integer, long, float, pointer, plus a stack-type-driven variant.
 */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
/* Thin wrapper over alloc_ireg_mp (); see mono_alloc_ireg_ref above. */
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map the managed type TYPE to the move opcode (OP_MOVE / OP_FMOVE / ...)
 * used to copy a value of that type between vregs. Enums recurse on their
 * base type, generic instances on their container's byval type.
 * NOTE(review): several case labels and return statements are missing from
 * this chunk; the visible switch arms are incomplete.
 */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (cfg, type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* r4fp means R4 values live in single-precision fp regs, so a raw R4 move. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 type = &type->data.generic_class->container_class->byval_arg;
327 g_assert (cfg->generic_sharing_context);
328 if (mini_type_var_is_vt (cfg, type))
331 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
333 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug dump of basic block BB: its in/out edges (block number + dfn) and
 * every instruction in its code list, prefixed with MSG.
 */
339 mono_print_bb (MonoBasicBlock *bb, const char *msg)
344 printf ("\n%s %d: [IN: ", msg, bb->block_num);
345 for (i = 0; i < bb->in_count; ++i)
346 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
348 for (i = 0; i < bb->out_count; ++i)
349 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
351 for (tree = bb->code; tree; tree = tree->next)
352 mono_print_ins_index (-1, tree);
356 mono_create_helper_signatures (void)
358 helper_sig_domain_get = mono_create_icall_signature ("ptr");
359 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
360 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
361 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
362 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
363 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * break_on_unverified:
 *
 *   Debugging hook invoked when unverifiable IL is seen: checks the
 * break_on_unverified debug option. NOTE(review): the body of the 'if' is
 * not visible in this chunk (presumably a native breakpoint -- confirm).
 */
366 static MONO_NEVER_INLINE void
367 break_on_unverified (void)
369 if (mini_get_debug_options ()->break_on_unverified)
373 static MONO_NEVER_INLINE void
374 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
376 char *method_fname = mono_method_full_name (method, TRUE);
377 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
379 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
380 g_free (method_fname);
381 g_free (cil_method_fname);
384 static MONO_NEVER_INLINE void
385 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
387 char *method_fname = mono_method_full_name (method, TRUE);
388 char *field_fname = mono_field_full_name (field);
389 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
390 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
391 g_free (method_fname);
392 g_free (field_fname);
395 static MONO_NEVER_INLINE void
396 inline_failure (MonoCompile *cfg, const char *msg)
398 if (cfg->verbose_level >= 2)
399 printf ("inline failed: %s\n", msg);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
403 static MONO_NEVER_INLINE void
404 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 if (cfg->verbose_level > 2) \
407 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
411 static MONO_NEVER_INLINE void
412 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
414 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
415 if (cfg->verbose_level >= 2)
416 printf ("%s\n", cfg->exception_message);
417 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
421 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
422 * foo<T> (int i) { ldarg.0; box T; }
/*
 * UNVERIFIED: handle unverifiable IL. Under gsharedvt this is downgraded to
 * a sharing failure (falling back to a per-instantiation compile); otherwise
 * it calls the break_on_unverified () debug hook.
 */
424 #define UNVERIFIED do { \
425 if (cfg->gsharedvt) { \
426 if (cfg->verbose_level > 2) \
427 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
429 goto exception_exit; \
431 break_on_unverified (); \
435 #define GET_BBLOCK(cfg,tblock,ip) do { \
436 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
438 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
439 NEW_BBLOCK (cfg, (tblock)); \
440 (tblock)->cil_code = (ip); \
441 ADD_BBLOCK (cfg, (tblock)); \
445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit a LEA (base + index << shift + imm) into a fresh mp-reg. */
446 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
447 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
448 (dest)->dreg = alloc_ireg_mp ((cfg)); \
449 (dest)->sreg1 = (sr1); \
450 (dest)->sreg2 = (sr2); \
451 (dest)->inst_imm = (imm); \
452 (dest)->backend.shift_amount = (shift); \
453 MONO_ADD_INS ((cfg)->cbb, (dest)); \
457 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op: widen mixed r4/r8 operand pairs to r8, and (on 64-bit)
 * sign-extend an I4 operand paired with a PTR operand, rewriting INS's
 * source regs and the caller's operand pointers in place.
 * NOTE(review): function braces and some surrounding lines are missing from
 * this chunk.
 */
459 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
461 MonoInst *arg1 = *arg1_ref;
462 MonoInst *arg2 = *arg2_ref;
465 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
466 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
469 /* Mixing r4/r8 is allowed by the spec */
470 if (arg1->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
474 conv->type = STACK_R8;
478 if (arg2->type == STACK_R4) {
479 int dreg = alloc_freg (cfg);
481 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
482 conv->type = STACK_R8;
488 #if SIZEOF_REGISTER == 8
489 /* FIXME: Need to add many more cases */
490 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
493 int dr = alloc_preg (cfg);
494 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
495 (ins)->sreg2 = widen->dreg;
/*
 * ADD_BINOP/ADD_UNOP: pop operand(s) from the implicit eval stack 'sp',
 * type-check via type_from_op (), widen if needed, allocate the dest reg and
 * push the (possibly decomposed) result back.
 */
500 #define ADD_BINOP(op) do { \
501 MONO_INST_NEW (cfg, ins, (op)); \
503 ins->sreg1 = sp [0]->dreg; \
504 ins->sreg2 = sp [1]->dreg; \
505 type_from_op (cfg, ins, sp [0], sp [1]); \
507 /* Have to insert a widening op */ \
508 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
509 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
510 MONO_ADD_INS ((cfg)->cbb, (ins)); \
511 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
514 #define ADD_UNOP(op) do { \
515 MONO_INST_NEW (cfg, ins, (op)); \
517 ins->sreg1 = sp [0]->dreg; \
518 type_from_op (cfg, ins, sp [0], NULL); \
520 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
521 MONO_ADD_INS ((cfg)->cbb, (ins)); \
522 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit a compare + conditional branch, linking the current
 * bblock to both the taken target and the fall-through block.
 */
525 #define ADD_BINCOND(next_block) do { \
528 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
529 cmp->sreg1 = sp [0]->dreg; \
530 cmp->sreg2 = sp [1]->dreg; \
531 type_from_op (cfg, cmp, sp [0], sp [1]); \
533 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
534 type_from_op (cfg, ins, sp [0], sp [1]); \
535 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
536 GET_BBLOCK (cfg, tblock, target); \
537 link_bblock (cfg, cfg->cbb, tblock); \
538 ins->inst_true_bb = tblock; \
539 if ((next_block)) { \
540 link_bblock (cfg, cfg->cbb, (next_block)); \
541 ins->inst_false_bb = (next_block); \
542 start_new_bblock = 1; \
544 GET_BBLOCK (cfg, tblock, ip); \
545 link_bblock (cfg, cfg->cbb, tblock); \
546 ins->inst_false_bb = tblock; \
547 start_new_bblock = 2; \
549 if (sp != stack_start) { \
550 handle_stack_args (cfg, stack_start, sp - stack_start); \
551 CHECK_UNVERIFIABLE (cfg); \
553 MONO_ADD_INS (cfg->cbb, cmp); \
554 MONO_ADD_INS (cfg->cbb, ins); \
558 * link_bblock: Links two basic blocks
560 * links two basic blocks in the control flow graph, the 'from'
561 * argument is the starting block and the 'to' argument is the block
562 * the control flow ends to after 'from'.
/*
 * Adds TO to FROM's out-edge array and FROM to TO's in-edge array unless the
 * edge already exists; edge arrays are reallocated from the cfg mempool.
 * NOTE(review): the early-out checks and array-store/count-update lines are
 * missing from this chunk.
 */
565 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
567 MonoBasicBlock **newa;
571 if (from->cil_code) {
573 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
575 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
578 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
580 printf ("edge from entry to exit\n");
/* Skip if the out-edge is already present. */
585 for (i = 0; i < from->out_count; ++i) {
586 if (to == from->out_bb [i]) {
592 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
593 for (i = 0; i < from->out_count; ++i) {
594 newa [i] = from->out_bb [i];
/* Same dance for the in-edge list of TO. */
602 for (i = 0; i < to->in_count; ++i) {
603 if (from == to->in_bb [i]) {
609 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
610 for (i = 0; i < to->in_count; ++i) {
611 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
620 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
622 link_bblock (cfg, from, to);
626 * mono_find_block_region:
628 * We mark each basic block with a region ID. We use that to avoid BB
629 * optimizations when blocks are in different regions.
632 * A region token that encodes where this region is, and information
633 * about the clause owner for this block.
635 * The region encodes the try/catch/filter clause that owns this block
636 * as well as the type. -1 is a special value that represents a block
637 * that is in none of try/catch/filter.
640 mono_find_block_region (MonoCompile *cfg, int offset)
642 MonoMethodHeader *header = cfg->header;
643 MonoExceptionClause *clause;
646 for (i = 0; i < header->num_clauses; ++i) {
647 clause = &header->clauses [i];
648 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
649 (offset < (clause->handler_offset)))
650 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
652 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
653 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
654 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
655 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
656 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
658 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
661 for (i = 0; i < header->num_clauses; ++i) {
662 clause = &header->clauses [i];
664 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
665 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the exception clauses of kind TYPE whose protected range contains
 * IP but not TARGET -- i.e. the handlers that must run when control leaves
 * the clause by branching from IP to TARGET. NOTE(review): the result-list
 * initialization and return are missing from this chunk (presumably a GList).
 */
672 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
674 MonoMethodHeader *header = cfg->header;
675 MonoExceptionClause *clause;
679 for (i = 0; i < header->num_clauses; ++i) {
680 clause = &header->clauses [i];
681 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
682 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
683 if (clause->flags == type)
684 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region: lazily create (and cache in cfg->spvars,
 * keyed by region) the stack-pointer variable used by handlers of REGION.
 */
691 mono_create_spvar_for_region (MonoCompile *cfg, int region)
695 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
699 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
700 /* prevent it from being register allocated */
701 var->flags |= MONO_INST_VOLATILE;
703 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for a handler offset. */
707 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
709 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset: lazily create (and cache in cfg->exvars,
 * keyed by IL offset) the object-typed variable holding the in-flight
 * exception for the handler at OFFSET.
 */
713 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
717 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
721 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
722 /* prevent it from being register allocated */
723 var->flags |= MONO_INST_VOLATILE;
725 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
731 * Returns the type used in the eval stack when @type is loaded.
732 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets INST->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/...) and INST->klass from
 * TYPE. Enums recurse on their base type, generic instances on the
 * container's byval type; gsharedvt type variables become STACK_VTYPE.
 * NOTE(review): many case labels and break statements are missing from this
 * chunk, so consecutive assignments below belong to different (elided) cases.
 */
735 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
739 type = mini_get_underlying_type (cfg, type);
740 inst->klass = klass = mono_class_from_mono_type (type);
742 inst->type = STACK_MP;
747 switch (type->type) {
749 inst->type = STACK_INV;
757 inst->type = STACK_I4;
762 case MONO_TYPE_FNPTR:
763 inst->type = STACK_PTR;
765 case MONO_TYPE_CLASS:
766 case MONO_TYPE_STRING:
767 case MONO_TYPE_OBJECT:
768 case MONO_TYPE_SZARRAY:
769 case MONO_TYPE_ARRAY:
770 inst->type = STACK_OBJ;
774 inst->type = STACK_I8;
777 inst->type = cfg->r4_stack_type;
780 inst->type = STACK_R8;
782 case MONO_TYPE_VALUETYPE:
783 if (type->data.klass->enumtype) {
784 type = mono_class_enum_basetype (type->data.klass);
788 inst->type = STACK_VTYPE;
791 case MONO_TYPE_TYPEDBYREF:
792 inst->klass = mono_defaults.typed_reference_class;
793 inst->type = STACK_VTYPE;
795 case MONO_TYPE_GENERICINST:
796 type = &type->data.generic_class->container_class->byval_arg;
800 g_assert (cfg->generic_sharing_context);
801 if (mini_is_gsharedvt_type (cfg, type)) {
802 g_assert (cfg->gsharedvt);
803 inst->type = STACK_VTYPE;
805 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
809 g_error ("unknown type 0x%02x in eval stack type", type->type);
814 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * All tables below are indexed by MonoStackType (STACK_INV, I4, I8, PTR, R8,
 * MP, OBJ, VTYPE, R4 -- see the "Inv i L p F & O vt r4" legend further down).
 * The *_table tables give the result stack type of an operation on the two
 * operand types; the *_op_map tables give the delta to add to a generic CIL
 * opcode to obtain the type-specialized IR opcode for a given operand type.
 */
817 bin_num_table [STACK_MAX] [STACK_MAX] = {
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
820 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
821 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
823 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result types for the 'neg' unary op, indexed by operand stack type. */
831 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
834 /* reduce the size of this table */
836 bin_int_table [STACK_MAX] [STACK_MAX] = {
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: 0 = invalid, non-zero = allowed (values > 1 encode
 * special verifier cases for pointer/reference comparisons). */
848 bin_comp_table [STACK_MAX] [STACK_MAX] = {
849 /* Inv i L p F & O vt r4 */
851 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
852 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
853 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
854 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
855 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
856 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
857 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
861 /* reduce the size of this table */
863 shift_table [STACK_MAX] [STACK_MAX] = {
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
867 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
875 * Tables to map from the non-specific opcode to the matching
876 * type-specific opcode.
878 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
880 binops_op_map [STACK_MAX] = {
881 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
884 /* handles from CEE_NEG to CEE_CONV_U8 */
886 unops_op_map [STACK_MAX] = {
887 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
890 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
892 ovfops_op_map [STACK_MAX] = {
893 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
896 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
898 ovf2ops_op_map [STACK_MAX] = {
899 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
902 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
904 ovf3ops_op_map [STACK_MAX] = {
905 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
908 /* handles from CEE_BEQ to CEE_BLT_UN */
910 beqops_op_map [STACK_MAX] = {
911 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
914 /* handles from CEE_CEQ to CEE_CLT_UN */
916 ceqops_op_map [STACK_MAX] = {
917 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
921 * Sets ins->type (the type on the eval stack) according to the
922 * type of the opcode and the arguments to it.
923 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
925 * FIXME: this function sets ins->type unconditionally in some cases, but
926 * it should set it to invalid for some types (a conv.x on an object)
/*
 * The big dispatch: for each generic CIL/IR opcode, look up the result stack
 * type in the tables above and specialize ins->opcode by adding the matching
 * *_op_map delta for the operand type.
 * NOTE(review): many case labels, breaks and closing braces are missing from
 * this chunk; consecutive statements below may belong to different (elided)
 * cases of the switch.
 */
929 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
931 switch (ins->opcode) {
938 /* FIXME: check unverifiable args for STACK_MP */
939 ins->type = bin_num_table [src1->type] [src2->type];
940 ins->opcode += binops_op_map [ins->type];
947 ins->type = bin_int_table [src1->type] [src2->type];
948 ins->opcode += binops_op_map [ins->type];
953 ins->type = shift_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
/* Comparisons: pick the L/R/F/I compare variant from the operand width. */
959 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
960 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
961 ins->opcode = OP_LCOMPARE;
962 else if (src1->type == STACK_R4)
963 ins->opcode = OP_RCOMPARE;
964 else if (src1->type == STACK_R8)
965 ins->opcode = OP_FCOMPARE;
967 ins->opcode = OP_ICOMPARE;
969 case OP_ICOMPARE_IMM:
970 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
971 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
972 ins->opcode = OP_LCOMPARE_IMM;
984 ins->opcode += beqops_op_map [src1->type];
987 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
994 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
995 ins->opcode += ceqops_op_map [src1->type];
/* Unary operators */
999 ins->type = neg_table [src1->type];
1000 ins->opcode += unops_op_map [ins->type];
1003 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1004 ins->type = src1->type;
1006 ins->type = STACK_INV;
1007 ins->opcode += unops_op_map [ins->type];
/* Conversion operators */
1013 ins->type = STACK_I4;
1014 ins->opcode += unops_op_map [src1->type];
1017 ins->type = STACK_R8;
1018 switch (src1->type) {
1021 ins->opcode = OP_ICONV_TO_R_UN;
1024 ins->opcode = OP_LCONV_TO_R_UN;
1028 case CEE_CONV_OVF_I1:
1029 case CEE_CONV_OVF_U1:
1030 case CEE_CONV_OVF_I2:
1031 case CEE_CONV_OVF_U2:
1032 case CEE_CONV_OVF_I4:
1033 case CEE_CONV_OVF_U4:
1034 ins->type = STACK_I4;
1035 ins->opcode += ovf3ops_op_map [src1->type];
1037 case CEE_CONV_OVF_I_UN:
1038 case CEE_CONV_OVF_U_UN:
1039 ins->type = STACK_PTR;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I1_UN:
1043 case CEE_CONV_OVF_I2_UN:
1044 case CEE_CONV_OVF_I4_UN:
1045 case CEE_CONV_OVF_U1_UN:
1046 case CEE_CONV_OVF_U2_UN:
1047 case CEE_CONV_OVF_U4_UN:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf2ops_op_map [src1->type];
1052 ins->type = STACK_PTR;
1053 switch (src1->type) {
1055 ins->opcode = OP_ICONV_TO_U;
1059 #if SIZEOF_VOID_P == 8
1060 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_MOVE;
1066 ins->opcode = OP_LCONV_TO_U;
1069 ins->opcode = OP_FCONV_TO_U;
1075 ins->type = STACK_I8;
1076 ins->opcode += unops_op_map [src1->type];
1078 case CEE_CONV_OVF_I8:
1079 case CEE_CONV_OVF_U8:
1080 ins->type = STACK_I8;
1081 ins->opcode += ovf3ops_op_map [src1->type];
1083 case CEE_CONV_OVF_U8_UN:
1084 case CEE_CONV_OVF_I8_UN:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf2ops_op_map [src1->type];
1089 ins->type = cfg->r4_stack_type;
1090 ins->opcode += unops_op_map [src1->type];
1093 ins->type = STACK_R8;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1101 ins->type = STACK_I4;
1102 ins->opcode += ovfops_op_map [src1->type];
1105 case CEE_CONV_OVF_I:
1106 case CEE_CONV_OVF_U:
1107 ins->type = STACK_PTR;
1108 ins->opcode += ovfops_op_map [src1->type];
1111 case CEE_ADD_OVF_UN:
1113 case CEE_MUL_OVF_UN:
1115 case CEE_SUB_OVF_UN:
1116 ins->type = bin_num_table [src1->type] [src2->type];
1117 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
1118 if (ins->type == STACK_R8)
1119 ins->type = STACK_INV;
1121 case OP_LOAD_MEMBASE:
1122 ins->type = STACK_PTR;
1124 case OP_LOADI1_MEMBASE:
1125 case OP_LOADU1_MEMBASE:
1126 case OP_LOADI2_MEMBASE:
1127 case OP_LOADU2_MEMBASE:
1128 case OP_LOADI4_MEMBASE:
1129 case OP_LOADU4_MEMBASE:
1130 ins->type = STACK_PTR;
1132 case OP_LOADI8_MEMBASE:
1133 ins->type = STACK_I8;
1135 case OP_LOADR4_MEMBASE:
1136 ins->type = cfg->r4_stack_type;
1138 case OP_LOADR8_MEMBASE:
1139 ins->type = STACK_R8;
1142 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1146 if (ins->type == STACK_MP)
1147 ins->klass = mono_defaults.object_class;
/* Stack type produced by each ldind.* variant (i1..i4 widen to I4). */
1152 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1158 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature: verify that the eval-stack values in ARGS are
 * compatible with SIG's parameter types (byref-ness, reference kinds,
 * float widths). Returns 0 on mismatch. NOTE(review): several case labels
 * and the return statements between the visible lines are missing from this
 * chunk; 'this' as a parameter name is valid C but would break under C++.
 */
1163 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1167 switch (args->type) {
1177 for (i = 0; i < sig->param_count; ++i) {
1178 switch (args [i].type) {
1182 if (!sig->params [i]->byref)
1186 if (sig->params [i]->byref)
1188 switch (sig->params [i]->type) {
1189 case MONO_TYPE_CLASS:
1190 case MONO_TYPE_STRING:
1191 case MONO_TYPE_OBJECT:
1192 case MONO_TYPE_SZARRAY:
1193 case MONO_TYPE_ARRAY:
1200 if (sig->params [i]->byref)
1202 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1211 /*if (!param_table [args [i].type] [sig->params [i]->type])
1219 * When we need a pointer to the current domain many times in a method, we
1220 * call mono_domain_get() once and we store the result in a local variable.
1221 * This function returns the variable that represents the MonoDomain*.
1223 inline static MonoInst *
1224 mono_get_domainvar (MonoCompile *cfg)
1226 if (!cfg->domainvar)
1227 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1228 return cfg->domainvar;
1232 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Lazily create cfg->got_var on architectures that need an explicit GOT
 * variable; only meaningful when compiling AOT. NOTE(review): the #else /
 * #endif branch and the early-return for the non-AOT case are missing from
 * this chunk.
 */
1236 mono_get_got_var (MonoCompile *cfg)
1238 #ifdef MONO_ARCH_NEED_GOT_VAR
1239 if (!cfg->compile_aot)
1241 if (!cfg->got_var) {
1242 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1244 return cfg->got_var;
1251 mono_get_vtable_var (MonoCompile *cfg)
1253 g_assert (cfg->generic_sharing_context);
1255 if (!cfg->rgctx_var) {
1256 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 /* force the var to be stack allocated */
1258 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1261 return cfg->rgctx_var;
1265 type_from_stack_type (MonoInst *ins) {
1266 switch (ins->type) {
1267 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1268 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1269 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1270 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1271 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1273 return &ins->klass->this_arg;
1274 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1275 case STACK_VTYPE: return &ins->klass->byval_arg;
1277 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map the MonoType T to the STACK_* evaluation-stack type used by the JIT.
 * NOTE(review): interior lines of this function are missing from this chunk;
 * only the visible cases are annotated below.
 */
1282 static G_GNUC_UNUSED int
1283 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers so the switch sees the underlying type code */
1285 t = mono_type_get_underlying_type (t);
1297 case MONO_TYPE_FNPTR:
/* Reference types all share the same (object) stack type */
1299 case MONO_TYPE_CLASS:
1300 case MONO_TYPE_STRING:
1301 case MONO_TYPE_OBJECT:
1302 case MONO_TYPE_SZARRAY:
1303 case MONO_TYPE_ARRAY:
/* R4 maps to STACK_R4 or STACK_R8 depending on the compile configuration */
1309 return cfg->r4_stack_type;
1312 case MONO_TYPE_VALUETYPE:
1313 case MONO_TYPE_TYPEDBYREF:
1315 case MONO_TYPE_GENERICINST:
/* Generic instances of valuetypes are treated like plain vtypes */
1316 if (mono_type_generic_inst_is_valuetype (t))
/* Unhandled type code: hard failure */
1322 g_assert_not_reached ();
1329 array_access_to_klass (int opcode)
1333 return mono_defaults.byte_class;
1335 return mono_defaults.uint16_class;
1338 return mono_defaults.int_class;
1341 return mono_defaults.sbyte_class;
1344 return mono_defaults.int16_class;
1347 return mono_defaults.int32_class;
1349 return mono_defaults.uint32_class;
1352 return mono_defaults.int64_class;
1355 return mono_defaults.single_class;
1358 return mono_defaults.double_class;
1359 case CEE_LDELEM_REF:
1360 case CEE_STELEM_REF:
1361 return mono_defaults.object_class;
1363 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable to hold the value of evaluation-stack slot SLOT
 * across a basic-block boundary. Variables for scalar stack types are cached
 * in cfg->intvars and reused; others get a fresh variable.
 * NOTE(review): the case labels of the switch are missing from this chunk.
 */
1369 * We try to share variables when possible
1372 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1377 /* inlining can result in deeper stacks */
1378 if (slot >= cfg->header->max_stack)
1379 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per (slot, stack type) */
1381 pos = ins->type - 1 + slot * STACK_MAX;
1383 switch (ins->type) {
/* Reuse a previously created variable for this slot/type if available */
1390 if ((vnum = cfg->intvars [pos]))
1391 return cfg->varinfo [vnum];
1392 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1393 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable */
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1402 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1405 * Don't use this if a generic_context is set, since that means AOT can't
1406 * look up the method using just the image+token.
1407 * table == 0 means this is a reference made from a wrapper.
1409 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1410 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1411 jump_info_token->image = image;
1412 jump_info_token->token = token;
1413 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 *   Spill the COUNT items in SP (left on the evaluation stack at a basic
 * block boundary) into shared local variables, so the successor blocks can
 * reload them. See the original comment below for the full contract.
 * NOTE(review): interior lines of this function are missing from this chunk.
 */
1418 * This function is called to handle items that are left on the evaluation stack
1419 * at basic block boundaries. What happens is that we save the values to local variables
1420 * and we reload them later when first entering the target basic block (with the
1421 * handle_loaded_temps () function).
1422 * A single joint point will use the same variables (stored in the array bb->out_stack or
1423 * bb->in_stack, if the basic block is before or after the joint point).
1425 * This function needs to be called _before_ emitting the last instruction of
1426 * the bb (i.e. before emitting a branch).
1427 * If the stack merge fails at a join point, cfg->unverifiable is set.
1430 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1433 MonoBasicBlock *bb = cfg->cbb;
1434 MonoBasicBlock *outb;
1435 MonoInst *inst, **locals;
1440 if (cfg->verbose_level > 3)
1441 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables hold the out-stack */
1442 if (!bb->out_scount) {
1443 bb->out_scount = count;
1444 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack already chosen by a successor block */
1446 for (i = 0; i < bb->out_count; ++i) {
1447 outb = bb->out_bb [i];
1448 /* exception handlers are linked, but they should not be considered for stack args */
1449 if (outb->flags & BB_EXCEPTION_HANDLER)
1451 //printf (" %d", outb->block_num);
1452 if (outb->in_stack) {
1454 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate and fill a new one */
1460 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1461 for (i = 0; i < count; ++i) {
1463 * try to reuse temps already allocated for this purpouse, if they occupy the same
1464 * stack slot and if they are of the same type.
1465 * This won't cause conflicts since if 'local' is used to
1466 * store one of the values in the in_stack of a bblock, then
1467 * the same variable will be used for the same outgoing stack
1469 * This doesn't work when inlining methods, since the bblocks
1470 * in the inlined methods do not inherit their in_stack from
1471 * the bblock they are inlined to. See bug #58863 for an
1474 if (cfg->inlined_method)
1475 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1477 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to every (non-handler) successor */
1482 for (i = 0; i < bb->out_count; ++i) {
1483 outb = bb->out_bb [i];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER)
1487 if (outb->in_scount) {
/* Stack depth mismatch at a join point: mark method unverifiable */
1488 if (outb->in_scount != bb->out_scount) {
1489 cfg->unverifiable = TRUE;
1492 continue; /* check they are the same locals */
1494 outb->in_scount = count;
1495 outb->in_stack = bb->out_stack;
1498 locals = bb->out_stack;
/* Emit the stores that spill sp[] into the shared variables */
1500 for (i = 0; i < count; ++i) {
1501 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1502 inst->cil_code = sp [i]->cil_code;
1503 sp [i] = locals [i];
1504 if (cfg->verbose_level > 3)
1505 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1509 * It is possible that the out bblocks already have in_stack assigned, and
1510 * the in_stacks differ. In this case, we will store to all the different
1517 /* Find a bblock which has a different in_stack */
1519 while (bindex < bb->out_count) {
1520 outb = bb->out_bb [bindex];
1521 /* exception handlers are linked, but they should not be considered for stack args */
1522 if (outb->flags & BB_EXCEPTION_HANDLER) {
1526 if (outb->in_stack != locals) {
/* Also spill into this successor's distinct variable set */
1527 for (i = 0; i < count; ++i) {
1528 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1529 inst->cil_code = sp [i]->cil_code;
1530 sp [i] = locals [i];
1531 if (cfg->verbose_level > 3)
1532 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1534 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap found at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Two implementations exist depending on COMPRESSED_INTERFACE_BITMAP.
 * NOTE(review): interior lines are missing from this chunk.
 */
1544 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1546 int ibitmap_reg = alloc_preg (cfg);
1547 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer to the mono_class_interface_match icall */
1549 MonoInst *res, *ins;
1550 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1551 MONO_ADD_INS (cfg->cbb, ins);
/* The interface id is only known at load time under AOT */
1553 if (cfg->compile_aot)
1554 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1556 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1557 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1558 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: test bit (iid & 7) of byte (iid >> 3) inline */
1560 int ibitmap_byte_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1564 if (cfg->compile_aot) {
1565 int iid_reg = alloc_preg (cfg);
1566 int shifted_iid_reg = alloc_preg (cfg);
1567 int ibitmap_byte_address_reg = alloc_preg (cfg);
1568 int masked_iid_reg = alloc_preg (cfg);
1569 int iid_one_bit_reg = alloc_preg (cfg);
1570 int iid_bit_reg = alloc_preg (cfg);
1571 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1573 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1576 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: interface_id is a compile-time constant, fold the math */
1580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1587 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1588 * stored in "klass_reg" implements the interface "klass".
1591 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1593 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1597 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1598 * stored in "vtable_reg" implements the interface "klass".
1601 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1603 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1607 * Emit code which checks whenever the interface id of @klass is smaller than
1608 * than the value given by max_iid_reg.
1611 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 if (cfg->compile_aot) {
1615 int iid_reg = alloc_preg (cfg);
1616 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1617 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 /* Same as above, but obtains max_iid from a vtable */
1629 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1630 MonoBasicBlock *false_target)
1632 int max_iid_reg = alloc_preg (cfg);
1634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1635 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1638 /* Same as above, but obtains max_iid from a klass */
1640 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1641 MonoBasicBlock *false_target)
1643 int max_iid_reg = alloc_preg (cfg);
1645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1646 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style check: walk KLASS's supertype table on the runtime
 * class in KLASS_REG and branch to true_target/false_target accordingly.
 * KLASS_INS, when set, supplies the class pointer at runtime; otherwise the
 * comparison uses a (AOT or immediate) constant.
 * NOTE(review): interior lines (including the klass_ins branch head) are
 * missing from this chunk.
 */
1650 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1652 int idepth_reg = alloc_preg (cfg);
1653 int stypes_reg = alloc_preg (cfg);
1654 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before reading them */
1656 mono_class_setup_supertypes (klass);
/* Deep hierarchies: first verify the object's idepth is large enough */
1658 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] from the runtime class */
1663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1666 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1667 } else if (cfg->compile_aot) {
1668 int const_reg = alloc_preg (cfg);
1669 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1670 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1678 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1680 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1684 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1686 int intf_reg = alloc_preg (cfg);
1688 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1689 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1694 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1698 * Variant of the above that takes a register to the class, not the vtable.
1701 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1703 int intf_bit_reg = alloc_preg (cfg);
1705 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1706 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1711 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1715 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1718 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1719 } else if (cfg->compile_aot) {
1720 int const_reg = alloc_preg (cfg);
1721 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1722 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1730 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1732 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1736 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1738 if (cfg->compile_aot) {
1739 int const_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1745 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1749 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the object in OBJ_REG (runtime class in
 * KLASS_REG) against KLASS, throwing InvalidCastException on failure.
 * Handles array element-class special cases and the supertype-table walk.
 * OBJECT_IS_NULL is the block to branch to when the check is satisfied via
 * a null/early-out path. NOTE(review): interior lines are missing from this
 * chunk; the visible structure is annotated below.
 */
1752 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1755 int rank_reg = alloc_preg (cfg);
1756 int eclass_reg = alloc_preg (cfg);
/* Array path does not support a runtime-supplied class instruction */
1758 g_assert (!klass_inst);
/* The object must be an array of the same rank */
1759 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1761 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1762 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Then check the element (cast) class */
1763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1764 if (klass->cast_class == mono_defaults.object_class) {
1765 int parent_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1767 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1768 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1769 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1770 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1771 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1772 } else if (klass->cast_class == mono_defaults.enum_class) {
1773 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1774 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1775 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1777 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1778 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1781 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1782 /* Check that the object is a vector too */
1783 int bounds_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1786 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertype table like isinst, but throwing */
1789 int idepth_reg = alloc_preg (cfg);
1790 int stypes_reg = alloc_preg (cfg);
1791 int stype = alloc_preg (cfg);
1793 mono_class_setup_supertypes (klass);
1795 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1796 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1798 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1801 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1802 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1807 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1809 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR that fills SIZE bytes at DESTREG+OFFSET with VAL
 * (currently only VAL == 0 is supported), using the widest stores the
 * alignment allows. NOTE(review): loop/switch scaffolding lines are missing
 * from this chunk.
 */
1813 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is implemented */
1817 g_assert (val == 0);
/* Small, aligned sizes: a single immediate store of the right width */
1822 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1825 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1828 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1833 #if SIZEOF_REGISTER == 8
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize VAL in a register and store it repeatedly */
1841 val_reg = alloc_preg (cfg);
1843 if (SIZEOF_REGISTER == 8)
1844 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1846 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until aligned */
1849 /* This could be optimized further if neccesary */
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1858 #if !NO_UNALIGNED_ACCESS
1859 if (SIZEOF_REGISTER == 8) {
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
/* Bulk of the fill: 8-byte stores on 64-bit */
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: progressively narrower stores */
1874 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR that copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET using the widest load/store pairs the alignment allows.
 * NOTE(review): loop scaffolding lines are missing from this chunk.
 */
1891 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1898 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1899 g_assert (size < 10000);
/* Unaligned prefix: copy byte-by-byte until aligned */
1902 /* This could be optimized further if neccesary */
1904 cur_reg = alloc_preg (cfg);
1905 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1913 #if !NO_UNALIGNED_ACCESS
/* Bulk copy: 8-byte chunks on 64-bit targets */
1914 if (SIZEOF_REGISTER == 8) {
1916 cur_reg = alloc_preg (cfg);
1917 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1918 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies */
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1943 cur_reg = alloc_preg (cfg);
1944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1953 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1957 if (cfg->compile_aot) {
1958 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1959 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1961 ins->sreg2 = c->dreg;
1962 MONO_ADD_INS (cfg->cbb, ins);
1964 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1966 ins->inst_offset = mini_get_tls_offset (tls_key);
1967 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 *
 *   Emit IR that links the method's MonoLMF frame onto the thread's LMF
 * stack, using the fastest lmf_addr source available (TLS intrinsic,
 * jit_tls intrinsic, inlined pthread_getspecific, or an icall).
 * NOTE(review): #ifdef scaffolding lines are missing from this chunk.
 */
1974 * Emit IR to push the current LMF onto the LMF stack.
1977 emit_push_lmf (MonoCompile *cfg)
1980 * Emit IR to push the LMF:
1981 * lmf_addr = <lmf_addr from tls>
1982 * lmf->lmf_addr = lmf_addr
1983 * lmf->prev_lmf = *lmf_addr
1986 int lmf_reg, prev_lmf_reg;
1987 MonoInst *ins, *lmf_ins;
/* Fast path: the current LMF lives directly in a TLS slot */
1992 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1993 /* Load current lmf */
1994 lmf_ins = mono_get_lmf_intrinsic (cfg);
1996 MONO_ADD_INS (cfg->cbb, lmf_ins);
1997 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1998 lmf_reg = ins->dreg;
1999 /* Save previous_lmf */
2000 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Publish the new LMF into TLS */
2002 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2005 * Store lmf_addr in a variable, so it can be allocated to a global register.
2007 if (!cfg->lmf_addr_var)
2008 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr computed from the jit_tls structure */
2011 ins = mono_get_jit_tls_intrinsic (cfg);
2013 int jit_tls_dreg = ins->dreg;
2015 MONO_ADD_INS (cfg->cbb, ins);
2016 lmf_reg = alloc_preg (cfg);
2017 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2019 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
/* lmf_addr obtained via a dedicated intrinsic */
2022 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2024 MONO_ADD_INS (cfg->cbb, lmf_ins);
2027 MonoInst *args [16], *jit_tls_ins, *ins;
2029 /* Inline mono_get_lmf_addr () */
2030 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2032 /* Load mono_jit_tls_id */
2033 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2034 /* call pthread_getspecific () */
2035 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2036 /* lmf_addr = &jit_tls->lmf */
2037 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Slow fallback: plain icall */
2040 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2044 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2046 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2047 lmf_reg = ins->dreg;
2049 prev_lmf_reg = alloc_preg (cfg);
2050 /* Save previous_lmf */
2051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2052 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link our LMF at the head of the list: *lmf_addr = lmf */
2054 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 *
 *   Emit IR that unlinks this method's MonoLMF frame from the thread's LMF
 * stack, mirroring emit_push_lmf (). NOTE(review): interior lines are
 * missing from this chunk.
 */
2061 * Emit IR to pop the current LMF from the LMF stack.
2064 emit_pop_lmf (MonoCompile *cfg)
2066 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2072 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2073 lmf_reg = ins->dreg;
/* Fast path: restore previous_lmf straight into the TLS slot */
2075 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2076 /* Load previous_lmf */
2077 prev_lmf_reg = alloc_preg (cfg);
2078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2080 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2083 * Emit IR to pop the LMF:
2084 * *(lmf->lmf_addr) = lmf->prev_lmf
2086 /* This could be called before emit_push_lmf () */
2087 if (!cfg->lmf_addr_var)
2088 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2089 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2091 prev_lmf_reg = alloc_preg (cfg);
2092 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2093 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2098 emit_instrumentation_call (MonoCompile *cfg, void *func)
2100 MonoInst *iargs [1];
2103 * Avoid instrumenting inlined methods since it can
2104 * distort profiling results.
2106 if (cfg->method != cfg->current_method)
2109 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2110 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2111 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 *   Pick the call IR opcode family for a call returning TYPE: plain/REG
 * (calli) / MEMBASE (virtual) variants of OP_{VOID,F,R,L,V,}CALL.
 * NOTE(review): several case labels are missing from this chunk.
 */
2116 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Resolve enums/gshared vars to the underlying type first */
2119 type = mini_get_underlying_type (cfg, type);
2120 switch (type->type) {
2121 case MONO_TYPE_VOID:
2122 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2129 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2133 case MONO_TYPE_FNPTR:
2134 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
/* Reference types use the plain CALL family */
2135 case MONO_TYPE_CLASS:
2136 case MONO_TYPE_STRING:
2137 case MONO_TYPE_OBJECT:
2138 case MONO_TYPE_SZARRAY:
2139 case MONO_TYPE_ARRAY:
2140 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2143 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2146 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2148 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2150 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2151 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base type and re-enter the switch */
2152 if (type->data.klass->enumtype) {
2153 type = mono_class_enum_basetype (type->data.klass);
2156 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2157 case MONO_TYPE_TYPEDBYREF:
2158 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2159 case MONO_TYPE_GENERICINST:
2160 type = &type->data.generic_class->container_class->byval_arg;
2163 case MONO_TYPE_MVAR:
/* gsharedvt type variables are returned like vtypes */
2165 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2167 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * NOTE(review): several case labels and return statements of this function
 * are missing from this chunk; the visible checks are annotated below.
 */
2173 * target_type_is_incompatible:
2174 * @cfg: MonoCompile context
2176 * Check that the item @arg on the evaluation stack can be stored
2177 * in the target type (can be a local, or field, etc).
2178 * The cfg arg can be used to check if we need verification or just
2181 * Returns: non-0 value if arg can't be stored on a target.
2184 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2186 MonoType *simple_type;
/* Byref targets: only managed pointers / native pointers are acceptable */
2189 if (target->byref) {
2190 /* FIXME: check that the pointed to types match */
2191 if (arg->type == STACK_MP)
2192 return arg->klass != mono_class_from_mono_type (target);
2193 if (arg->type == STACK_PTR)
2198 simple_type = mini_get_underlying_type (cfg, target);
2199 switch (simple_type->type) {
2200 case MONO_TYPE_VOID:
2208 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2212 /* STACK_MP is needed when setting pinned locals */
2213 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2218 case MONO_TYPE_FNPTR:
2220 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2221 * in native int. (#688008).
2223 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2226 case MONO_TYPE_CLASS:
2227 case MONO_TYPE_STRING:
2228 case MONO_TYPE_OBJECT:
2229 case MONO_TYPE_SZARRAY:
2230 case MONO_TYPE_ARRAY:
2231 if (arg->type != STACK_OBJ)
2233 /* FIXME: check type compatibility */
2237 if (arg->type != STACK_I8)
2241 if (arg->type != cfg->r4_stack_type)
2245 if (arg->type != STACK_R8)
/* Valuetypes must match the exact class, not just the stack type */
2248 case MONO_TYPE_VALUETYPE:
2249 if (arg->type != STACK_VTYPE)
2251 klass = mono_class_from_mono_type (simple_type);
2252 if (klass != arg->klass)
2255 case MONO_TYPE_TYPEDBYREF:
2256 if (arg->type != STACK_VTYPE)
2258 klass = mono_class_from_mono_type (simple_type);
2259 if (klass != arg->klass)
2262 case MONO_TYPE_GENERICINST:
2263 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2266 klass = mono_class_from_mono_type (simple_type);
2267 /* The second cases is needed when doing partial sharing */
2268 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2272 if (arg->type != STACK_OBJ)
2274 /* FIXME: check type compatibility */
/* Generic type variables: only valid under generic sharing */
2278 case MONO_TYPE_MVAR:
2279 g_assert (cfg->generic_sharing_context);
2280 if (mini_type_var_is_vt (cfg, simple_type)) {
2281 if (arg->type != STACK_VTYPE)
2284 if (arg->type != STACK_OBJ)
2289 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * NOTE(review): several case labels/returns of this function are missing
 * from this chunk; the visible per-parameter checks are annotated below.
 */
2295 * Prepare arguments for passing to a function call.
2296 * Return a non-zero value if the arguments can't be passed to the given
2298 * The type checks are not yet complete and some conversions may need
2299 * casts on 32 or 64 bit architectures.
2301 * FIXME: implement this using target_type_is_incompatible ()
2304 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2306 MonoType *simple_type;
/* Implicit 'this' argument must be a reference or pointer */
2310 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2314 for (i = 0; i < sig->param_count; ++i) {
2315 if (sig->params [i]->byref) {
2316 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2320 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2322 switch (simple_type->type) {
2323 case MONO_TYPE_VOID:
2332 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2338 case MONO_TYPE_FNPTR:
2339 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2342 case MONO_TYPE_CLASS:
2343 case MONO_TYPE_STRING:
2344 case MONO_TYPE_OBJECT:
2345 case MONO_TYPE_SZARRAY:
2346 case MONO_TYPE_ARRAY:
2347 if (args [i]->type != STACK_OBJ)
2352 if (args [i]->type != STACK_I8)
2356 if (args [i]->type != cfg->r4_stack_type)
2360 if (args [i]->type != STACK_R8)
2363 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying base type */
2364 if (simple_type->data.klass->enumtype) {
2365 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2368 if (args [i]->type != STACK_VTYPE)
2371 case MONO_TYPE_TYPEDBYREF:
2372 if (args [i]->type != STACK_VTYPE)
2375 case MONO_TYPE_GENERICINST:
2376 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2379 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed as vtypes */
2381 if (args [i]->type != STACK_VTYPE)
2385 g_error ("unknown type 0x%02x in check_call_signature",
2393 callvirt_to_call (int opcode)
2396 case OP_CALL_MEMBASE:
2398 case OP_VOIDCALL_MEMBASE:
2400 case OP_FCALL_MEMBASE:
2402 case OP_RCALL_MEMBASE:
2404 case OP_VCALL_MEMBASE:
2406 case OP_LCALL_MEMBASE:
2409 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 *   Materialize the IMT argument (either IMT_ARG's value or a constant for
 * METHOD) into a register and attach it to CALL. The LLVM and non-LLVM
 * paths duplicate the same logic. NOTE(review): #ifdef/brace lines are
 * missing from this chunk.
 */
2415 /* Either METHOD or IMT_ARG needs to be set */
2417 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM backend: record the register on the call instead of pinning it */
2421 if (COMPILE_LLVM (cfg)) {
2422 method_reg = alloc_preg (cfg);
2425 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2426 } else if (cfg->compile_aot) {
2427 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2430 MONO_INST_NEW (cfg, ins, OP_PCONST);
2431 ins->inst_p0 = method;
2432 ins->dreg = method_reg;
2433 MONO_ADD_INS (cfg->cbb, ins);
2437 call->imt_arg_reg = method_reg;
2439 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM backend: same constant/move logic, pinned to MONO_ARCH_IMT_REG */
2443 method_reg = alloc_preg (cfg);
2446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2447 } else if (cfg->compile_aot) {
2448 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2451 MONO_INST_NEW (cfg, ins, OP_PCONST);
2452 ins->inst_p0 = method;
2453 ins->dreg = method_reg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2457 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2460 static MonoJumpInfo *
2461 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2463 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2467 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Return the generic-context usage of KLASS, but only when generic
 *   sharing is active for this compile (otherwise presumably 0 — the
 *   fallback return is elided in this chunk).
 */
2473 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2475 if (cfg->generic_sharing_context)
2476 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Method counterpart of mini_class_check_context_used (): only consult
 *   mono_method_check_context_used () under generic sharing.
 */
2482 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2484 if (cfg->generic_sharing_context)
2485 return mono_method_check_context_used (method);
2491 * check_method_sharing:
2493 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs (both optional): *OUT_PASS_VTABLE and *OUT_PASS_MRGCTX.
 * A vtable is passed for static/valuetype methods on generic classes whose
 * context is sharable; an mrgctx is passed when the method itself has a
 * method generic context.  NOTE(review): lines setting sharable/pass_mrgctx
 * TRUE are elided in this chunk.
 */
2496 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2498 gboolean pass_vtable = FALSE;
2499 gboolean pass_mrgctx = FALSE;
2501 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2502 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2503 gboolean sharable = FALSE;
2505 if (mono_method_is_generic_sharable (cmethod, TRUE))
2509 * Pass vtable iff target method might
2510 * be shared, which means that sharing
2511 * is enabled for its class and its
2512 * context is sharable (and it's not a
2515 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2519 if (mini_method_get_context (cmethod) &&
2520 mini_method_get_context (cmethod)->method_inst) {
/* Methods with a method-instantiation never take the vtable path. */
2521 g_assert (!pass_vtable);
2523 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2526 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2531 if (out_pass_vtable)
2532 *out_pass_vtable = pass_vtable;
2533 if (out_pass_mrgctx)
2534 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS,
 *   select the call opcode (tail call vs. normal), set up the return value
 *   (vtype return address via OP_OUTARG_VTRETADDR, or a fresh dreg), apply
 *   the soft-float r8->r4 argument conversion where needed, and lower the
 *   out-args via the arch/LLVM backend.  CALLI/VIRTUAL/TAIL/RGCTX/
 *   UNBOX_TRAMPOLINE are flags describing the call shape.
 *   NOTE(review): many lines are elided in this chunk (declarations, some
 *   branch heads); comments below describe only what is visible.
 */
2537 inline static MonoCallInst *
2538 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2539 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2543 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Profiler leave event must fire before a tail call replaces the frame. */
2548 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2550 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2552 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2555 call->signature = sig;
2556 call->rgctx_reg = rgctx;
2557 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2559 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2562 if (mini_type_is_vtype (cfg, sig_ret)) {
/* First vtype-return path: reuse the method's vret_addr var. */
2563 call->vret_var = cfg->vret_addr;
2564 //g_assert_not_reached ();
2566 } else if (mini_type_is_vtype (cfg, sig_ret)) {
/* Second vtype-return path: allocate a temp local to hold the result. */
2567 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2570 temp->backend.is_pinvoke = sig->pinvoke;
2573 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2574 * address of return value to increase optimization opportunities.
2575 * Before vtype decomposition, the dreg of the call ins itself represents the
2576 * fact the call modifies the return value. After decomposition, the call will
2577 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2578 * will be transformed into an LDADDR.
2580 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2581 loada->dreg = alloc_preg (cfg);
2582 loada->inst_p0 = temp;
2583 /* We reference the call too since call->dreg could change during optimization */
2584 loada->inst_p1 = call;
2585 MONO_ADD_INS (cfg->cbb, loada);
2587 call->inst.dreg = temp->dreg;
2589 call->vret_var = loada;
2590 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2591 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2593 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2594 if (COMPILE_SOFT_FLOAT (cfg)) {
2596 * If the call has a float argument, we would need to do an r8->r4 conversion using
2597 * an icall, but that cannot be done during the call sequence since it would clobber
2598 * the call registers + the stack. So we do it before emitting the call.
2600 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2602 MonoInst *in = call->args [i];
2604 if (i >= sig->hasthis)
2605 t = sig->params [i - sig->hasthis];
2607 t = &mono_defaults.int_class->byval_arg;
2608 t = mono_type_get_underlying_type (t);
2610 if (!t->byref && t->type == MONO_TYPE_R4) {
2611 MonoInst *iargs [1];
2615 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2617 /* The result will be in an int vreg */
2618 call->args [i] = conv;
2624 call->need_unbox_trampoline = unbox_trampoline;
/* Out-arg lowering is backend specific: LLVM or the native arch backend. */
2627 if (COMPILE_LLVM (cfg))
2628 mono_llvm_emit_call (cfg, call);
2630 mono_arch_emit_call (cfg, call);
2632 mono_arch_emit_call (cfg, call);
/* Track the widest outgoing-arg area needed by any call in this method. */
2635 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2636 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx value in RGCTX_REG to CALL.  With a dedicated
 *   MONO_ARCH_RGCTX_REG it is registered as a fixed out-arg register;
 *   otherwise (the #else branch, partly elided here) only
 *   call->rgctx_arg_reg is recorded.
 */
2642 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2644 #ifdef MONO_ARCH_RGCTX_REG
2645 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2646 cfg->uses_rgctx_reg = TRUE;
2647 call->rgctx_reg = TRUE;
2649 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG.  Optionally
 *   passes an IMT argument and an rgctx argument.  When
 *   cfg->check_pinvoke_callconv is set and this is a pinvoke wrapper, it
 *   saves the stack pointer before the call and verifies it after,
 *   restoring SP and throwing ExecutionEngineException on mismatch
 *   (calling-convention mismatch detection).
 *   NOTE(review): several lines (declarations, some branch heads) are
 *   elided in this chunk.
 */
2656 inline static MonoInst*
2657 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2662 gboolean check_sp = FALSE;
2664 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2665 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2667 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh vreg so it survives until the call. */
2672 rgctx_reg = mono_alloc_preg (cfg);
2673 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2677 if (!cfg->stack_inbalance_var)
2678 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call for the post-call comparison. */
2680 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2681 ins->dreg = cfg->stack_inbalance_var->dreg;
2682 MONO_ADD_INS (cfg->cbb, ins);
2685 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2687 call->inst.sreg1 = addr->dreg;
2690 emit_imt_argument (cfg, call, NULL, imt_arg);
2692 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2697 sp_reg = mono_alloc_preg (cfg);
2699 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2701 MONO_ADD_INS (cfg->cbb, ins);
2703 /* Restore the stack so we don't crash when throwing the exception */
2704 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2705 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2706 MONO_ADD_INS (cfg->cbb, ins);
2708 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2709 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2713 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2715 return (MonoInst*)call;
2719 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2722 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2724 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly tail) call to METHOD.  Handles:
 *   remoting proxies (MarshalByRef / transparent-proxy checks), string
 *   ctor signature fixup, delegate Invoke fast path through
 *   delegate->invoke_impl, direct dispatch of non-virtual or final virtual
 *   methods, and full virtual/interface dispatch through the vtable or the
 *   IMT.  Returns the call instruction.
 *   NOTE(review): many lines are elided in this chunk; comments describe
 *   only the visible code.
 */
2727 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2728 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2730 #ifndef DISABLE_REMOTING
2731 gboolean might_be_remote = FALSE;
2733 gboolean virtual = this != NULL;
2734 gboolean enable_for_aot = TRUE;
2738 gboolean need_unbox_trampoline;
2741 sig = mono_method_signature (method);
2744 rgctx_reg = mono_alloc_preg (cfg);
2745 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2748 if (method->string_ctor) {
2749 /* Create the real signature */
2750 /* FIXME: Cache these */
2751 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2752 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2757 context_used = mini_method_check_context_used (cfg, method);
2759 #ifndef DISABLE_REMOTING
2760 might_be_remote = this && sig->hasthis &&
2761 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2762 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2764 if (might_be_remote && context_used) {
/* Shared code: resolve the remoting-invoke wrapper through the rgctx. */
2767 g_assert (cfg->generic_sharing_context);
2769 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2771 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Unbox trampolines are needed when the receiver might be a boxed vtype. */
2775 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2777 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2779 #ifndef DISABLE_REMOTING
2780 if (might_be_remote)
2781 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2784 call->method = method;
2785 call->inst.flags |= MONO_INST_HAS_METHOD;
2786 call->inst.inst_left = this;
2787 call->tail_call = tail;
2790 int vtable_reg, slot_reg, this_reg;
2793 this_reg = this->dreg;
2795 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2796 MonoInst *dummy_use;
2798 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2800 /* Make a call to delegate->invoke_impl */
2801 call->inst.inst_basereg = this_reg;
2802 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2803 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2805 /* We must emit a dummy use here because the delegate trampoline will
2806 replace the 'this' argument with the delegate target making this activation
2807 no longer a root for the delegate.
2808 This is an issue for delegates that target collectible code such as dynamic
2809 methods of GC'able assemblies.
2811 For a test case look into #667921.
2813 FIXME: a dummy use is not the best way to do it as the local register allocator
2814 will put it on a caller save register and spil it around the call.
2815 Ideally, we would either put it on a callee save register or only do the store part.
2817 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2819 return (MonoInst*)call;
2822 if ((!cfg->compile_aot || enable_for_aot) &&
2823 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2824 (MONO_METHOD_IS_FINAL (method) &&
2825 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2826 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2828 * the method is not virtual, we just need to ensure this is not null
2829 * and then we can call the method directly.
2831 #ifndef DISABLE_REMOTING
2832 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2834 * The check above ensures method is not gshared, this is needed since
2835 * gshared methods can't have wrappers.
2837 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2841 if (!method->string_ctor)
2842 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2844 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2845 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2847 * the method is virtual, but we can statically dispatch since either
2848 * it's class or the method itself are sealed.
2849 * But first we need to ensure it's not a null reference.
2851 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2853 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* Full virtual dispatch: load the vtable (with an implicit null check). */
2855 vtable_reg = alloc_preg (cfg);
2856 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2857 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: slot is at a negative offset before the vtable (IMT). */
2858 guint32 imt_slot = mono_method_get_imt_slot (method);
2859 emit_imt_argument (cfg, call, call->method, imt_arg);
2860 slot_reg = vtable_reg;
2861 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2863 slot_reg = vtable_reg;
2864 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2865 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2867 g_assert (mono_method_signature (method)->generic_param_count);
2868 emit_imt_argument (cfg, call, call->method, imt_arg);
2872 call->inst.sreg1 = slot_reg;
2873 call->inst.inst_offset = offset;
2874 call->virtual = TRUE;
2878 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2881 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2883 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own signature and
 *   no imt/rgctx arguments.
 */
2887 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2889 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG
 *   (no virtual dispatch, no tail call, no rgctx).  NOTE(review): the
 *   lines binding FUNC to the call instruction are elided in this chunk.
 */
2893 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2900 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2903 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2905 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and call it through its
 *   wrapper with the icall's own signature.
 */
2909 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2911 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2915 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2919 * mono_emit_abs_call:
2921 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is used as the (fake) call address and recorded
 * in cfg->abs_patches so the PATCH_INFO_ABS resolver can map it back to the
 * real target at patch time.
 */
2923 inline static MonoInst*
2924 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2925 MonoMethodSignature *sig, MonoInst **args)
2927 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2931 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2934 if (cfg->abs_patches == NULL)
2935 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2936 g_hash_table_insert (cfg->abs_patches, ji, ji);
2937 ins = mono_emit_native_call (cfg, ji, sig, args);
2938 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without a wrapper).  Disabled
 *   under LLVM (on amd64, per the comment), when sequence points are being
 *   generated for the debugger, or when explicitly disabled.
 *   NOTE(review): the return statements are elided in this chunk.
 */
2943 direct_icalls_enabled (MonoCompile *cfg)
2945 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2947 if (cfg->compile_llvm)
2950 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the JIT icall INFO.  If the icall cannot raise and
 *   direct icalls are enabled, the (lazily created, cached) managed wrapper
 *   is inlined instead of being called, avoiding the wrapper's LMF cost.
 *   Otherwise falls back to a normal wrapped native call.
 */
2956 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
2959 * Call the jit icall without a wrapper if possible.
2960 * The wrapper is needed for the following reasons:
2961 * - to handle exceptions thrown using mono_raise_exceptions () from the
2962 * icall function. The EH code needs the lmf frame pushed by the
2963 * wrapper to be able to unwind back to managed code.
2964 * - to be able to do stack walks for asynchronously suspended
2965 * threads when debugging.
2967 if (info->no_raise && direct_icalls_enabled (cfg)) {
2971 if (!info->wrapper_method) {
2972 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2973 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
2975 mono_memory_barrier ();
2979 * Inline the wrapper method, which is basically a call to the C icall, and
2980 * an exception check.
2982 costs = inline_method (cfg, info->wrapper_method, NULL,
2983 args, NULL, cfg->real_offset, TRUE);
2984 g_assert (costs > 0);
2985 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2989 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small integer return values of pinvoke (or
 *   LLVM-compiled) calls, since native code may leave the upper bits of
 *   sub-register integers uninitialized.  Presumably returns the widened
 *   instruction (the return paths are elided in this chunk).
 */
2994 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2996 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2997 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref {
3001 * Native code might return non register sized integers
3002 * without initializing the upper bits.
3004 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3005 case OP_LOADI1_MEMBASE:
3006 widen_op = OP_ICONV_TO_I1;
3008 case OP_LOADU1_MEMBASE:
3009 widen_op = OP_ICONV_TO_U1;
3011 case OP_LOADI2_MEMBASE:
3012 widen_op = OP_ICONV_TO_I2;
3014 case OP_LOADU2_MEMBASE:
3015 widen_op = OP_ICONV_TO_U2;
3021 if (widen_op != -1) {
3022 int dreg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3026 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and cache in a static) the corlib-internal
 *   string::memcpy(3 args) helper; aborts if corlib is too old to have it.
 */
3036 get_memcpy_method (void)
3038 static MonoMethod *memcpy_method = NULL;
3039 if (!memcpy_method) {
3040 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3042 g_error ("Old corlib found. Install a new one");
3044 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively walk the instance fields of KLASS and set, in *WB_BITMAP,
 *   one bit per pointer-sized slot (relative to OFFSET) that holds a GC
 *   reference; nested value types with references are recursed into.
 */
3048 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3050 MonoClassField *field;
3051 gpointer iter = NULL;
3053 while ((field = mono_class_get_fields (klass, &iter))) {
3056 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header: remove it. */
3058 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3059 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3060 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3061 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3063 MonoClass *field_class = mono_class_from_mono_type (field->type);
3064 if (field_class->has_references)
3065 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Picks, in
 *   order of preference: the arch's inline card-table barrier opcode, an
 *   inline card-table mark sequence (shift, optional mask, store 1 into
 *   the card byte), or a call to the runtime's write-barrier method.
 *   No-op when write barriers are disabled.
 */
3071 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3073 int card_table_shift_bits;
3074 gpointer card_table_mask;
3076 MonoInst *dummy_use;
3077 int nursery_shift_bits;
3078 size_t nursery_size;
3079 gboolean has_card_table_wb = FALSE;
3081 if (!cfg->gen_write_barriers)
3084 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3086 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3088 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3089 has_card_table_wb = TRUE;
3092 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
/* Single arch-specific opcode does the whole barrier. */
3095 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3096 wbarrier->sreg1 = ptr->dreg;
3097 wbarrier->sreg2 = value->dreg;
3098 MONO_ADD_INS (cfg->cbb, wbarrier);
3099 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3100 int offset_reg = alloc_preg (cfg);
3101 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift (optionally masked). */
3104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3105 if (card_table_mask)
3106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3108 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3109 * IMM's larger than 32bits.
3111 if (cfg->compile_aot) {
3112 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3114 MONO_INST_NEW (cfg, ins, OP_PCONST);
3115 ins->inst_p0 = card_table;
3116 ins->dreg = card_reg;
3117 MONO_ADD_INS (cfg->cbb, ins);
3120 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* Mark the card dirty. */
3121 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3123 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3124 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3127 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled, write-barrier-aware copy of SIZE bytes of
 *   KLASS from iargs[1] to iargs[0].  Bails out (presumably returning
 *   FALSE — returns are elided in this chunk) when alignment is below
 *   pointer size or the bitmap can't describe the type; for copies larger
 *   than 5 pointers it calls the mono_gc_wbarrier_value_copy_bitmap icall
 *   instead of unrolling.  The unrolled loop copies pointer-sized words
 *   (emitting a write barrier per reference slot) and finishes with 4/2/1
 *   byte tail copies.
 */
3131 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3133 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3134 unsigned need_wb = 0;
3139 /*types with references can't have alignment smaller than sizeof(void*) */
3140 if (align < SIZEOF_VOID_P)
3143 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3144 if (size > 32 * SIZEOF_VOID_P)
3147 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3149 /* We don't unroll more than 5 stores to avoid code bloat. */
3150 if (size > 5 * SIZEOF_VOID_P) {
3151 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3152 size += (SIZEOF_VOID_P - 1);
3153 size &= ~(SIZEOF_VOID_P - 1);
3155 EMIT_NEW_ICONST (cfg, iargs [2], size);
3156 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3157 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3161 destreg = iargs [0]->dreg;
3162 srcreg = iargs [1]->dreg;
3165 dest_ptr_reg = alloc_preg (cfg);
3166 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced as we copy. */
3169 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3171 while (size >= SIZEOF_VOID_P) {
3172 MonoInst *load_inst;
3173 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3174 load_inst->dreg = tmp_reg;
3175 load_inst->inst_basereg = srcreg;
3176 load_inst->inst_offset = offset;
3177 MONO_ADD_INS (cfg->cbb, load_inst);
3179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (check elided). */
3182 emit_write_barrier (cfg, iargs [0], load_inst);
3184 offset += SIZEOF_VOID_P;
3185 size -= SIZEOF_VOID_P;
3188 /*tmp += sizeof (void*)*/
3189 if (size >= SIZEOF_VOID_P) {
3190 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3191 MONO_ADD_INS (cfg->cbb, iargs [0]);
3195 /* Those cannot be references since size < sizeof (void*) */
3197 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3204 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3205 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3211 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3212 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3221 * Emit code to copy a valuetype of type @klass whose address is stored in
3222 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy, in order: gsharedvt uses rgctx-provided size/memcpy helpers;
 * types with references (and not NATIVE) go through mono_value_copy /
 * mono_gsharedvt_value_copy icalls (or the unrolled wb-aware copy when
 * intrinsics are enabled); small reference-free types use an inline
 * memcpy; everything else calls the corlib memcpy helper.
 */
3225 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3227 MonoInst *iargs [4];
3230 MonoMethod *memcpy_method;
3231 MonoInst *size_ins = NULL;
3232 MonoInst *memcpy_ins = NULL;
3235 if (cfg->generic_sharing_context)
3236 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3239 * This check breaks with spilled vars... need to handle it during verification anyway.
3240 * g_assert (klass && klass == src->klass && klass == dest->klass);
3243 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size and memcpy routine are only known at runtime: fetch from rgctx. */
3245 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3246 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3250 n = mono_class_native_size (klass, &align);
3252 n = mono_class_value_size (klass, &align);
3254 /* if native is true there should be no references in the struct */
3255 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3256 /* Avoid barriers when storing to the stack */
3257 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3258 (dest->opcode == OP_LDADDR))) {
3264 context_used = mini_class_check_context_used (cfg, klass);
3266 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3267 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3269 } else if (context_used) {
3270 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3272 if (cfg->compile_aot) {
3273 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3275 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3276 mono_class_compute_gc_descriptor (klass);
3281 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3283 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3288 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3289 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3290 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3295 iargs [2] = size_ins;
3297 EMIT_NEW_ICONST (cfg, iargs [2], n);
3299 memcpy_method = get_memcpy_method ();
3301 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3303 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and cache in a static) the corlib-internal
 *   string::memset(3 args) helper; aborts if corlib is too old to have it.
 */
3308 get_memset_method (void)
3310 static MonoMethod *memset_method = NULL;
3311 if (!memset_method) {
3312 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3314 g_error ("Old corlib found. Install a new one");
3316 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at DEST.
 *   gsharedvt types call a runtime bzero helper with an rgctx-provided
 *   size; small types use an inline memset; larger types call the corlib
 *   memset helper with value 0.
 */
3320 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3322 MonoInst *iargs [3];
3325 MonoMethod *memset_method;
3326 MonoInst *size_ins = NULL;
3327 MonoInst *bzero_ins = NULL;
3328 static MonoMethod *bzero_method;
3330 /* FIXME: Optimize this for the case when dest is an LDADDR */
3331 mono_class_init (klass);
3332 if (mini_is_gsharedvt_klass (cfg, klass)) {
3333 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3334 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3336 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3337 g_assert (bzero_method);
3339 iargs [1] = size_ins;
3340 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3344 n = mono_class_value_size (klass, &align);
3346 if (n <= sizeof (gpointer) * 8) {
3347 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3350 memset_method = get_memset_method ();
3352 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3353 EMIT_NEW_ICONST (cfg, iargs [2], n);
3354 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3361 * Emit IR to return either the this pointer for instance method,
3362 * or the mrgctx for static methods.
/*
 * Sources, per context: instance methods load 'this' (arg 0); methods with
 * a method generic context load the mrgctx from the vtable var; static or
 * valuetype methods load the vtable var (dereferencing the mrgctx's
 * class_vtable when inflated); the fallback (partly elided here) loads the
 * vtable from 'this'.
 */
3365 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3367 MonoInst *this = NULL;
3369 g_assert (cfg->generic_sharing_context);
3371 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3372 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3373 !method->klass->valuetype)
3374 EMIT_NEW_ARGLOAD (cfg, this, 0);
3376 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3377 MonoInst *mrgctx_loc, *mrgctx_var;
3380 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3382 mrgctx_loc = mono_get_vtable_var (cfg);
3383 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3386 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3387 MonoInst *vtable_loc, *vtable_var;
3391 vtable_loc = mono_get_vtable_var (cfg);
3392 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3394 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an mrgctx: load its embedded class vtable. */
3395 MonoInst *mrgctx_var = vtable_var;
3398 vtable_reg = alloc_preg (cfg);
3399 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3400 vtable_var->type = STACK_PTR;
3408 vtable_reg = alloc_preg (cfg);
3409 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate and fill a MonoJumpInfoRgctxEntry (including its nested
 *   MonoJumpInfo) from mempool MP describing an rgctx slot lookup for
 *   METHOD.  Presumably returns res (the return line is elided here).
 */
3414 static MonoJumpInfoRgctxEntry *
3415 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3417 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3418 res->method = method;
3419 res->in_mrgctx = in_mrgctx;
3420 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3421 res->data->type = patch_type;
3422 res->data->data.target = patch_data;
3423 res->info_type = info_type;
3431 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Two implementations coexist here: an inline version (walk the rgctx
 * table arrays, branching to a slow-path bblock that calls the
 * mono_fill_{method,class}_rgctx icalls when a table or slot is still
 * NULL) which the comment says is not currently used, and the active
 * fallback at the bottom: an abs call to the RGCTX_FETCH lazy-fetch
 * trampoline.
 */
3434 static inline MonoInst*
3435 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3437 /* Inline version, not currently used */
3438 // FIXME: This can be called from mono_decompose_vtype_opts (), which can't create new bblocks
3440 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3442 MonoBasicBlock *is_null_bb, *end_bb;
3443 MonoInst *res, *ins, *call;
3446 slot = mini_get_rgctx_entry_slot (entry);
3448 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3449 index = MONO_RGCTX_SLOT_INDEX (slot);
3451 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find which nested table array the slot index falls into. */
3452 for (depth = 0; ; ++depth) {
3453 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3455 if (index < size - 1)
3460 NEW_BBLOCK (cfg, end_bb);
3461 NEW_BBLOCK (cfg, is_null_bb);
3464 rgctx_reg = rgctx->dreg;
3466 rgctx_reg = alloc_preg (cfg);
3468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3469 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3470 NEW_BBLOCK (cfg, is_null_bb);
3472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3476 for (i = 0; i < depth; ++i) {
3477 int array_reg = alloc_preg (cfg);
3479 /* load ptr to next array */
3480 if (mrgctx && i == 0)
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3483 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3484 rgctx_reg = array_reg;
3485 /* is the ptr null? */
3486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3487 /* if yes, jump to actual trampoline */
3488 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3492 val_reg = alloc_preg (cfg);
3493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3494 /* is the slot null? */
3495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3496 /* if yes, jump to actual trampoline */
3497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3500 res_reg = alloc_preg (cfg);
3501 MONO_INST_NEW (cfg, ins, OP_MOVE);
3502 ins->dreg = res_reg;
3503 ins->sreg1 = val_reg;
3504 MONO_ADD_INS (cfg->cbb, ins);
3506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: fill the slot via an icall, then fall through to end_bb. */
3509 MONO_START_BB (cfg, is_null_bb);
3511 EMIT_NEW_ICONST (cfg, args [1], index);
3513 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3515 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3516 MONO_INST_NEW (cfg, ins, OP_MOVE);
3517 ins->dreg = res_reg;
3518 ins->sreg1 = call->dreg;
3519 MONO_ADD_INS (cfg->cbb, ins);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3522 MONO_START_BB (cfg, end_bb);
/* Active implementation: lazy-fetch trampoline via an abs call. */
3526 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR loading the RGCTX_TYPE property of KLASS from the rgctx of the
 *   current method.
 */
3531 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3532 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3534 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3535 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3537 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR loading the RGCTX_TYPE property of signature SIG from the
 *   rgctx of the current method.
 */
3541 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3542 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3544 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3545 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3547 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR loading gsharedvt call info (SIG + CMETHOD pair) of kind
 *   RGCTX_TYPE from the rgctx of the current method.
 */
3551 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3552 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3554 MonoJumpInfoGSharedVtCall *call_info;
3555 MonoJumpInfoRgctxEntry *entry;
3558 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3559 call_info->sig = sig;
3560 call_info->method = cmethod;
3562 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3563 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3565 return emit_rgctx_fetch (cfg, rgctx, entry);
3569 * emit_get_rgctx_virt_method:
3571 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/* Same rgctx-entry pattern as the other emit_get_rgctx_* helpers. */
3574 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3575 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3577 MonoJumpInfoVirtMethod *info;
3578 MonoJumpInfoRgctxEntry *entry;
3581 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3582 info->klass = klass;
3583 info->method = virt_method;
3585 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3586 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3588 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt runtime info for CMETHOD from the runtime
 * generic context. Always uses slot type MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO.
 */
3592 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3593 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3595 MonoJumpInfoRgctxEntry *entry;
3598 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3599 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3601 return emit_rgctx_fetch (cfg, rgctx, entry);
3605 * emit_get_rgctx_method:
3607 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3608 * normal constants, else emit a load from the rgctx.
3611 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3612 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the method is known at JIT time, so emit a constant directly. */
3614 if (!context_used) {
3617 switch (rgctx_type) {
3618 case MONO_RGCTX_INFO_METHOD:
3619 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3621 case MONO_RGCTX_INFO_METHOD_RGCTX:
3622 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not expected when context_used == 0. */
3625 g_assert_not_reached ();
/* Shared code: go through the runtime generic context with a METHODCONST patch entry. */
3628 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3629 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3631 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the rgctx slot of type RGCTX_TYPE describing FIELD
 * from the runtime generic context of the current method.
 */
3636 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3637 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3639 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3640 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3642 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the per-method
 * gsharedvt info template table, adding a new entry if none matches.
 */
3646 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3648 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3649 MonoRuntimeGenericContextInfoTemplate *template;
/* Linear dedup search; LOCAL_OFFSET entries are never shared (each gets its own slot). */
3654 for (i = 0; i < info->num_entries; ++i) {
3655 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3657 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). The old array stays in the
 * mempool and is simply abandoned — mempool memory is freed all at once. */
3661 if (info->num_entries == info->count_entries) {
3662 MonoRuntimeGenericContextInfoTemplate *new_entries;
3663 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3665 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3667 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3668 info->entries = new_entries;
3669 info->count_entries = new_count_entries;
/* Append the new template at the end and return its index. */
3672 idx = info->num_entries;
3673 template = &info->entries [idx];
3674 template->info_type = rgctx_type;
3675 template->data = data;
3677 info->num_entries ++;
3683 * emit_get_gsharedvt_info:
3685 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3688 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3693 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3694 /* Load info->entries [idx] */
3695 dreg = alloc_preg (cfg);
/* Pointer-sized load from the runtime info's entries array at the slot index. */
3696 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info for a class via its byval type. */
3702 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3704 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3708 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit IR which runs the class initializer (cctor) of KLASS if it has not
 * run yet. The vtable is obtained through the rgctx in shared code, or as a
 * constant otherwise.
 */
3711 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3713 MonoInst *vtable_arg;
3715 gboolean use_op_generic_class_init = FALSE;
3717 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the runtime generic context. */
3720 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3721 klass, MONO_RGCTX_INFO_VTABLE);
3723 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3727 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* The dedicated opcode is only used outside LLVM compilation. */
3730 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3731 if (!COMPILE_LLVM (cfg))
3732 use_op_generic_class_init = TRUE;
3735 if (use_op_generic_class_init) {
3739 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3740 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3742 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3743 ins->sreg1 = vtable_arg->dreg;
3744 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline test of the vtable's 'initialized' bitfield; call the
 * icall only when the bit is clear. byte_offset/bitmask are computed once
 * and cached in function-level statics. */
3746 static int byte_offset = -1;
3747 static guint8 bitmask;
3748 int bits_reg, inited_reg;
3749 MonoBasicBlock *inited_bb;
3750 MonoInst *args [16];
3752 if (byte_offset < 0)
3753 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3755 bits_reg = alloc_ireg (cfg);
3756 inited_reg = alloc_ireg (cfg);
3758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3761 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall. */
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3766 args [0] = vtable_arg;
3767 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3769 MONO_START_BB (cfg, inited_bb);
/*
 * emit_class_init:
 *
 *   Emit IR to run the class initializer of KLASS. Under AOT the generic
 * inline version is used; otherwise an abs call to the class-init trampoline.
 */
3775 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3777 /* This could be used as a fallback if needed */
3778 if (cfg->compile_aot) {
3779 /* With the overhead of plt entries, the inline version is comparable in size/speed */
3780 emit_generic_class_init (cfg, klass);
3784 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support. Only emitted
 * when sequence points are enabled and METHOD is the outermost method being
 * compiled (i.e. not for inlined callees).
 */
3788 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3792 if (cfg->gen_seq_points && cfg->method == method) {
3793 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3795 ins->flags |= MONO_INST_NONEMPTY_STACK;
3796 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class of
 * the object in OBJ_REG and the target KLASS into the JIT TLS data
 * (class_cast_from / class_cast_to), so a failing cast can report both types.
 * A NULL object skips the recording.
 */
3801 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3803 if (mini_get_debug_options ()->better_cast_details) {
3804 int vtable_reg = alloc_preg (cfg);
3805 int klass_reg = alloc_preg (cfg);
3806 MonoBasicBlock *is_null_bb = NULL;
3808 int to_klass_reg, context_used;
3811 NEW_BBLOCK (cfg, is_null_bb);
3813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Need the JIT TLS pointer to reach the per-thread cast-detail slots. */
3817 tls_get = mono_get_jit_tls_intrinsic (cfg);
3819 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3823 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the "from" class of the (potential) failing cast. */
3824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3829 context_used = mini_class_check_context_used (cfg, klass);
3831 MonoInst *class_ins;
/* Shared code: the target class must come from the rgctx. */
3833 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3834 to_klass_reg = class_ins->dreg;
3836 to_klass_reg = alloc_preg (cfg);
3837 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3842 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Emit IR which clears the cast details recorded by save_cast_details ()
 * once the cast succeeded (or the check is over).
 */
3847 reset_cast_details (MonoCompile *cfg)
3849 /* Reset the variables holding the cast details */
3850 if (mini_get_debug_options ()->better_cast_details) {
3851 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3853 MONO_ADD_INS (cfg->cbb, tls_get);
3854 /* It is enough to reset the from field */
3855 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3860 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which checks that OBJ is exactly of type ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. Compares either the vtable pointer
 * (fast path) or the klass pointer (MONO_OPT_SHARED), with AOT/shared/rgctx
 * variants choosing how the expected value is materialized.
 */
3863 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3865 int vtable_reg = alloc_preg (cfg);
3868 context_used = mini_class_check_context_used (cfg, array_class);
3870 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3872 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code compares klass pointers since vtables are per-domain. */
3874 if (cfg->opt & MONO_OPT_SHARED) {
3875 int class_reg = alloc_preg (cfg);
3876 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3877 if (cfg->compile_aot) {
3878 int klass_reg = alloc_preg (cfg);
3879 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3880 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3884 } else if (context_used) {
3885 MonoInst *vtable_ins;
/* Shared code: load the expected vtable through the rgctx. */
3887 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3888 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3890 if (cfg->compile_aot) {
3894 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3896 vt_reg = alloc_preg (cfg);
3897 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3898 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3901 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3907 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3909 reset_cast_details (cfg);
3913 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3914 * generic code is generated.
3917 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing a Nullable<T> is done by calling Nullable<T>.Unbox () on the value. */
3919 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3922 MonoInst *rgctx, *addr;
3924 /* FIXME: What if the class is shared? We might not
3925 have to get the address of the method from the
/* Shared code: fetch the method's code address through the rgctx and do an
 * indirect call. */
3927 addr = emit_get_rgctx_method (cfg, context_used, method,
3928 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3930 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3932 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, passing the vtable if method sharing requires it. */
3934 gboolean pass_vtable, pass_mrgctx;
3935 MonoInst *rgctx_arg = NULL;
3937 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3938 g_assert (!pass_mrgctx);
3941 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3944 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3947 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) to value type
 * KLASS: a type check against the object's vtable, then the address of the
 * value payload (object + sizeof (MonoObject)) is produced as a STACK_MP.
 */
3952 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3956 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3957 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3958 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3959 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3961 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3962 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3965 /* FIXME: generics */
3966 g_assert (klass->rank == 0);
/* The object must not be an array. */
3969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3970 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3972 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3973 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3976 MonoInst *element_class;
3978 /* This assertion is from the unboxcast insn */
3979 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3981 element_class = emit_get_rgctx_klass (cfg, context_used,
3982 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3984 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3985 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3987 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3988 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3989 reset_cast_details (cfg);
/* Result: address of the unboxed payload just past the object header. */
3992 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3993 MONO_ADD_INS (cfg->cbb, add);
3994 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete layout
 * is only known at run time. Branches on the class's box type fetched from
 * the gsharedvt info: the value compared against 1 selects the
 * reference-type path, 2 the nullable path, and the fall-through handles
 * plain value types. Returns the unboxed value loaded from ADDR_REG.
 */
4001 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4003 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4004 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4008 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
4014 args [1] = klass_inst;
/* Dynamic cast check since the exact class is only known at run time. */
4017 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4019 NEW_BBLOCK (cfg, is_ref_bb);
4020 NEW_BBLOCK (cfg, is_nullable_bb);
4021 NEW_BBLOCK (cfg, end_bb);
4022 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4026 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4027 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4029 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4030 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the payload lives right after the object header. */
4034 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4035 MONO_ADD_INS (cfg->cbb, addr);
4037 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4040 MONO_START_BB (cfg, is_ref_bb);
4042 /* Save the ref to a temporary */
4043 dreg = alloc_ireg (cfg);
4044 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4045 addr->dreg = addr_reg;
4046 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4047 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4050 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Unbox indirectly through an address from
 * the gsharedvt info, with a hand-built signature (object) -> T. */
4053 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4054 MonoInst *unbox_call;
4055 MonoMethodSignature *unbox_sig;
4057 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4058 unbox_sig->ret = &klass->byval_arg;
4059 unbox_sig->param_count = 1;
4060 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4061 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4063 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4064 addr->dreg = addr_reg;
4067 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4070 MONO_START_BB (cfg, end_bb);
/* All paths join with addr_reg pointing at the unboxed value. */
4073 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4079 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an object of type KLASS (FOR_BOX is TRUE when the
 * allocation is for boxing). Picks between a managed allocator, a
 * specialized mscorlib helper (AOT out-of-line paths), and generic icalls,
 * depending on sharing mode, AOT, and context_used.
 */
4082 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4084 MonoInst *iargs [2];
/* context_used path: klass/vtable must come from the rgctx. */
4090 MonoInst *iargs [2];
4091 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
4093 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4095 if (cfg->opt & MONO_OPT_SHARED)
4096 rgctx_info = MONO_RGCTX_INFO_KLASS;
4098 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4099 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4101 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared-domain: mono_object_new (domain, klass). */
4102 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4104 alloc_ftn = mono_object_new;
4107 alloc_ftn = mono_object_new_specific;
4110 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4111 if (known_instance_size) {
4112 int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header. */
4113 if (size < sizeof (MonoObject))
4114 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4116 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4118 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4121 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-context_used paths below. */
4124 if (cfg->opt & MONO_OPT_SHARED) {
4125 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4126 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4128 alloc_ftn = mono_object_new;
4129 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4130 /* This happens often in argument checking code, eg. throw new FooException... */
4131 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4132 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4133 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4135 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4136 MonoMethod *managed_alloc = NULL;
/* No vtable means the class failed to load: record the error on the cfg. */
4140 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4141 cfg->exception_ptr = klass;
4145 #ifndef MONO_CROSS_COMPILE
4146 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4149 if (managed_alloc) {
4150 int size = mono_class_instance_size (klass);
4151 if (size < sizeof (MonoObject))
4152 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4154 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4155 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4156 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4158 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw allocators take the instance size in pointer-sized words first. */
4160 guint32 lw = vtable->klass->instance_size;
4161 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4162 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4163 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4166 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4170 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4174 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box the value VAL of type KLASS. Handles three shapes:
 * Nullable<T> (via Nullable<T>.Box), gsharedvt types (runtime branch on the
 * class box type, same 1 = ref / 2 = nullable encoding as handle_unbox_gsharedvt),
 * and ordinary value types (allocate + store payload).
 */
4177 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4179 MonoInst *alloc, *ins;
4181 if (mono_class_is_nullable (klass)) {
4182 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4185 /* FIXME: What if the class is shared? We might not
4186 have to get the method address from the RGCTX. */
/* Shared code: indirect call through the rgctx-provided code address. */
4187 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4188 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4189 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4191 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4193 gboolean pass_vtable, pass_mrgctx;
4194 MonoInst *rgctx_arg = NULL;
4196 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4197 g_assert (!pass_mrgctx);
4200 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4203 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4206 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4210 if (mini_is_gsharedvt_klass (cfg, klass)) {
4211 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4212 MonoInst *res, *is_ref, *src_var, *addr;
4215 dreg = alloc_ireg (cfg);
4217 NEW_BBLOCK (cfg, is_ref_bb);
4218 NEW_BBLOCK (cfg, is_nullable_bb);
4219 NEW_BBLOCK (cfg, end_bb);
4220 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate the box and store the value past the header. */
4228 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4231 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4232 ins->opcode = OP_STOREV_MEMBASE;
4234 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4235 res->type = STACK_OBJ;
4237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4240 MONO_START_BB (cfg, is_ref_bb);
4242 /* val is a vtype, so has to load the value manually */
4243 src_var = get_vreg_to_inst (cfg, val->dreg);
4245 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4246 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4248 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4251 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: indirect call to Nullable<T>.Box with a hand-built
 * (T) -> object signature, since the concrete method can't be constructed
 * at JIT time for a gsharedvt T. */
4254 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4255 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4257 MonoMethodSignature *box_sig;
4260 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4261 * construct that method at JIT time, so have to do things by hand.
4263 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4264 box_sig->ret = &mono_defaults.object_class->byval_arg;
4265 box_sig->param_count = 1;
4266 box_sig->params [0] = &klass->byval_arg;
4267 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4268 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4269 res->type = STACK_OBJ;
4273 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4275 MONO_START_BB (cfg, end_bb);
/* Plain value-type path: allocate and store the payload. */
4279 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4283 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or open generic, in shared
 * code) with at least one covariant/contravariant type argument that is a
 * reference type. Such casts need the variance-aware cast machinery.
 */
4289 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4292 MonoGenericContainer *container;
4293 MonoGenericInst *ginst;
4295 if (klass->generic_class) {
4296 container = klass->generic_class->container_class->generic_container;
4297 ginst = klass->generic_class->context.class_inst;
4298 } else if (klass->generic_container && context_used) {
4299 container = klass->generic_container;
4300 ginst = container->context.class_inst;
/* Scan the type arguments of variant parameters for reference types. */
4305 for (i = 0; i < container->type_argc; ++i) {
4307 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4309 type = ginst->type_argv [i];
4310 if (mini_type_is_reference (cfg, type))
/* Lazily-initialized whitelist of corlib class names whose icalls are safe to
 * call directly (they don't raise managed exceptions). Written once, then
 * read without locking after a memory barrier. */
4316 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be invoked directly (skipping the
 * wrapper), based on a whitelist of corlib classes plus Math.
 */
4319 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4321 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4322 if (!direct_icalls_enabled (cfg))
4326 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4327 * Whitelist a few icalls for now.
4329 if (!direct_icall_type_hash) {
4330 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4332 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4333 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4334 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
/* Barrier ensures the fully-built table is visible before the pointer is published. */
4335 mono_memory_barrier ();
4336 direct_icall_type_hash = h;
4339 if (cmethod->klass == mono_defaults.math_class)
4341 /* No locking needed */
4342 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* TRUE when an isinst/castclass against KLASS cannot be implemented with the
 * simple inline class-hierarchy check: interfaces, arrays, nullables,
 * MarshalByRef types, sealed classes, and generic type variables all need
 * the slower cached/wrapper-based machinery. NOTE: evaluates klass multiple
 * times — only pass side-effect-free expressions. */
4347 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * bracketed by save/reset of the cast details for --debug=casts.
 */
4350 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4352 MonoMethod *mono_castclass;
4355 mono_castclass = mono_marshal_get_castclass_with_cache ();
4357 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4358 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4359 reset_cast_details (cfg);
/* Return a new unique cast-cache index: method_index in the high 16 bits,
 * a per-cfg counter in the low 16. */
4365 get_castclass_cache_idx (MonoCompile *cfg)
4367 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4368 cfg->castclass_cache_index ++;
4369 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant of the cached castclass: KLASS is a compile-time
 * constant, and the cache slot is either an AOT patch (CASTCLASS_CACHE) or a
 * domain-allocated pointer-sized slot.
 */
4373 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4382 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4385 if (cfg->compile_aot) {
4386 idx = get_castclass_cache_idx (cfg);
4387 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4389 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4392 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4393 return emit_castclass_with_cache (cfg, klass, args);
4397 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL castclass opcode: check that SRC is an instance of
 * KLASS, throwing InvalidCastException otherwise. Picks between the cached
 * wrapper (variant generics / complex classes), an inlined marshal wrapper
 * (MarshalByRef / interfaces in non-shared code), and inline vtable checks.
 */
4400 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4402 MonoBasicBlock *is_null_bb;
4403 int obj_reg = src->dreg;
4404 int vtable_reg = alloc_preg (cfg);
4406 MonoInst *klass_inst = NULL, *res;
4408 context_used = mini_class_check_context_used (cfg, klass);
4410 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4411 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4412 (*inline_costs) += 2;
4414 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4415 MonoMethod *mono_castclass;
4416 MonoInst *iargs [1];
4419 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the castclass wrapper; its cost is charged to the inline budget. */
4422 save_cast_details (cfg, klass, src->dreg, TRUE);
4423 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4424 iargs, ip, cfg->real_offset, TRUE);
4425 reset_cast_details (cfg);
4426 CHECK_CFG_EXCEPTION;
4427 g_assert (costs > 0);
4429 cfg->real_offset += 5;
4431 (*inline_costs) += costs;
/* Shared-code paths. */
4439 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4440 MonoInst *cache_ins;
4442 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4447 /* klass - it's the second element of the cache entry*/
4448 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4451 args [2] = cache_ins;
4453 return emit_castclass_with_cache (cfg, klass, args);
4456 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: null objects always pass castclass. */
4459 NEW_BBLOCK (cfg, is_null_bb);
4461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4464 save_cast_details (cfg, klass, obj_reg, FALSE);
4466 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4468 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4470 int klass_reg = alloc_preg (cfg);
4472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes admit an exact klass-pointer comparison instead of a walk. */
4474 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4475 /* the remoting code is broken, access the class for now */
4476 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4477 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4479 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4480 cfg->exception_ptr = klass;
4483 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4485 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4488 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4491 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4495 MONO_START_BB (cfg, is_null_bb);
4497 reset_cast_details (cfg);
4506 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL isinst opcode: test whether SRC is an instance of
 * KLASS, yielding the object itself on success and NULL on failure. Simple
 * hierarchies are checked inline; complex cases (interfaces, arrays,
 * nullables, sealed, variant generics) go through the cached isinst wrapper.
 */
4509 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4512 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4513 int obj_reg = src->dreg;
4514 int vtable_reg = alloc_preg (cfg);
4515 int res_reg = alloc_ireg_ref (cfg);
4516 MonoInst *klass_inst = NULL;
/* Complex classes: delegate to the cached isinst wrapper. */
4521 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4522 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4523 MonoInst *cache_ins;
4525 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4530 /* klass - it's the second element of the cache entry*/
4531 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4534 args [2] = cache_ins;
4536 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4539 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Three-way join: is_null_bb = success (result already holds the object),
 * false_bb = failure (result set to NULL), end_bb = merge point. */
4542 NEW_BBLOCK (cfg, is_null_bb);
4543 NEW_BBLOCK (cfg, false_bb);
4544 NEW_BBLOCK (cfg, end_bb);
4546 /* Do the assignment at the beginning, so the other assignment can be if converted */
4547 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4548 ins->type = STACK_OBJ;
/* isinst on NULL yields NULL (which res_reg already holds). */
4551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4556 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4557 g_assert (!context_used);
4558 /* the is_null_bb target simply copies the input register to the output */
4559 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4561 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
4564 int rank_reg = alloc_preg (cfg);
4565 int eclass_reg = alloc_preg (cfg);
4567 g_assert (!context_used);
4568 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-cased element classes: object, Enum's parent (ValueType), Enum,
 * and interface element types each have bespoke check sequences. */
4573 if (klass->cast_class == mono_defaults.object_class) {
4574 int parent_reg = alloc_preg (cfg);
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4576 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4577 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4579 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4580 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4581 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4583 } else if (klass->cast_class == mono_defaults.enum_class) {
4584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4585 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4586 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4587 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4589 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4590 /* Check that the object is a vector too */
4591 int bounds_reg = alloc_preg (cfg);
4592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4597 /* the is_null_bb target simply copies the input register to the output */
4598 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4600 } else if (mono_class_is_nullable (klass)) {
4601 g_assert (!context_used);
4602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4603 /* the is_null_bb target simply copies the input register to the output */
4604 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: exact klass-pointer comparison suffices. */
4606 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4607 g_assert (!context_used);
4608 /* the remoting code is broken, access the class for now */
4609 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4610 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4612 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4613 cfg->exception_ptr = klass;
4616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4621 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4625 /* the is_null_bb target simply copies the input register to the output */
4626 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: the test failed — produce NULL. */
4631 MONO_START_BB (cfg, false_bb);
4633 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4634 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4636 MONO_START_BB (cfg, is_null_bb);
4638 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CISINST internal opcode. Takes an object reference SRC and
 * a class KLASS, and produces an integer in a fresh ireg (encoding below).
 * With remoting enabled, transparent proxies whose type cannot be decided at
 * JIT time yield the value 2 so the caller can fall back to a runtime check.
 * NOTE(review): this listing is elided — brace/#else/#endif lines between the
 * numbered lines are not shown here.
 */
4644 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4646 /* This opcode takes as input an object reference and a class, and returns:
4647 0) if the object is an instance of the class,
4648 1) if the object is not an instance of the class,
4649 2) if the object is a proxy whose type cannot be determined */
4652 #ifndef DISABLE_REMOTING
4653 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4655 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4657 int obj_reg = src->dreg;
4658 int dreg = alloc_ireg (cfg);
4660 #ifndef DISABLE_REMOTING
4661 int klass_reg = alloc_preg (cfg);
4664 NEW_BBLOCK (cfg, true_bb);
4665 NEW_BBLOCK (cfg, false_bb);
4666 NEW_BBLOCK (cfg, end_bb);
4667 #ifndef DISABLE_REMOTING
4668 NEW_BBLOCK (cfg, false2_bb);
4669 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: go straight to the "1" result. */
4672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4675 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4676 #ifndef DISABLE_REMOTING
4677 NEW_BBLOCK (cfg, interface_fail_bb);
4680 tmp_reg = alloc_preg (cfg);
4681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4682 #ifndef DISABLE_REMOTING
/* Interface check failed: maybe the object is a transparent proxy. */
4683 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4684 MONO_START_BB (cfg, interface_fail_bb);
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4687 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4689 tmp_reg = alloc_preg (cfg);
4690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info))
4691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4694 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4697 #ifndef DISABLE_REMOTING
4698 tmp_reg = alloc_preg (cfg);
4699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy: do a plain class-hierarchy isinst check below. */
4702 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4703 tmp_reg = alloc_preg (cfg);
4704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4707 tmp_reg = alloc_preg (cfg);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4712 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4715 MONO_START_BB (cfg, no_proxy_bb);
4717 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4719 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2 (undecidable proxy), true_bb -> 0. */
4723 MONO_START_BB (cfg, false_bb);
4725 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4728 #ifndef DISABLE_REMOTING
4729 MONO_START_BB (cfg, false2_bb);
4731 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4732 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4735 MONO_START_BB (cfg, true_bb);
4737 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4739 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction on the stack (I4). */
4742 MONO_INST_NEW (cfg, ins, OP_ICONST);
4744 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CCASTCLASS internal opcode: cast SRC to KLASS, throwing
 * InvalidCastException on failure, with special handling for transparent
 * proxies (see value encoding in the comment below). Null references pass
 * the cast and yield 0.
 * NOTE(review): elided listing — some brace/#else/#endif lines are not shown.
 */
4750 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4752 /* This opcode takes as input an object reference and a class, and returns:
4753 0) if the object is an instance of the class,
4754 1) if the object is a proxy whose type cannot be determined
4755 an InvalidCastException exception is thrown otherwise*/
4758 #ifndef DISABLE_REMOTING
4759 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4761 MonoBasicBlock *ok_result_bb;
4763 int obj_reg = src->dreg;
4764 int dreg = alloc_ireg (cfg);
4765 int tmp_reg = alloc_preg (cfg);
4767 #ifndef DISABLE_REMOTING
4768 int klass_reg = alloc_preg (cfg);
4769 NEW_BBLOCK (cfg, end_bb);
4772 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts to anything: branch straight to the success result. */
4774 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4775 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record the klass/object for the detailed InvalidCastException message. */
4777 save_cast_details (cfg, klass, obj_reg, FALSE);
4779 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4780 #ifndef DISABLE_REMOTING
4781 NEW_BBLOCK (cfg, interface_fail_bb);
4783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4784 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4785 MONO_START_BB (cfg, interface_fail_bb);
4786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may still pass; anything else throws here. */
4788 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4790 tmp_reg = alloc_preg (cfg);
4791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4792 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4793 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision to runtime (result 1). */
4795 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4796 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4798 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4799 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4800 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4803 #ifndef DISABLE_REMOTING
4804 NEW_BBLOCK (cfg, no_proxy_bb);
4806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4808 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4810 tmp_reg = alloc_preg (cfg);
4811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4814 tmp_reg = alloc_preg (cfg);
4815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4817 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4819 NEW_BBLOCK (cfg, fail_1_bb);
4821 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4823 MONO_START_BB (cfg, fail_1_bb);
4825 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4826 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4828 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check, throwing on failure. */
4830 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4832 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4836 MONO_START_BB (cfg, ok_result_bb);
4838 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4840 #ifndef DISABLE_REMOTING
4841 MONO_START_BB (cfg, end_bb);
4845 MONO_INST_NEW (cfg, ins, OP_ICONST);
4847 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Intrinsic expansion of Enum.HasFlag (): load the enum value from
 * ENUM_THIS's storage, AND it with ENUM_FLAG, and compare the result for
 * equality with ENUM_FLAG, producing a boolean (I4). Uses 32- or 64-bit
 * opcodes depending on the underlying enum type.
 * NOTE(review): elided listing — the switch cases that set is_i4 and the
 * surrounding braces are not shown here.
 */
4852 static G_GNUC_UNUSED MonoInst*
4853 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4855 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4856 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4859 switch (enum_type->type) {
4862 #if SIZEOF_REGISTER == 8
4874 MonoInst *load, *and, *cmp, *ceq;
4875 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4876 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4877 int dest_reg = alloc_ireg (cfg);
/* (enum & flag) == flag */
4879 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4880 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4881 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4882 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4884 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends which lack them. */
4887 load = mono_decompose_opcode (cfg, load);
4888 and = mono_decompose_opcode (cfg, and);
4889 cmp = mono_decompose_opcode (cfg, cmp);
4890 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate of type KLASS bound to METHOD (with receiver TARGET)
 * and inline the field initialization normally done by mono_delegate_ctor ().
 * VIRTUAL selects a virtual-invoke trampoline. CONTEXT_USED != 0 means the
 * method is obtained through the RGCTX at runtime.
 */
4898 * Returns NULL and sets the cfg exception on error.
4900 static G_GNUC_UNUSED MonoInst*
4901 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4905 gpointer trampoline;
4906 MonoInst *obj, *method_ins, *tramp_ins;
4911 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4914 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4918 obj = handle_alloc (cfg, klass, FALSE, 0);
4922 /* Inline the contents of mono_delegate_ctor */
4924 /* Set target field */
4925 /* Optimize away setting of NULL target */
4926 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds an object reference: keep the GC informed. */
4928 if (cfg->gen_write_barriers) {
4929 dreg = alloc_preg (cfg);
4930 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4931 emit_write_barrier (cfg, ptr, target);
4935 /* Set method field */
4936 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4940 * To avoid looking up the compiled code belonging to the target method
4941 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4942 * store it, and we fill it after the method has been compiled.
4944 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4945 MonoInst *code_slot_ins;
4948 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method_code_hash is shared per-domain: mutate it under the domain lock. */
4950 domain = mono_domain_get ();
4951 mono_domain_lock (domain);
4952 if (!domain_jit_info (domain)->method_code_hash)
4953 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4954 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4956 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4957 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4959 mono_domain_unlock (domain);
4961 if (cfg->compile_aot)
4962 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4964 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4966 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable constant so the trampoline is resolved at load time. */
4969 if (cfg->compile_aot) {
4970 MonoDelegateClassMethodPair *del_tramp;
4972 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4973 del_tramp->klass = klass;
4974 del_tramp->method = context_used ? NULL : method;
4975 del_tramp->virtual = virtual;
4976 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4979 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4981 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4982 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4985 /* Set invoke_impl field */
4987 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4989 dreg = alloc_preg (cfg);
4990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4991 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4993 dreg = alloc_preg (cfg);
4994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4995 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4998 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of rank RANK, with the dimension arguments in SP.
 * The icall uses a vararg calling convention, so LLVM compilation of this
 * method is disabled.
 */
5004 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5006 MonoJitICallInfo *info;
5008 /* Need to register the icall so it gets an icall wrapper */
5009 info = mono_get_array_new_va_icall (rank);
5011 cfg->flags |= MONO_CFG_HAS_VARARGS;
5013 /* mono_array_new_va () needs a vararg calling convention */
5014 cfg->disable_llvm = TRUE;
5016 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5017 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5021 * handle_constrained_gsharedvt_call:
5023 * Handle constrained calls where the receiver is a gsharedvt type.
5024 * Return the instruction representing the call. Set the cfg exception on failure.
5027 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5028 gboolean *ref_emit_widen)
5030 MonoInst *ins = NULL;
5031 gboolean emit_widen = *ref_emit_widen;
5034 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5035 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5036 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a restricted set of signatures is supported by the icall path. */
5038 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5039 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
5040 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
5041 MonoInst *args [16];
5044 * This case handles calls to
5045 * - object:ToString()/Equals()/GetHashCode(),
5046 * - System.IComparable<T>:CompareTo()
5047 * - System.IEquatable<T>:Equals ()
5048 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1]: the target method, possibly looked up through the RGCTX. */
5052 if (mono_method_check_context_used (cmethod))
5053 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5055 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5056 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5058 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5059 if (fsig->hasthis && fsig->param_count) {
5060 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5061 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5062 ins->dreg = alloc_preg (cfg);
5063 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5064 MONO_ADD_INS (cfg->cbb, ins);
5067 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
/* args [3]: box-type info so the icall knows whether to box the argument. */
5070 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5072 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5073 addr_reg = ins->dreg;
5074 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5076 EMIT_NEW_ICONST (cfg, args [3], 0);
5077 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5080 EMIT_NEW_ICONST (cfg, args [3], 0);
5081 EMIT_NEW_ICONST (cfg, args [4], 0);
5083 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result: unbox it to the signature's return type. */
5086 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
5087 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5088 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5092 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5093 MONO_ADD_INS (cfg->cbb, add);
5095 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5096 MONO_ADD_INS (cfg->cbb, ins);
5097 /* ins represents the call result */
5100 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5103 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block so
 * cfg->got_var holds the GOT address for the whole method. No-op if there is
 * no got_var or it was already allocated.
 */
5112 mono_emit_load_got_addr (MonoCompile *cfg)
5114 MonoInst *getaddr, *dummy_use;
5116 if (!cfg->got_var || cfg->got_var_allocated)
5119 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5120 getaddr->cil_code = cfg->header->code;
5121 getaddr->dreg = cfg->got_var->dreg;
5123 /* Add it to the start of the first bblock */
5124 if (cfg->bb_entry->code) {
5125 getaddr->next = cfg->bb_entry->code;
5126 cfg->bb_entry->code = getaddr;
5129 MONO_ADD_INS (cfg->bb_entry, getaddr);
5131 cfg->got_var_allocated = TRUE;
5134 * Add a dummy use to keep the got_var alive, since real uses might
5135 * only be generated by the back ends.
5136 * Add it to end_bblock, so the variable's lifetime covers the whole
5138 * It would be better to make the usage of the got var explicit in all
5139 * cases when the backend needs it (i.e. calls, throw etc.), so this
5140 * wouldn't be needed.
5142 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5143 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit: MONO_INLINELIMIT env var, or
 * INLINE_LENGTH_LIMIT by default (see mono_method_check_inlining). */
5146 static int inline_limit;
5147 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects methods that are marked noinline/synchronized, marshal-by-ref,
 * too large (unless AggressiveInlining), or whose class cctor cannot be
 * run/initialized safely at this point. Also rejects soft-float R4
 * signatures on soft-float targets.
 */
5150 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5152 MonoMethodHeaderSummary header;
5154 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5155 MonoMethodSignature *sig = mono_method_signature (method);
5159 if (cfg->disable_inline)
5161 if (cfg->generic_sharing_context)
5164 if (cfg->inline_depth > 10)
5167 #ifdef MONO_ARCH_HAVE_LMF_OPS
5168 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5169 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5170 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5175 if (!mono_method_get_header_summary (method, &header))
5178 /*runtime, icall and pinvoke are checked by summary call*/
5179 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5180 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5181 (mono_class_is_marshalbyref (method->klass)) ||
5185 /* also consider num_locals? */
5186 /* Do the size check early to avoid creating vtables */
5187 if (!inline_limit_inited) {
5188 if (g_getenv ("MONO_INLINELIMIT"))
5189 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5191 inline_limit = INLINE_LENGTH_LIMIT;
5192 inline_limit_inited = TRUE;
5194 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5198 * if we can initialize the class of the method right away, we do,
5199 * otherwise we don't allow inlining if the class needs initialization,
5200 * since it would mean inserting a call to mono_runtime_class_init()
5201 * inside the inlined code
5203 if (!(cfg->opt & MONO_OPT_SHARED)) {
5204 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5205 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5206 vtable = mono_class_vtable (cfg->domain, method->klass);
5209 if (!cfg->compile_aot)
5210 mono_runtime_class_init (vtable);
5211 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5212 if (cfg->run_cctors && method->klass->has_cctor) {
5213 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5214 if (!method->klass->runtime_info)
5215 /* No vtable created yet */
5217 vtable = mono_class_vtable (cfg->domain, method->klass);
5220 /* This makes it so that inlining cannot trigger */
5221 /* .cctors: too many apps depend on them */
5222 /* running with a specific order... */
5223 if (! vtable->initialized)
5225 mono_runtime_class_init (vtable);
5227 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5228 if (!method->klass->runtime_info)
5229 /* No vtable created yet */
5231 vtable = mono_class_vtable (cfg->domain, method->klass);
5234 if (!vtable->initialized)
5239 * If we're compiling for shared code
5240 * the cctor will need to be run at aot method load time, for example,
5241 * or at the end of the compilation of the inlining method.
5243 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5247 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5248 if (mono_arch_is_soft_float ()) {
/* R4 arguments/returns need special handling under soft float: don't inline. */
5250 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5252 for (i = 0; i < sig->param_count; ++i)
5253 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5258 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD on KLASS requires
 * emitting a class-init check for VTABLE: FALSE when the vtable is already
 * initialized (JIT only), for beforefieldinit classes accessed from other
 * methods, when no cctor run is needed, or for instance methods of the
 * same class (the cctor already ran before the call).
 */
5265 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5267 if (!cfg->compile_aot) {
5269 if (vtable->initialized)
5273 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5274 if (cfg->method == method)
5278 if (!mono_class_needs_cctor_run (klass, method))
5281 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5282 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS. BCHECK controls whether a bounds check
 * is emitted. Returns an instruction of type STACK_MP pointing at the
 * element. Uses an x86/amd64 LEA fast path for power-of-two element sizes,
 * and an RGCTX lookup for the element size with gsharedvt variable types.
 */
5289 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5293 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5296 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5299 mono_class_init (klass);
5300 size = mono_class_array_element_size (klass);
5303 mult_reg = alloc_preg (cfg);
5304 array_reg = arr->dreg;
5305 index_reg = index->dreg;
5307 #if SIZEOF_REGISTER == 8
5308 /* The array reg is 64 bits but the index reg is only 32 */
5309 if (COMPILE_LLVM (cfg)) {
5311 index2_reg = index_reg;
5313 index2_reg = alloc_preg (cfg);
5314 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5317 if (index->type == STACK_I8) {
5318 index2_reg = alloc_preg (cfg);
5319 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5321 index2_reg = index_reg;
5326 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5328 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale and vector offset into a single LEA. */
5329 if (size == 1 || size == 2 || size == 4 || size == 8) {
5330 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5332 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5333 ins->klass = mono_class_get_element_class (klass);
5334 ins->type = STACK_MP;
5340 add_reg = alloc_ireg_mp (cfg);
5343 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime; fetch it via RGCTX. */
5346 g_assert (cfg->generic_sharing_context);
5347 context_used = mini_class_check_context_used (cfg, klass);
5348 g_assert (context_used);
5349 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5350 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5354 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5355 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5356 ins->klass = mono_class_get_element_class (klass);
5357 ins->type = STACK_MP;
5358 MONO_ADD_INS (cfg->cbb, ins);
5363 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element (INDEX_INS1, INDEX_INS2) of the
 * rank-2 array ARR with element class KLASS, including per-dimension bounds
 * checks against the MonoArrayBounds records. Only compiled when the target
 * has a native multiply (needs OP_LMUL).
 */
5365 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5367 int bounds_reg = alloc_preg (cfg);
5368 int add_reg = alloc_ireg_mp (cfg);
5369 int mult_reg = alloc_preg (cfg);
5370 int mult2_reg = alloc_preg (cfg);
5371 int low1_reg = alloc_preg (cfg);
5372 int low2_reg = alloc_preg (cfg);
5373 int high1_reg = alloc_preg (cfg);
5374 int high2_reg = alloc_preg (cfg);
5375 int realidx1_reg = alloc_preg (cfg);
5376 int realidx2_reg = alloc_preg (cfg);
5377 int sum_reg = alloc_preg (cfg);
5378 int index1, index2, tmpreg;
5382 mono_class_init (klass);
5383 size = mono_class_array_element_size (klass);
5385 index1 = index_ins1->dreg;
5386 index2 = index_ins2->dreg;
5388 #if SIZEOF_REGISTER == 8
5389 /* The array reg is 64 bits but the index reg is only 32 */
5390 if (COMPILE_LLVM (cfg)) {
5393 tmpreg = alloc_preg (cfg);
5394 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5396 tmpreg = alloc_preg (cfg);
5397 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5401 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5405 /* range checking */
5406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5407 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* realidx = index - lower_bound; unsigned compare also catches index < low. */
5409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5410 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5411 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5412 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5413 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5414 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5415 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds record lives at bounds + sizeof (MonoArrayBounds). */
5417 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5418 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5419 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5420 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5421 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5422 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5423 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector) */
5425 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5426 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5427 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5428 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5429 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5431 ins->type = STACK_MP;
5433 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an Array Address/Get/Set helper
 * CMETHOD: rank 1 uses the inline fast path, rank 2 the dedicated 2D path
 * when available, everything else falls back to a call to the
 * mono_marshal_get_array_address () wrapper.
 */
5440 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5444 MonoMethod *addr_method;
5446 MonoClass *eclass = cmethod->klass->element_class;
/* For a setter the last signature parameter is the value, not an index. */
5448 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5451 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5453 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5454 /* emit_ldelema_2 depends on OP_LMUL */
5455 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5456 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5460 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
5463 element_size = mono_class_array_element_size (eclass);
5464 addr_method = mono_marshal_get_array_address (rank, element_size);
5465 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
5470 static MonoBreakPolicy
5471 always_insert_breakpoint (MonoMethod *method)
5473 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced by embedders via mono_set_break_policy. */
5476 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5479 * mono_set_break_policy:
5480 * policy_callback: the new callback function
5482 * Allow embedders to decide whether to actually obey breakpoint instructions
5483 * (both break IL instructions and Debugger.Break () method calls), for example
5484 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5485 * untrusted or semi-trusted code.
5487 * @policy_callback will be called every time a break point instruction needs to
5488 * be inserted with the method argument being the method that calls Debugger.Break()
5489 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5490 * if it wants the breakpoint to not be effective in the given method.
5491 * #MONO_BREAK_POLICY_ALWAYS is the default.
5494 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
5496 if (policy_callback)
5497 break_policy_func = policy_callback;
5499 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — historical typo in the name, kept for
 * compatibility with existing callers)
 *
 *   Consult the installed break policy to decide whether a breakpoint should
 * actually be emitted for METHOD.
 */
5503 should_insert_brekpoint (MonoMethod *method) {
5504 switch (break_policy_func (method)) {
5505 case MONO_BREAK_POLICY_ALWAYS:
5507 case MONO_BREAK_POLICY_NEVER:
5509 case MONO_BREAK_POLICY_ON_DBG:
5510 g_warning ("mdb no longer supported");
5513 g_warning ("Incorrect value returned from break policy callback");
5518 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = value/destination address.
 * IS_SET copies *args [2] into the element (with a write barrier for
 * reference elements); otherwise copies the element out to *args [2].
 * Bounds checking is the caller's responsibility (see comment below).
 */
5520 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5522 MonoInst *addr, *store, *load;
5523 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5525 /* the bounds check is already done by the callers */
5526 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5528 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5529 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5530 if (mini_type_is_reference (cfg, fsig->params [2]))
5531 emit_write_barrier (cfg, addr, load);
5533 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5534 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is (or instantiates to) a reference type, for write-barrier
 * and stelemref decisions below. */
5541 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5543 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into sp [0][sp [1]] (stelem). Reference-type
 * stores with a possibly non-null value go through the virtual stelemref
 * helper (which performs the array covariance check); otherwise the element
 * address is computed inline. SAFETY_CHECKS controls bounds checking.
 */
5547 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5549 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5550 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5551 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5552 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5553 MonoInst *iargs [3];
5556 mono_class_setup_vtable (obj_array);
5557 g_assert (helper->slot);
5559 if (sp [0]->type != STACK_OBJ)
5561 if (sp [2]->type != STACK_OBJ)
5568 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at JIT time, use a variable-size store. */
5572 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5575 // FIXME-VT: OP_ICONST optimization
5576 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5577 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5578 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset into the store instruction. */
5579 } else if (sp [1]->opcode == OP_ICONST) {
5580 int array_reg = sp [0]->dreg;
5581 int index_reg = sp [1]->dreg;
5582 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5585 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5586 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5588 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5589 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5590 if (generic_class_is_reference_type (cfg, klass))
5591 emit_write_barrier (cfg, addr, sp [2]);
5598 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5603 eklass = mono_class_from_mono_type (fsig->params [2]);
5605 eklass = mono_class_from_mono_type (fsig->ret);
5608 return emit_array_store (cfg, eklass, args, FALSE);
5610 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5611 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5617 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5621 param_klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, ¶m_klass->byval_arg));
5623 //Only allow for valuetypes
5624 if (!param_klass->valuetype || !return_klass->valuetype)
5628 if (param_klass->has_references || return_klass->has_references)
5631 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5632 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5633 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5636 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5637 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5640 //And have the same size
5641 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
5647 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5649 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5650 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5652 //Valuetypes that are semantically equivalent
5653 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5656 //Arrays of valuetypes that are semantically equivalent
5657 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5664 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5666 #ifdef MONO_ARCH_SIMD_INTRINSICS
5667 MonoInst *ins = NULL;
5669 if (cfg->opt & MONO_OPT_SIMD) {
5670 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5676 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5680 emit_memory_barrier (MonoCompile *cfg, int kind)
5682 MonoInst *ins = NULL;
5683 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5684 MONO_ADD_INS (cfg->cbb, ins);
5685 ins->backend.memory_barrier_kind = kind;
5691 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5693 MonoInst *ins = NULL;
5696 /* The LLVM backend supports these intrinsics */
5697 if (cmethod->klass == mono_defaults.math_class) {
5698 if (strcmp (cmethod->name, "Sin") == 0) {
5700 } else if (strcmp (cmethod->name, "Cos") == 0) {
5702 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5704 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5708 if (opcode && fsig->param_count == 1) {
5709 MONO_INST_NEW (cfg, ins, opcode);
5710 ins->type = STACK_R8;
5711 ins->dreg = mono_alloc_freg (cfg);
5712 ins->sreg1 = args [0]->dreg;
5713 MONO_ADD_INS (cfg->cbb, ins);
5717 if (cfg->opt & MONO_OPT_CMOV) {
5718 if (strcmp (cmethod->name, "Min") == 0) {
5719 if (fsig->params [0]->type == MONO_TYPE_I4)
5721 if (fsig->params [0]->type == MONO_TYPE_U4)
5722 opcode = OP_IMIN_UN;
5723 else if (fsig->params [0]->type == MONO_TYPE_I8)
5725 else if (fsig->params [0]->type == MONO_TYPE_U8)
5726 opcode = OP_LMIN_UN;
5727 } else if (strcmp (cmethod->name, "Max") == 0) {
5728 if (fsig->params [0]->type == MONO_TYPE_I4)
5730 if (fsig->params [0]->type == MONO_TYPE_U4)
5731 opcode = OP_IMAX_UN;
5732 else if (fsig->params [0]->type == MONO_TYPE_I8)
5734 else if (fsig->params [0]->type == MONO_TYPE_U8)
5735 opcode = OP_LMAX_UN;
5739 if (opcode && fsig->param_count == 2) {
5740 MONO_INST_NEW (cfg, ins, opcode);
5741 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5742 ins->dreg = mono_alloc_ireg (cfg);
5743 ins->sreg1 = args [0]->dreg;
5744 ins->sreg2 = args [1]->dreg;
5745 MONO_ADD_INS (cfg->cbb, ins);
5753 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5755 if (cmethod->klass == mono_defaults.array_class) {
5756 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5757 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5758 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5759 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5760 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5761 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Central intrinsic dispatcher: recognizes well-known BCL methods by class
 * and name and emits inlined IR instead of a call. Returns the resulting
 * MonoInst*, or falls through to backend-specific handlers at the end.
 *
 * NOTE(review): this chunk is a line-numbered extract of the original file;
 * brace/else/return/blank lines are missing from it, so the text below is
 * intentionally left byte-identical (not compilable as-is) and only comments
 * were added.
 */
5768 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5770 MonoInst *ins = NULL;
5772 static MonoClass *runtime_helpers_class = NULL;
5773 if (! runtime_helpers_class)
5774 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5775 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String: get_Chars (bounds-checked 16-bit char load) and get_Length (OP_STRLEN) --- */
5777 if (cmethod->klass == mono_defaults.string_class) {
5778 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5779 int dreg = alloc_ireg (cfg);
5780 int index_reg = alloc_preg (cfg);
5781 int add_reg = alloc_preg (cfg);
5783 #if SIZEOF_REGISTER == 8
5784 /* The array reg is 64 bits but the index reg is only 32 */
5785 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5787 index_reg = args [1]->dreg;
5789 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5791 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + chars-offset into a single LEA */
5792 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5793 add_reg = ins->dreg;
5794 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* other targets: compute the address with an explicit shift + add */
5797 int mult_reg = alloc_preg (cfg);
5798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5799 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5800 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5801 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5803 type_from_op (cfg, ins, NULL, NULL);
5805 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5806 int dreg = alloc_ireg (cfg);
5807 /* Decompose later to allow more optimizations */
5808 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5809 ins->type = STACK_I4;
5810 ins->flags |= MONO_INST_FAULT;
5811 cfg->cbb->has_array_access = TRUE;
5812 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* --- System.Object: GetType via vtable load; identity hash (non-moving GC only); empty .ctor -> nop --- */
5817 } else if (cmethod->klass == mono_defaults.object_class) {
5819 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5820 int dreg = alloc_ireg_ref (cfg);
5821 int vt_reg = alloc_preg (cfg);
5822 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5823 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5824 type_from_op (cfg, ins, NULL, NULL);
5827 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* hash = (obj >> 3) * 2654435761 (Knuth multiplicative hash); only valid when objects don't move */
5828 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5829 int dreg = alloc_ireg (cfg);
5830 int t1 = alloc_ireg (cfg);
5832 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5833 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5834 ins->type = STACK_I4;
5838 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5839 MONO_INST_NEW (cfg, ins, OP_NOP);
5840 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array: Get/SetGenericValueImpl, inline GetLength/GetLowerBound(0), get_Rank, get_Length --- */
5844 } else if (cmethod->klass == mono_defaults.array_class) {
5845 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5846 return emit_array_generic_access (cfg, fsig, args, FALSE);
5847 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5848 return emit_array_generic_access (cfg, fsig, args, TRUE);
5850 #ifndef MONO_BIG_ARRAYS
5852 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5855 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5856 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5857 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5858 int dreg = alloc_ireg (cfg);
5859 int bounds_reg = alloc_ireg_mp (cfg);
5860 MonoBasicBlock *end_bb, *szarray_bb;
5861 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5863 NEW_BBLOCK (cfg, end_bb);
5864 NEW_BBLOCK (cfg, szarray_bb);
/* a NULL bounds pointer identifies a single-dimension zero-based (sz)array */
5866 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5867 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5869 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5870 /* Non-szarray case */
5872 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5873 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5875 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5876 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5877 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5878 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length; GetLowerBound(0) is always 0 */
5881 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5882 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5884 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5885 MONO_START_BB (cfg, end_bb);
5887 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5888 ins->type = STACK_I4;
/* fast reject: the remaining Array intrinsics all start with 'g' (get_*) */
5894 if (cmethod->name [0] != 'g')
5897 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5898 int dreg = alloc_ireg (cfg);
5899 int vtable_reg = alloc_preg (cfg);
5900 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5901 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5902 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5903 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5904 type_from_op (cfg, ins, NULL, NULL);
5907 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5908 int dreg = alloc_ireg (cfg);
5910 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5911 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5912 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers.get_OffsetToStringData -> compile-time constant offset of MonoString.chars --- */
5917 } else if (cmethod->klass == runtime_helpers_class) {
5919 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5920 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread: SpinWait_nop, MemoryBarrier, VolatileRead/VolatileWrite --- */
5924 } else if (cmethod->klass == mono_defaults.thread_class) {
5925 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5926 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5927 MONO_ADD_INS (cfg->cbb, ins);
5929 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5930 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5931 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5933 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
/* pick the load opcode matching the parameter's element type */
5935 if (fsig->params [0]->type == MONO_TYPE_I1)
5936 opcode = OP_LOADI1_MEMBASE;
5937 else if (fsig->params [0]->type == MONO_TYPE_U1)
5938 opcode = OP_LOADU1_MEMBASE;
5939 else if (fsig->params [0]->type == MONO_TYPE_I2)
5940 opcode = OP_LOADI2_MEMBASE;
5941 else if (fsig->params [0]->type == MONO_TYPE_U2)
5942 opcode = OP_LOADU2_MEMBASE;
5943 else if (fsig->params [0]->type == MONO_TYPE_I4)
5944 opcode = OP_LOADI4_MEMBASE;
5945 else if (fsig->params [0]->type == MONO_TYPE_U4)
5946 opcode = OP_LOADU4_MEMBASE;
5947 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5948 opcode = OP_LOADI8_MEMBASE;
5949 else if (fsig->params [0]->type == MONO_TYPE_R4)
5950 opcode = OP_LOADR4_MEMBASE;
5951 else if (fsig->params [0]->type == MONO_TYPE_R8)
5952 opcode = OP_LOADR8_MEMBASE;
5953 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5954 opcode = OP_LOAD_MEMBASE;
5957 MONO_INST_NEW (cfg, ins, opcode);
5958 ins->inst_basereg = args [0]->dreg;
5959 ins->inst_offset = 0;
5960 MONO_ADD_INS (cfg->cbb, ins);
/* choose dreg class and stack type from the element type */
5962 switch (fsig->params [0]->type) {
5969 ins->dreg = mono_alloc_ireg (cfg);
5970 ins->type = STACK_I4;
5974 ins->dreg = mono_alloc_lreg (cfg);
5975 ins->type = STACK_I8;
5979 ins->dreg = mono_alloc_ireg (cfg);
5980 #if SIZEOF_REGISTER == 8
5981 ins->type = STACK_I8;
5983 ins->type = STACK_I4;
5988 ins->dreg = mono_alloc_freg (cfg);
5989 ins->type = STACK_R8;
5992 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5993 ins->dreg = mono_alloc_ireg_ref (cfg);
5994 ins->type = STACK_OBJ;
5998 if (opcode == OP_LOADI8_MEMBASE)
5999 ins = mono_decompose_opcode (cfg, ins);
/* acquire barrier after the load */
6001 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6005 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6007 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6009 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6010 opcode = OP_STOREI1_MEMBASE_REG;
6011 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6012 opcode = OP_STOREI2_MEMBASE_REG;
6013 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6014 opcode = OP_STOREI4_MEMBASE_REG;
6015 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6016 opcode = OP_STOREI8_MEMBASE_REG;
6017 else if (fsig->params [0]->type == MONO_TYPE_R4)
6018 opcode = OP_STORER4_MEMBASE_REG;
6019 else if (fsig->params [0]->type == MONO_TYPE_R8)
6020 opcode = OP_STORER8_MEMBASE_REG;
6021 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6022 opcode = OP_STORE_MEMBASE_REG;
/* release barrier before the store */
6025 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6027 MONO_INST_NEW (cfg, ins, opcode);
6028 ins->sreg1 = args [1]->dreg;
6029 ins->inst_destbasereg = args [0]->dreg;
6030 ins->inst_offset = 0;
6031 MONO_ADD_INS (cfg->cbb, ins);
6033 if (opcode == OP_STOREI8_MEMBASE_REG)
6034 ins = mono_decompose_opcode (cfg, ins);
/* --- System.Threading.Monitor: fast-path Enter/Exit through arch-specific trampolines --- */
6039 } else if (cmethod->klass == mono_defaults.monitor_class) {
6040 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
6041 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
6044 if (COMPILE_LLVM (cfg)) {
6046 * Pass the argument normally, the LLVM backend will handle the
6047 * calling convention problems.
6049 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6051 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
6052 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6053 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6054 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6057 return (MonoInst*)call;
6058 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Enter(obj, ref lockTaken) -- the C# 4.0 overload */
6059 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
6062 if (COMPILE_LLVM (cfg)) {
6064 * Pass the argument normally, the LLVM backend will handle the
6065 * calling convention problems.
6067 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
6069 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
6070 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6071 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6072 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
6075 return (MonoInst*)call;
6077 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
6080 if (COMPILE_LLVM (cfg)) {
6081 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6083 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6084 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6085 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6086 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6089 return (MonoInst*)call;
/* --- System.Threading.Interlocked: Read, Increment/Decrement/Add, Exchange, CompareExchange --- */
6092 } else if (cmethod->klass->image == mono_defaults.corlib &&
6093 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6094 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6097 #if SIZEOF_REGISTER == 8
6098 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6099 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6100 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6101 ins->dreg = mono_alloc_preg (cfg);
6102 ins->sreg1 = args [0]->dreg;
6103 ins->type = STACK_I8;
6104 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6105 MONO_ADD_INS (cfg->cbb, ins);
/* no atomic-load opcode: fence + plain load + fence */
6109 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6111 /* 64 bit reads are already atomic */
6112 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6113 load_ins->dreg = mono_alloc_preg (cfg);
6114 load_ins->inst_basereg = args [0]->dreg;
6115 load_ins->inst_offset = 0;
6116 load_ins->type = STACK_I8;
6117 MONO_ADD_INS (cfg->cbb, load_ins);
6119 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement are lowered to atomic-add of +1/-1 */
6126 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6127 MonoInst *ins_iconst;
6130 if (fsig->params [0]->type == MONO_TYPE_I4) {
6131 opcode = OP_ATOMIC_ADD_I4;
6132 cfg->has_atomic_add_i4 = TRUE;
6134 #if SIZEOF_REGISTER == 8
6135 else if (fsig->params [0]->type == MONO_TYPE_I8)
6136 opcode = OP_ATOMIC_ADD_I8;
6139 if (!mono_arch_opcode_supported (opcode))
6141 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6142 ins_iconst->inst_c0 = 1;
6143 ins_iconst->dreg = mono_alloc_ireg (cfg);
6144 MONO_ADD_INS (cfg->cbb, ins_iconst);
6146 MONO_INST_NEW (cfg, ins, opcode);
6147 ins->dreg = mono_alloc_ireg (cfg);
6148 ins->inst_basereg = args [0]->dreg;
6149 ins->inst_offset = 0;
6150 ins->sreg2 = ins_iconst->dreg;
6151 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6152 MONO_ADD_INS (cfg->cbb, ins);
6154 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6155 MonoInst *ins_iconst;
6158 if (fsig->params [0]->type == MONO_TYPE_I4) {
6159 opcode = OP_ATOMIC_ADD_I4;
6160 cfg->has_atomic_add_i4 = TRUE;
6162 #if SIZEOF_REGISTER == 8
6163 else if (fsig->params [0]->type == MONO_TYPE_I8)
6164 opcode = OP_ATOMIC_ADD_I8;
6167 if (!mono_arch_opcode_supported (opcode))
6169 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6170 ins_iconst->inst_c0 = -1;
6171 ins_iconst->dreg = mono_alloc_ireg (cfg);
6172 MONO_ADD_INS (cfg->cbb, ins_iconst);
6174 MONO_INST_NEW (cfg, ins, opcode);
6175 ins->dreg = mono_alloc_ireg (cfg);
6176 ins->inst_basereg = args [0]->dreg;
6177 ins->inst_offset = 0;
6178 ins->sreg2 = ins_iconst->dreg;
6179 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6180 MONO_ADD_INS (cfg->cbb, ins);
6182 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6185 if (fsig->params [0]->type == MONO_TYPE_I4) {
6186 opcode = OP_ATOMIC_ADD_I4;
6187 cfg->has_atomic_add_i4 = TRUE;
6189 #if SIZEOF_REGISTER == 8
6190 else if (fsig->params [0]->type == MONO_TYPE_I8)
6191 opcode = OP_ATOMIC_ADD_I8;
6194 if (!mono_arch_opcode_supported (opcode))
6196 MONO_INST_NEW (cfg, ins, opcode);
6197 ins->dreg = mono_alloc_ireg (cfg);
6198 ins->inst_basereg = args [0]->dreg;
6199 ins->inst_offset = 0;
6200 ins->sreg2 = args [1]->dreg;
6201 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6202 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: floats are moved through integer registers (f2i/i2f) around the atomic op */
6205 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6206 MonoInst *f2i = NULL, *i2f;
6207 guint32 opcode, f2i_opcode, i2f_opcode;
6208 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6209 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6211 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6212 fsig->params [0]->type == MONO_TYPE_R4) {
6213 opcode = OP_ATOMIC_EXCHANGE_I4;
6214 f2i_opcode = OP_MOVE_F_TO_I4;
6215 i2f_opcode = OP_MOVE_I4_TO_F;
6216 cfg->has_atomic_exchange_i4 = TRUE;
6218 #if SIZEOF_REGISTER == 8
6220 fsig->params [0]->type == MONO_TYPE_I8 ||
6221 fsig->params [0]->type == MONO_TYPE_R8 ||
6222 fsig->params [0]->type == MONO_TYPE_I) {
6223 opcode = OP_ATOMIC_EXCHANGE_I8;
6224 f2i_opcode = OP_MOVE_F_TO_I8;
6225 i2f_opcode = OP_MOVE_I8_TO_F;
6228 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6229 opcode = OP_ATOMIC_EXCHANGE_I4;
6230 cfg->has_atomic_exchange_i4 = TRUE;
6236 if (!mono_arch_opcode_supported (opcode))
6240 /* TODO: Decompose these opcodes instead of bailing here. */
6241 if (COMPILE_SOFT_FLOAT (cfg))
6244 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6245 f2i->dreg = mono_alloc_ireg (cfg);
6246 f2i->sreg1 = args [1]->dreg;
6247 if (f2i_opcode == OP_MOVE_F_TO_I4)
6248 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6249 MONO_ADD_INS (cfg->cbb, f2i);
6252 MONO_INST_NEW (cfg, ins, opcode);
6253 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6254 ins->inst_basereg = args [0]->dreg;
6255 ins->inst_offset = 0;
6256 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6257 MONO_ADD_INS (cfg->cbb, ins);
6259 switch (fsig->params [0]->type) {
6261 ins->type = STACK_I4;
6264 ins->type = STACK_I8;
6267 #if SIZEOF_REGISTER == 8
6268 ins->type = STACK_I8;
6270 ins->type = STACK_I4;
6275 ins->type = STACK_R8;
6278 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6279 ins->type = STACK_OBJ;
6284 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6285 i2f->dreg = mono_alloc_freg (cfg);
6286 i2f->sreg1 = ins->dreg;
6287 i2f->type = STACK_R8;
6288 if (i2f_opcode == OP_MOVE_I4_TO_F)
6289 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6290 MONO_ADD_INS (cfg->cbb, i2f);
/* a reference may have been stored into *args[0]: emit the write barrier */
6295 if (cfg->gen_write_barriers && is_ref)
6296 emit_write_barrier (cfg, args [0], args [1]);
6298 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6299 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6300 guint32 opcode, f2i_opcode, i2f_opcode;
6301 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6302 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6304 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6305 fsig->params [1]->type == MONO_TYPE_R4) {
6306 opcode = OP_ATOMIC_CAS_I4;
6307 f2i_opcode = OP_MOVE_F_TO_I4;
6308 i2f_opcode = OP_MOVE_I4_TO_F;
6309 cfg->has_atomic_cas_i4 = TRUE;
6311 #if SIZEOF_REGISTER == 8
6313 fsig->params [1]->type == MONO_TYPE_I8 ||
6314 fsig->params [1]->type == MONO_TYPE_R8 ||
6315 fsig->params [1]->type == MONO_TYPE_I) {
6316 opcode = OP_ATOMIC_CAS_I8;
6317 f2i_opcode = OP_MOVE_F_TO_I8;
6318 i2f_opcode = OP_MOVE_I8_TO_F;
6321 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6322 opcode = OP_ATOMIC_CAS_I4;
6323 cfg->has_atomic_cas_i4 = TRUE;
6329 if (!mono_arch_opcode_supported (opcode))
6333 /* TODO: Decompose these opcodes instead of bailing here. */
6334 if (COMPILE_SOFT_FLOAT (cfg))
6337 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6338 f2i_new->dreg = mono_alloc_ireg (cfg);
6339 f2i_new->sreg1 = args [1]->dreg;
6340 if (f2i_opcode == OP_MOVE_F_TO_I4)
6341 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6342 MONO_ADD_INS (cfg->cbb, f2i_new);
6344 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6345 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6346 f2i_cmp->sreg1 = args [2]->dreg;
6347 if (f2i_opcode == OP_MOVE_F_TO_I4)
6348 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6349 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6352 MONO_INST_NEW (cfg, ins, opcode);
6353 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6354 ins->sreg1 = args [0]->dreg;
6355 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6356 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6357 MONO_ADD_INS (cfg->cbb, ins);
6359 switch (fsig->params [1]->type) {
6361 ins->type = STACK_I4;
6364 ins->type = STACK_I8;
6367 #if SIZEOF_REGISTER == 8
6368 ins->type = STACK_I8;
6370 ins->type = STACK_I4;
6375 ins->type = STACK_R8;
6378 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6379 ins->type = STACK_OBJ;
6384 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6385 i2f->dreg = mono_alloc_freg (cfg);
6386 i2f->sreg1 = ins->dreg;
6387 i2f->type = STACK_R8;
6388 if (i2f_opcode == OP_MOVE_I4_TO_F)
6389 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6390 MONO_ADD_INS (cfg->cbb, i2f);
6395 if (cfg->gen_write_barriers && is_ref)
6396 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(ref int, int, int, out bool success) overload */
6398 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6399 fsig->params [1]->type == MONO_TYPE_I4) {
6400 MonoInst *cmp, *ceq;
6402 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6405 /* int32 r = CAS (location, value, comparand); */
6406 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6407 ins->dreg = alloc_ireg (cfg);
6408 ins->sreg1 = args [0]->dreg;
6409 ins->sreg2 = args [1]->dreg;
6410 ins->sreg3 = args [2]->dreg;
6411 ins->type = STACK_I4;
6412 MONO_ADD_INS (cfg->cbb, ins);
6414 /* bool result = r == comparand; */
6415 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6416 cmp->sreg1 = ins->dreg;
6417 cmp->sreg2 = args [2]->dreg;
6418 cmp->type = STACK_I4;
6419 MONO_ADD_INS (cfg->cbb, cmp);
6421 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6422 ceq->dreg = alloc_ireg (cfg);
6423 ceq->type = STACK_I4;
6424 MONO_ADD_INS (cfg->cbb, ceq);
6426 /* *success = result; */
6427 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6429 cfg->has_atomic_cas_i4 = TRUE;
6431 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6432 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores via OP_ATOMIC_LOAD/STORE_* --- */
6436 } else if (cmethod->klass->image == mono_defaults.corlib &&
6437 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6438 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6441 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6443 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6444 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6446 if (fsig->params [0]->type == MONO_TYPE_I1)
6447 opcode = OP_ATOMIC_LOAD_I1;
6448 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6449 opcode = OP_ATOMIC_LOAD_U1;
6450 else if (fsig->params [0]->type == MONO_TYPE_I2)
6451 opcode = OP_ATOMIC_LOAD_I2;
6452 else if (fsig->params [0]->type == MONO_TYPE_U2)
6453 opcode = OP_ATOMIC_LOAD_U2;
6454 else if (fsig->params [0]->type == MONO_TYPE_I4)
6455 opcode = OP_ATOMIC_LOAD_I4;
6456 else if (fsig->params [0]->type == MONO_TYPE_U4)
6457 opcode = OP_ATOMIC_LOAD_U4;
6458 else if (fsig->params [0]->type == MONO_TYPE_R4)
6459 opcode = OP_ATOMIC_LOAD_R4;
6460 else if (fsig->params [0]->type == MONO_TYPE_R8)
6461 opcode = OP_ATOMIC_LOAD_R8;
6462 #if SIZEOF_REGISTER == 8
6463 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6464 opcode = OP_ATOMIC_LOAD_I8;
6465 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6466 opcode = OP_ATOMIC_LOAD_U8;
6468 else if (fsig->params [0]->type == MONO_TYPE_I)
6469 opcode = OP_ATOMIC_LOAD_I4;
6470 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6471 opcode = OP_ATOMIC_LOAD_U4;
6475 if (!mono_arch_opcode_supported (opcode))
6478 MONO_INST_NEW (cfg, ins, opcode);
6479 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6480 ins->sreg1 = args [0]->dreg;
6481 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6482 MONO_ADD_INS (cfg->cbb, ins);
6484 switch (fsig->params [0]->type) {
6485 case MONO_TYPE_BOOLEAN:
6492 ins->type = STACK_I4;
6496 ins->type = STACK_I8;
6500 #if SIZEOF_REGISTER == 8
6501 ins->type = STACK_I8;
6503 ins->type = STACK_I4;
6508 ins->type = STACK_R8;
6511 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6512 ins->type = STACK_OBJ;
6518 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6520 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6522 if (fsig->params [0]->type == MONO_TYPE_I1)
6523 opcode = OP_ATOMIC_STORE_I1;
6524 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6525 opcode = OP_ATOMIC_STORE_U1;
6526 else if (fsig->params [0]->type == MONO_TYPE_I2)
6527 opcode = OP_ATOMIC_STORE_I2;
6528 else if (fsig->params [0]->type == MONO_TYPE_U2)
6529 opcode = OP_ATOMIC_STORE_U2;
6530 else if (fsig->params [0]->type == MONO_TYPE_I4)
6531 opcode = OP_ATOMIC_STORE_I4;
6532 else if (fsig->params [0]->type == MONO_TYPE_U4)
6533 opcode = OP_ATOMIC_STORE_U4;
6534 else if (fsig->params [0]->type == MONO_TYPE_R4)
6535 opcode = OP_ATOMIC_STORE_R4;
6536 else if (fsig->params [0]->type == MONO_TYPE_R8)
6537 opcode = OP_ATOMIC_STORE_R8;
6538 #if SIZEOF_REGISTER == 8
6539 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6540 opcode = OP_ATOMIC_STORE_I8;
6541 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6542 opcode = OP_ATOMIC_STORE_U8;
6544 else if (fsig->params [0]->type == MONO_TYPE_I)
6545 opcode = OP_ATOMIC_STORE_I4;
6546 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6547 opcode = OP_ATOMIC_STORE_U4;
6551 if (!mono_arch_opcode_supported (opcode))
6554 MONO_INST_NEW (cfg, ins, opcode);
6555 ins->dreg = args [0]->dreg;
6556 ins->sreg1 = args [1]->dreg;
6557 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6558 MONO_ADD_INS (cfg->cbb, ins);
6560 if (cfg->gen_write_barriers && is_ref)
6561 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break -> user-break icall or nop, per break policy --- */
6567 } else if (cmethod->klass->image == mono_defaults.corlib &&
6568 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6569 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6570 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6571 if (should_insert_brekpoint (cfg->method)) {
6572 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6574 MONO_INST_NEW (cfg, ins, OP_NOP);
6575 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows -> compile-time constant --- */
6579 } else if (cmethod->klass->image == mono_defaults.corlib &&
6580 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6581 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6582 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6584 EMIT_NEW_ICONST (cfg, ins, 1);
6586 EMIT_NEW_ICONST (cfg, ins, 0);
6589 } else if (cmethod->klass == mono_defaults.math_class) {
6591 * There is general branchless code for Min/Max, but it does not work for
6593 * http://everything2.com/?node_id=1051618
/* --- MonoMac/monotouch/Xamarin.iOS Selector.GetHandle on a string literal -> OP_OBJC_GET_SELECTOR --- */
6595 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6596 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6597 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6598 !strcmp (cmethod->klass->name, "Selector")) ||
6599 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6600 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6601 !strcmp (cmethod->klass->name, "Selector"))
6603 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6604 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6605 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6608 MonoJumpInfoToken *ji;
6611 cfg->disable_llvm = TRUE;
6613 if (args [0]->opcode == OP_GOT_ENTRY) {
6614 pi = args [0]->inst_p1;
6615 g_assert (pi->opcode == OP_PATCH_INFO);
6616 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6619 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6620 ji = args [0]->inst_p0;
/* the ldstr argument is replaced by the selector lookup itself */
6623 NULLIFY_INS (args [0]);
6626 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6627 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6628 ins->dreg = mono_alloc_ireg (cfg);
6630 ins->inst_p0 = mono_string_to_utf8 (s);
6631 MONO_ADD_INS (cfg->cbb, ins);
/* --- fallbacks: SIMD intrinsics, native-types intrinsics, LLVM-specific, then arch-specific --- */
6637 #ifdef MONO_ARCH_SIMD_INTRINSICS
6638 if (cfg->opt & MONO_OPT_SIMD) {
6639 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6645 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6649 if (COMPILE_LLVM (cfg)) {
6650 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6655 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6659 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to faster JIT-emitted
 * equivalents; currently only managed allocation of strings
 * (String.InternalAllocateStr), and only when allocation profiling and
 * shared-code compilation are off. Returns the replacement call
 * instruction, or falls through when no redirection applies.
 * NOTE(review): this chunk appears line-sampled; the managed_alloc NULL
 * check and the trailing "return NULL" of the original body are not
 * visible here.
 */
6662 inline static MonoInst*
6663 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6664 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6666 if (method->klass == mono_defaults.string_class) {
6667 /* managed string allocation support */
6668 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6669 MonoInst *iargs [2];
6670 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6671 MonoMethod *managed_alloc = NULL;
6673 g_assert (vtable); /* Should not fail since it is System.String */
/* GC-specific managed allocator is unavailable when cross-compiling */
6674 #ifndef MONO_CROSS_COMPILE
6675 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) */
6679 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6680 iargs [1] = args [0];
6681 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each argument (including the implicit
 * 'this') of the method being inlined and store the corresponding stack
 * value SP [i] into it, so the inlined body can reference its arguments
 * through cfg->args [].
 */
6688 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6690 MonoInst *store, *temp;
6693 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static type is not in the signature; derive it from the stack */
6694 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6697 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6698 * would be different than the MonoInst's used to represent arguments, and
6699 * the ldelema implementation can't deal with that.
6700 * Solution: When ldelema is used on an inline argument, create a var for
6701 * it, emit ldelema on that var, and emit the saving code below in
6702 * inline_method () if needed.
6704 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6705 cfg->args [i] = temp;
6706 /* This uses cfg->args [i] which is set by the preceding line */
6707 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6708 store->cil_code = sp [0]->cil_code;
6713 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6714 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6716 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable. The env var is read once and cached in a local
 * static. Returns TRUE when the callee passes the filter.
 */
6718 check_inline_called_method_name_limit (MonoMethod *called_method)
6721 static const char *limit = NULL;
6723 if (limit == NULL) {
6724 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6726 if (limit_string != NULL)
6727 limit = limit_string;
6732 if (limit [0] != '\0') {
6733 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match against the configured limit string */
6735 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6736 g_free (called_method_name);
6738 //return (strncmp_result <= 0);
6739 return (strncmp_result == 0);
6746 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining when the CALLER's full name starts
 * with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Mirrors
 * check_inline_called_method_name_limit () above.
 */
6748 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6751 static const char *limit = NULL;
6753 if (limit == NULL) {
6754 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6755 if (limit_string != NULL) {
6756 limit = limit_string;
6762 if (limit [0] != '\0') {
6763 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit string */
6765 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6766 g_free (caller_method_name);
6768 //return (strncmp_result <= 0);
6769 return (strncmp_result == 0);
6777 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6779 static double r8_0 = 0.0;
6780 static float r4_0 = 0.0;
6784 rtype = mini_get_underlying_type (cfg, rtype);
6788 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6789 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6790 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6791 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6792 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6793 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6794 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6795 ins->type = STACK_R4;
6796 ins->inst_p0 = (void*)&r4_0;
6798 MONO_ADD_INS (cfg->cbb, ins);
6799 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6800 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6801 ins->type = STACK_R8;
6802 ins->inst_p0 = (void*)&r8_0;
6804 MONO_ADD_INS (cfg->cbb, ins);
6805 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6806 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6807 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6808 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6809 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6811 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6816 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6820 rtype = mini_get_underlying_type (cfg, rtype);
6824 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6825 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6826 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6827 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6828 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6829 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6830 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6831 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6832 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6833 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6834 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6835 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6836 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6837 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6839 emit_init_rvar (cfg, dreg, rtype);
6843 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6845 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6847 MonoInst *var = cfg->locals [local];
6848 if (COMPILE_SOFT_FLOAT (cfg)) {
6850 int reg = alloc_dreg (cfg, var->type);
6851 emit_init_rvar (cfg, reg, type);
6852 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6855 emit_init_rvar (cfg, var->dreg, type);
6857 emit_dummy_init_rvar (cfg, var->dreg, type);
6864 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current compile at IP by recursively invoking
 * mono_method_to_ir () with saved/restored compile state. Returns the cost
 * of the inline on success, or 0 when the inline was aborted.
 * NOTE(review): this chunk appears line-sampled; several statements of the
 * original body (braces, early returns, some assignments) are not visible.
 */
6867 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6868 guchar *ip, guint real_offset, gboolean inline_always)
6870 MonoInst *ins, *rvar = NULL;
6871 MonoMethodHeader *cheader;
6872 MonoBasicBlock *ebblock, *sbblock;
6874 MonoMethod *prev_inlined_method;
6875 MonoInst **prev_locals, **prev_args;
6876 MonoType **prev_arg_types;
6877 guint prev_real_offset;
6878 GHashTable *prev_cbb_hash;
6879 MonoBasicBlock **prev_cil_offset_to_bb;
6880 MonoBasicBlock *prev_cbb;
6881 unsigned char* prev_cil_start;
6882 guint32 prev_cil_offset_to_bb_len;
6883 MonoMethod *prev_current_method;
6884 MonoGenericContext *prev_generic_context;
6885 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6887 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven filters used while debugging the inliner */
6889 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6890 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6893 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6894 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6899 fsig = mono_method_signature (cmethod);
6901 if (cfg->verbose_level > 2)
6902 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6904 if (!cmethod->inline_info) {
6905 cfg->stat_inlineable_methods++;
6906 cmethod->inline_info = 1;
6909 /* allocate local variables */
6910 cheader = mono_method_get_header (cmethod);
6912 if (cheader == NULL || mono_loader_get_last_error ()) {
6913 MonoLoaderError *error = mono_loader_get_last_error ();
6916 mono_metadata_free_mh (cheader);
6917 if (inline_always && error)
6918 mono_cfg_set_exception (cfg, error->exception_type);
6920 mono_loader_clear_error ();
6924 /* Must verify before creating locals as it can cause the JIT to assert. */
6925 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6926 mono_metadata_free_mh (cheader);
6930 /* allocate space to store the return value */
6931 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6932 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the inlined body */
6935 prev_locals = cfg->locals;
6936 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6937 for (i = 0; i < cheader->num_locals; ++i)
6938 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6940 /* allocate start and end blocks */
6941 /* This is needed so if the inline is aborted, we can clean up */
6942 NEW_BBLOCK (cfg, sbblock);
6943 sbblock->real_offset = real_offset;
6945 NEW_BBLOCK (cfg, ebblock);
6946 ebblock->block_num = cfg->num_bblocks++;
6947 ebblock->real_offset = real_offset;
/*
 * Save all compile state that mono_method_to_ir () will clobber; it is
 * restored unconditionally after the recursive call below.
 */
6949 prev_args = cfg->args;
6950 prev_arg_types = cfg->arg_types;
6951 prev_inlined_method = cfg->inlined_method;
6952 cfg->inlined_method = cmethod;
6953 cfg->ret_var_set = FALSE;
6954 cfg->inline_depth ++;
6955 prev_real_offset = cfg->real_offset;
6956 prev_cbb_hash = cfg->cbb_hash;
6957 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6958 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6959 prev_cil_start = cfg->cil_start;
6960 prev_cbb = cfg->cbb;
6961 prev_current_method = cfg->current_method;
6962 prev_generic_context = cfg->generic_context;
6963 prev_ret_var_set = cfg->ret_var_set;
6964 prev_disable_inline = cfg->disable_inline;
6966 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively translate the callee's IL into IR between sbblock and ebblock */
6969 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6971 ret_var_set = cfg->ret_var_set;
/* Restore the saved compile state */
6973 cfg->inlined_method = prev_inlined_method;
6974 cfg->real_offset = prev_real_offset;
6975 cfg->cbb_hash = prev_cbb_hash;
6976 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6977 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6978 cfg->cil_start = prev_cil_start;
6979 cfg->locals = prev_locals;
6980 cfg->args = prev_args;
6981 cfg->arg_types = prev_arg_types;
6982 cfg->current_method = prev_current_method;
6983 cfg->generic_context = prev_generic_context;
6984 cfg->ret_var_set = prev_ret_var_set;
6985 cfg->disable_inline = prev_disable_inline;
6986 cfg->inline_depth --;
/* Accept the inline when the cost is small, or when inlining is mandatory */
6988 if ((costs >= 0 && costs < 60) || inline_always) {
6989 if (cfg->verbose_level > 2)
6990 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6992 cfg->stat_inlined_methods++;
6994 /* always add some code to avoid block split failures */
6995 MONO_INST_NEW (cfg, ins, OP_NOP);
6996 MONO_ADD_INS (prev_cbb, ins);
6998 prev_cbb->next_bb = sbblock;
6999 link_bblock (cfg, prev_cbb, sbblock);
7002 * Get rid of the begin and end bblocks if possible to aid local
7005 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7007 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7008 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7010 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7011 MonoBasicBlock *prev = ebblock->in_bb [0];
7012 mono_merge_basic_blocks (cfg, prev, ebblock);
7014 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7015 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7016 cfg->cbb = prev_cbb;
7020 * It's possible that the rvar is set in some prev bblock, but not in others.
7026 for (i = 0; i < ebblock->in_count; ++i) {
7027 bb = ebblock->in_bb [i];
7029 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7032 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7042 * If the inlined method contains only a throw, then the ret var is not
7043 * set, so set it to a dummy value.
7046 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7048 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7051 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: reset the exception state and discard the new bblocks */
7054 if (cfg->verbose_level > 2)
7055 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7056 cfg->exception_type = MONO_EXCEPTION_NONE;
7057 mono_loader_clear_error ();
7059 /* This gets rid of the newly added bblocks */
7060 cfg->cbb = prev_cbb;
7062 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7067 * Some of these comments may well be out-of-date.
7068 * Design decisions: we do a single pass over the IL code (and we do bblock
7069 * splitting/merging in the few cases when it's required: a back jump to an IL
7070 * address that was not already seen as bblock starting point).
7071 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7072 * Complex operations are decomposed in simpler ones right away. We need to let the
7073 * arch-specific code peek and poke inside this process somehow (except when the
7074 * optimizations can take advantage of the full semantic info of coarse opcodes).
7075 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7076 * MonoInst->opcode initially is the IL opcode or some simplification of that
7077 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7078 * opcode with value bigger than OP_LAST.
7079 * At this point the IR can be handed over to an interpreter, a dumb code generator
7080 * or to the optimizing code generator that will translate it to SSA form.
7082 * Profiling directed optimizations.
7083 * We may compile by default with few or no optimizations and instrument the code
7084 * or the user may indicate what methods to optimize the most either in a config file
7085 * or through repeated runs where the compiler applies offline the optimizations to
7086 * each method and then decides if it was worth it.
7089 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7090 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7091 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7092 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7093 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7094 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7095 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7096 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7098 /* offset from br.s -> br like opcodes */
7099 #define BIG_BRANCH_OFFSET 13
7102 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7104 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7106 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) once and create a basic block at every
 * branch target and at every instruction following a branch, by dispatching
 * on each opcode's argument kind. Also marks blocks containing a throw as
 * out-of-line so they can be moved out of the hot path.
 * NOTE(review): this chunk appears line-sampled; several case labels and
 * the ip-advance statements of the original switch are not visible.
 */
7110 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7112 unsigned char *ip = start;
7113 unsigned char *target;
7116 MonoBasicBlock *bblock;
7117 const MonoOpcode *opcode;
7120 cli_addr = ip - start;
7121 i = mono_opcode_value ((const guint8 **)&ip, end);
7124 opcode = &mono_opcodes [i];
/* Advance over the operand and record branch targets per argument kind */
7125 switch (opcode->argument) {
7126 case MonoInlineNone:
7129 case MonoInlineString:
7130 case MonoInlineType:
7131 case MonoInlineField:
7132 case MonoInlineMethod:
7135 case MonoShortInlineR:
7142 case MonoShortInlineVar:
7143 case MonoShortInlineI:
7146 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
7147 target = start + cli_addr + 2 + (signed char)ip [1];
7148 GET_BBLOCK (cfg, bblock, target);
7151 GET_BBLOCK (cfg, bblock, ip);
7153 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
7154 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7155 GET_BBLOCK (cfg, bblock, target);
7158 GET_BBLOCK (cfg, bblock, ip);
7160 case MonoInlineSwitch: {
7161 guint32 n = read32 (ip + 1);
/* The switch table holds N 4-byte offsets relative to its own end */
7164 cli_addr += 5 + 4 * n;
7165 target = start + cli_addr;
7166 GET_BBLOCK (cfg, bblock, target);
7168 for (j = 0; j < n; ++j) {
7169 target = start + cli_addr + (gint32)read32 (ip);
7170 GET_BBLOCK (cfg, bblock, target);
7180 g_assert_not_reached ();
7183 if (i == CEE_THROW) {
7184 unsigned char *bb_start = ip - 1;
7186 /* Find the start of the bblock containing the throw */
7188 while ((bb_start >= start) && !bblock) {
7189 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Blocks ending in a throw are cold: move them out of line */
7193 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types. For wrapper methods the target is read from the
 * wrapper data and inflated with CONTEXT; otherwise the token is looked
 * up through the metadata.
 */
7203 static inline MonoMethod *
7204 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7208 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7209 method = mono_method_get_wrapper_data (m, token);
7212 method = mono_class_inflate_generic_method_checked (method, context, &error);
7213 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7216 method = mono_get_method_full (m->klass->image, token, klass, context);
7222 static inline MonoMethod *
7223 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7225 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7227 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT, and initialize the class before returning it. For wrappers the
 * class comes from the wrapper data.
 */
7233 static inline MonoClass*
7234 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7239 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7240 klass = mono_method_get_wrapper_data (method, token);
7242 klass = mono_class_inflate_generic_class (klass, context);
7244 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7245 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7248 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD,
 * inflating wrapper-provided signatures with CONTEXT; otherwise the
 * signature is parsed from the metadata.
 */
7252 static inline MonoMethodSignature*
7253 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7255 MonoMethodSignature *fsig;
7257 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7260 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7262 fsig = mono_inflate_generic_signature (fsig, context, &error);
7264 g_assert (mono_error_ok (&error));
7267 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the
 * SecurityManager.ThrowException(Exception) helper method used to raise
 * CoreCLR security exceptions at runtime.
 */
7273 throw_exception (void)
7275 static MonoMethod *method = NULL;
7278 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7279 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
7286 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7288 MonoMethod *thrower = throw_exception ();
7291 EMIT_NEW_PCONST (cfg, args [0], ex);
7292 mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
7300 get_original_method (MonoMethod *method)
7302 if (method->wrapper_type == MONO_WRAPPER_NONE)
7305 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7306 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7309 /* in other cases we need to find the original method */
7310 return mono_marshal_method_from_wrapper (method);
7314 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7316 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7317 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7319 emit_throw_exception (cfg, ex);
7323 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7325 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7326 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7328 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL
 * sequence at IP for a newly created array of KLASS with LEN elements,
 * and return a pointer to the static initialization data (or, for AOT,
 * the RVA wrapped with GUINT_TO_POINTER). *OUT_SIZE and *OUT_FIELD_TOKEN
 * receive the data size and the field token.
 * NOTE(review): this chunk appears line-sampled; most case labels of the
 * element-size switch and several early returns are not visible.
 */
7332 * Check that the IL instructions at ip are the array initialization
7333 * sequence and return the pointer to the data and the size.
7336 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7339 * newarr[System.Int32]
7341 * ldtoken field valuetype ...
7342 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand is a Field token (table 0x04) */
7344 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7346 guint32 token = read32 (ip + 7);
7347 guint32 field_token = read32 (ip + 2);
7348 guint32 field_index = field_token & 0xffffff;
7350 const char *data_ptr;
7352 MonoMethod *cmethod;
7353 MonoClass *dummy_class;
7354 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7358 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7362 *out_field_token = field_token;
7364 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the canonical corlib RuntimeHelpers.InitializeArray qualifies */
7367 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7369 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7370 case MONO_TYPE_BOOLEAN:
7374 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7375 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7376 case MONO_TYPE_CHAR:
/* Reject if the computed blob size exceeds the RVA field's declared size */
7393 if (size > mono_type_size (field->type, &dummy_align))
7396 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7397 if (!image_is_dynamic (method->klass->image)) {
7398 field_index = read32 (ip + 2) & 0xffffff;
7399 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7400 data_ptr = mono_image_rva_map (method->klass->image, rva);
7401 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7402 /* for aot code we do the lookup on load */
7403 if (aot && data_ptr)
7404 return GUINT_TO_POINTER (rva);
7406 /* FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7408 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending instruction at IP; the header is queued
 * on headers_to_free for later release.
 */
7416 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7418 char *method_fname = mono_method_full_name (method, TRUE);
7420 MonoMethodHeader *header = mono_method_get_header (method);
7422 if (header->code_size == 0)
7423 method_code = g_strdup ("method body is empty.");
7425 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7426 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7427 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7428 g_free (method_fname);
7429 g_free (method_code);
7430 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7434 set_exception_object (MonoCompile *cfg, MonoException *exception)
7436 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7437 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7438 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value *SP into local N, eliding the move
 * entirely when the value is a constant that was just emitted (its dreg
 * is simply retargeted to the local's dreg).
 */
7442 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7445 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7446 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7447 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7448 /* Optimize reg-reg moves away */
7450 * Can't optimize other opcodes, since sp[0] might point to
7451 * the last ins of a decomposed opcode.
7453 sp [0]->dreg = (cfg)->locals [n]->dreg;
7455 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <type>": emit the local
 * initialization directly and return the IP past the consumed sequence,
 * so the address-taken local (which inhibits many optimizations) never
 * materializes. Returns NULL when the pattern does not match.
 * NOTE(review): relies on the CHECK_TYPELOAD error-goto machinery defined
 * earlier in this file.
 */
7460 * ldloca inhibits many optimizations so try to get rid of it in common
7463 static inline unsigned char *
7464 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7474 local = read16 (ip + 2);
/* Match a following "initobj" prefix-opcode within the same bblock */
7478 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7479 /* From the INITOBJ case */
7480 token = read32 (ip + 2);
7481 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7482 CHECK_TYPELOAD (klass);
7483 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7484 emit_init_local (cfg, local, type, TRUE);
7492 is_exception_class (MonoClass *class)
7495 if (class == mono_defaults.exception_class)
7497 class = class->parent;
7503 * is_jit_optimizer_disabled:
 * Determine whether M's assembly has a DebuggableAttribute with the
7506 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *
 *   Return the assembly-level DebuggableAttribute IsJITOptimizerDisabled
 * flag for M's assembly. The result is cached on the MonoAssembly, with a
 * memory barrier between writing the value and the inited flag so readers
 * never observe the flag without the value.
 * NOTE(review): this chunk appears line-sampled; the attribute-argument
 * decoding statements of the original body are not all visible.
 */
7509 is_jit_optimizer_disabled (MonoMethod *m)
7511 MonoAssembly *ass = m->klass->image->assembly;
7512 MonoCustomAttrInfo* attrs;
7513 static MonoClass *klass;
7515 gboolean val = FALSE;
/* Fast path: cached per-assembly answer */
7518 if (ass->jit_optimizer_disabled_inited)
7519 return ass->jit_optimizer_disabled;
7522 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute class: optimizer stays enabled */
7525 ass->jit_optimizer_disabled = FALSE;
7526 mono_memory_barrier ();
7527 ass->jit_optimizer_disabled_inited = TRUE;
7531 attrs = mono_custom_attrs_from_assembly (ass);
7533 for (i = 0; i < attrs->num_attrs; ++i) {
7534 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7536 MonoMethodSignature *sig;
7538 if (!attr->ctor || attr->ctor->klass != klass)
7540 /* Decode the attribute. See reflection.c */
7541 p = (const char*)attr->data;
7542 g_assert (read16 (p) == 0x0001);
7545 // FIXME: Support named parameters
7546 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded */
7547 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7549 /* Two boolean arguments */
7553 mono_custom_attrs_free (attrs);
7556 ass->jit_optimizer_disabled = val;
7557 mono_memory_barrier ();
7558 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG
 * can be emitted: the architecture must support it, no argument may point
 * into the caller's stack (byref/PTR/FNPTR params, valuetype 'this'), and
 * pinvokes, save_lmf methods, most wrappers and non-CEE_CALL opcodes are
 * excluded.
 */
7564 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7566 gboolean supported_tail_call;
/* Architecture-specific check when available, generic signature check otherwise */
7569 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7570 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7572 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7575 for (i = 0; i < fsig->param_count; ++i) {
7576 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7577 /* These can point to the current method's stack */
7578 supported_tail_call = FALSE;
7580 if (fsig->hasthis && cmethod->klass->valuetype)
7581 /* this might point to the current method's stack */
7582 supported_tail_call = FALSE;
7583 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7584 supported_tail_call = FALSE;
7585 if (cfg->method->save_lmf)
7586 supported_tail_call = FALSE;
7587 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7588 supported_tail_call = FALSE;
7589 if (call_opcode != CEE_CALL)
7590 supported_tail_call = FALSE;
7592 /* Debugging support */
7594 if (supported_tail_call) {
7595 if (!mono_debug_count ())
7596 supported_tail_call = FALSE;
7600 return supported_tail_call;
7603 /* emits the code needed to access a managed tls var (like ThreadStatic)
7604 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7605 * pointer for the current thread.
7606 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Emit the address computation for a managed TLS variable (e.g. a
 * [ThreadStatic] field): index into thread->static_data with the low 6
 * bits of OFFSET_REG, load the chunk pointer, then add the remaining bits
 * (shifted right by 6, masked to 25 bits) as the byte offset. Returns the
 * MonoInst* holding the variable's address.
 */
7609 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7612 int static_data_reg, array_reg, dreg;
7613 int offset2_reg, idx_reg;
7614 // inlined access to the tls data (see threads.c)
7615 static_data_reg = alloc_ireg (cfg);
7616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* Chunk index lives in the low 6 bits of the encoded offset */
7617 idx_reg = alloc_ireg (cfg);
7618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
7619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7620 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7621 array_reg = alloc_ireg (cfg);
7622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* Byte offset inside the chunk: remaining bits, masked to 25 bits */
7623 offset2_reg = alloc_ireg (cfg);
7624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
7625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
7626 dreg = alloc_ireg (cfg);
7627 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7634 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the call to CMETHOD, a constructor invoked by a NEWOBJ opcode,
 * choosing between an intrinsic, inlining, a gsharedvt/rgctx indirect
 * call, or a plain (possibly virtual, for MarshalByRef) method call.
 * NOTE(review): relies on CHECK_TYPELOAD / CHECK_CFG_EXCEPTION /
 * INLINE_FAILURE / GSHAREDVT_FAILURE goto-label machinery defined in the
 * enclosing translation unit; this chunk appears line-sampled.
 */
7637 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7638 MonoInst **sp, guint8 *ip, int *inline_costs)
7640 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an rgctx/vtable argument */
7642 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7643 mono_method_is_generic_sharable (cmethod, TRUE)) {
7644 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7645 mono_class_vtable (cfg->domain, cmethod->klass);
7646 CHECK_TYPELOAD (cmethod->klass);
7648 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7649 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7652 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7653 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7655 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7657 CHECK_TYPELOAD (cmethod->klass);
7658 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7663 /* Avoid virtual calls to ctors if possible */
7664 if (mono_class_is_marshalbyref (cmethod->klass))
7665 callvirt_this_arg = sp [0];
/* 1) try an intrinsic implementation of the ctor */
7667 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7668 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7669 CHECK_CFG_EXCEPTION;
/* 2) try inlining the ctor body (not for Exception subclasses) */
7670 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7671 mono_method_check_inlining (cfg, cmethod) &&
7672 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7675 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7676 cfg->real_offset += 5;
7678 *inline_costs += costs - 5;
7680 INLINE_FAILURE ("inline failure");
7681 // FIXME-VT: Clean this up
7682 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7683 GSHAREDVT_FAILURE(*ip);
7684 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt: call through the out trampoline */
7686 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7689 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7690 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) non-sharable generic code: indirect call via rgctx-resolved address */
7691 } else if (context_used &&
7692 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7693 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7694 MonoInst *cmethod_addr;
7696 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7698 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7699 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7701 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) plain direct (or MarshalByRef-virtual) ctor call */
7703 INLINE_FAILURE ("ctor call");
7704 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7705 callvirt_this_arg, NULL, vtable_arg);
7712 * mono_method_to_ir:
7714 * Translate the .net IL into linear IR.
7717 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7718 MonoInst *return_var, MonoInst **inline_args,
7719 guint inline_offset, gboolean is_virtual_call)
7722 MonoInst *ins, **sp, **stack_start;
7723 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7724 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7725 MonoMethod *cmethod, *method_definition;
7726 MonoInst **arg_array;
7727 MonoMethodHeader *header;
7729 guint32 token, ins_flag;
7731 MonoClass *constrained_class = NULL;
7732 unsigned char *ip, *end, *target, *err_pos;
7733 MonoMethodSignature *sig;
7734 MonoGenericContext *generic_context = NULL;
7735 MonoGenericContainer *generic_container = NULL;
7736 MonoType **param_types;
7737 int i, n, start_new_bblock, dreg;
7738 int num_calls = 0, inline_costs = 0;
7739 int breakpoint_id = 0;
7741 GSList *class_inits = NULL;
7742 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7744 gboolean init_locals, seq_points, skip_dead_blocks;
7745 gboolean sym_seq_points = FALSE;
7746 MonoDebugMethodInfo *minfo;
7747 MonoBitSet *seq_point_locs = NULL;
7748 MonoBitSet *seq_point_set_locs = NULL;
7750 cfg->disable_inline = is_jit_optimizer_disabled (method);
7752 /* serialization and xdomain stuff may need access to private fields and methods */
7753 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7754 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7755 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7756 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7757 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7758 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7760 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7761 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7762 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7763 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7764 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7766 image = method->klass->image;
7767 header = mono_method_get_header (method);
7769 MonoLoaderError *error;
7771 if ((error = mono_loader_get_last_error ())) {
7772 mono_cfg_set_exception (cfg, error->exception_type);
7774 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7775 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7777 goto exception_exit;
7779 generic_container = mono_method_get_generic_container (method);
7780 sig = mono_method_signature (method);
7781 num_args = sig->hasthis + sig->param_count;
7782 ip = (unsigned char*)header->code;
7783 cfg->cil_start = ip;
7784 end = ip + header->code_size;
7785 cfg->stat_cil_code_size += header->code_size;
7787 seq_points = cfg->gen_seq_points && cfg->method == method;
7789 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7790 /* We could hit a seq point before attaching to the JIT (#8338) */
7794 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7795 minfo = mono_debug_lookup_method (method);
7797 MonoSymSeqPoint *sps;
7798 int i, n_il_offsets;
7800 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7801 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7802 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7803 sym_seq_points = TRUE;
7804 for (i = 0; i < n_il_offsets; ++i) {
7805 if (sps [i].il_offset < header->code_size)
7806 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7809 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7810 /* Methods without line number info like auto-generated property accessors */
7811 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7812 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7813 sym_seq_points = TRUE;
7818 * Methods without init_locals set could cause asserts in various passes
7819 * (#497220). To work around this, we emit dummy initialization opcodes
7820 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7821 * on some platforms.
7823 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7824 init_locals = header->init_locals;
7828 method_definition = method;
7829 while (method_definition->is_inflated) {
7830 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7831 method_definition = imethod->declaring;
7834 /* SkipVerification is not allowed if core-clr is enabled */
7835 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7837 dont_verify_stloc = TRUE;
7840 if (sig->is_inflated)
7841 generic_context = mono_method_get_context (method);
7842 else if (generic_container)
7843 generic_context = &generic_container->context;
7844 cfg->generic_context = generic_context;
7846 if (!cfg->generic_sharing_context)
7847 g_assert (!sig->has_type_parameters);
7849 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7850 g_assert (method->is_inflated);
7851 g_assert (mono_method_get_context (method)->method_inst);
7853 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7854 g_assert (sig->generic_param_count);
7856 if (cfg->method == method) {
7857 cfg->real_offset = 0;
7859 cfg->real_offset = inline_offset;
7862 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7863 cfg->cil_offset_to_bb_len = header->code_size;
7865 cfg->current_method = method;
7867 if (cfg->verbose_level > 2)
7868 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7870 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7872 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7873 for (n = 0; n < sig->param_count; ++n)
7874 param_types [n + sig->hasthis] = sig->params [n];
7875 cfg->arg_types = param_types;
7877 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7878 if (cfg->method == method) {
7880 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7881 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7884 NEW_BBLOCK (cfg, start_bblock);
7885 cfg->bb_entry = start_bblock;
7886 start_bblock->cil_code = NULL;
7887 start_bblock->cil_length = 0;
7890 NEW_BBLOCK (cfg, end_bblock);
7891 cfg->bb_exit = end_bblock;
7892 end_bblock->cil_code = NULL;
7893 end_bblock->cil_length = 0;
7894 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7895 g_assert (cfg->num_bblocks == 2);
7897 arg_array = cfg->args;
7899 if (header->num_clauses) {
7900 cfg->spvars = g_hash_table_new (NULL, NULL);
7901 cfg->exvars = g_hash_table_new (NULL, NULL);
7903 /* handle exception clauses */
7904 for (i = 0; i < header->num_clauses; ++i) {
7905 MonoBasicBlock *try_bb;
7906 MonoExceptionClause *clause = &header->clauses [i];
7907 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7908 try_bb->real_offset = clause->try_offset;
7909 try_bb->try_start = TRUE;
7910 try_bb->region = ((i + 1) << 8) | clause->flags;
7911 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7912 tblock->real_offset = clause->handler_offset;
7913 tblock->flags |= BB_EXCEPTION_HANDLER;
7916 * Linking the try block with the EH block hinders inlining as we won't be able to
7917 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7919 if (COMPILE_LLVM (cfg))
7920 link_bblock (cfg, try_bb, tblock);
7922 if (*(ip + clause->handler_offset) == CEE_POP)
7923 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7925 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7926 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7927 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7928 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7929 MONO_ADD_INS (tblock, ins);
7931 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7932 /* finally clauses already have a seq point */
7933 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7934 MONO_ADD_INS (tblock, ins);
7937 /* todo: is a fault block unsafe to optimize? */
7938 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7939 tblock->flags |= BB_EXCEPTION_UNSAFE;
7942 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7944 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7946 /* catch and filter blocks get the exception object on the stack */
7947 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7948 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7950 /* mostly like handle_stack_args (), but just sets the input args */
7951 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7952 tblock->in_scount = 1;
7953 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7954 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7958 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7959 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7960 if (!cfg->compile_llvm) {
7961 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7962 ins->dreg = tblock->in_stack [0]->dreg;
7963 MONO_ADD_INS (tblock, ins);
7966 MonoInst *dummy_use;
7969 * Add a dummy use for the exvar so its liveness info will be
7972 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7975 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7976 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7977 tblock->flags |= BB_EXCEPTION_HANDLER;
7978 tblock->real_offset = clause->data.filter_offset;
7979 tblock->in_scount = 1;
7980 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7981 /* The filter block shares the exvar with the handler block */
7982 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7983 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7984 MONO_ADD_INS (tblock, ins);
7988 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7989 clause->data.catch_class &&
7990 cfg->generic_sharing_context &&
7991 mono_class_check_context_used (clause->data.catch_class)) {
7993 * In shared generic code with catch
7994 * clauses containing type variables
7995 * the exception handling code has to
7996 * be able to get to the rgctx.
7997 * Therefore we have to make sure that
7998 * the vtable/mrgctx argument (for
7999 * static or generic methods) or the
8000 * "this" argument (for non-static
8001 * methods) are live.
8003 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8004 mini_method_get_context (method)->method_inst ||
8005 method->klass->valuetype) {
8006 mono_get_vtable_var (cfg);
8008 MonoInst *dummy_use;
8010 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8015 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8016 cfg->cbb = start_bblock;
8017 cfg->args = arg_array;
8018 mono_save_args (cfg, sig, inline_args);
8021 /* FIRST CODE BLOCK */
8022 NEW_BBLOCK (cfg, tblock);
8023 tblock->cil_code = ip;
8027 ADD_BBLOCK (cfg, tblock);
8029 if (cfg->method == method) {
8030 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8031 if (breakpoint_id) {
8032 MONO_INST_NEW (cfg, ins, OP_BREAK);
8033 MONO_ADD_INS (cfg->cbb, ins);
8037 /* we use a separate basic block for the initialization code */
8038 NEW_BBLOCK (cfg, init_localsbb);
8039 cfg->bb_init = init_localsbb;
8040 init_localsbb->real_offset = cfg->real_offset;
8041 start_bblock->next_bb = init_localsbb;
8042 init_localsbb->next_bb = cfg->cbb;
8043 link_bblock (cfg, start_bblock, init_localsbb);
8044 link_bblock (cfg, init_localsbb, cfg->cbb);
8046 cfg->cbb = init_localsbb;
8048 if (cfg->gsharedvt && cfg->method == method) {
8049 MonoGSharedVtMethodInfo *info;
8050 MonoInst *var, *locals_var;
8053 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8054 info->method = cfg->method;
8055 info->count_entries = 16;
8056 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8057 cfg->gsharedvt_info = info;
8059 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8060 /* prevent it from being register allocated */
8061 //var->flags |= MONO_INST_VOLATILE;
8062 cfg->gsharedvt_info_var = var;
8064 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8065 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8067 /* Allocate locals */
8068 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8069 /* prevent it from being register allocated */
8070 //locals_var->flags |= MONO_INST_VOLATILE;
8071 cfg->gsharedvt_locals_var = locals_var;
8073 dreg = alloc_ireg (cfg);
8074 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8076 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8077 ins->dreg = locals_var->dreg;
8079 MONO_ADD_INS (cfg->cbb, ins);
8080 cfg->gsharedvt_locals_var_ins = ins;
8082 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8085 ins->flags |= MONO_INST_INIT;
8089 if (mono_security_core_clr_enabled ()) {
8090 /* check if this is native code, e.g. an icall or a p/invoke */
8091 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8092 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8094 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8095 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8097 /* if this ia a native call then it can only be JITted from platform code */
8098 if ((icall || pinvk) && method->klass && method->klass->image) {
8099 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8100 MonoException *ex = icall ? mono_get_exception_security () :
8101 mono_get_exception_method_access ();
8102 emit_throw_exception (cfg, ex);
8109 CHECK_CFG_EXCEPTION;
8111 if (header->code_size == 0)
8114 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8119 if (cfg->method == method)
8120 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8122 for (n = 0; n < header->num_locals; ++n) {
8123 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8128 /* We force the vtable variable here for all shared methods
8129 for the possibility that they might show up in a stack
8130 trace where their exact instantiation is needed. */
8131 if (cfg->generic_sharing_context && method == cfg->method) {
8132 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8133 mini_method_get_context (method)->method_inst ||
8134 method->klass->valuetype) {
8135 mono_get_vtable_var (cfg);
8137 /* FIXME: Is there a better way to do this?
8138 We need the variable live for the duration
8139 of the whole method. */
8140 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8144 /* add a check for this != NULL to inlined methods */
8145 if (is_virtual_call) {
8148 NEW_ARGLOAD (cfg, arg_ins, 0);
8149 MONO_ADD_INS (cfg->cbb, arg_ins);
8150 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8153 skip_dead_blocks = !dont_verify;
8154 if (skip_dead_blocks) {
8155 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8160 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8161 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8164 start_new_bblock = 0;
8166 if (cfg->method == method)
8167 cfg->real_offset = ip - header->code;
8169 cfg->real_offset = inline_offset;
8174 if (start_new_bblock) {
8175 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8176 if (start_new_bblock == 2) {
8177 g_assert (ip == tblock->cil_code);
8179 GET_BBLOCK (cfg, tblock, ip);
8181 cfg->cbb->next_bb = tblock;
8183 start_new_bblock = 0;
8184 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8185 if (cfg->verbose_level > 3)
8186 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8187 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8191 g_slist_free (class_inits);
8194 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8195 link_bblock (cfg, cfg->cbb, tblock);
8196 if (sp != stack_start) {
8197 handle_stack_args (cfg, stack_start, sp - stack_start);
8199 CHECK_UNVERIFIABLE (cfg);
8201 cfg->cbb->next_bb = tblock;
8203 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8204 if (cfg->verbose_level > 3)
8205 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8206 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8209 g_slist_free (class_inits);
8214 if (skip_dead_blocks) {
8215 int ip_offset = ip - header->code;
8217 if (ip_offset == bb->end)
8221 int op_size = mono_opcode_size (ip, end);
8222 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8224 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8226 if (ip_offset + op_size == bb->end) {
8227 MONO_INST_NEW (cfg, ins, OP_NOP);
8228 MONO_ADD_INS (cfg->cbb, ins);
8229 start_new_bblock = 1;
8237 * Sequence points are points where the debugger can place a breakpoint.
8238 * Currently, we generate these automatically at points where the IL
8241 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8243 * Make methods interruptable at the beginning, and at the targets of
8244 * backward branches.
8245 * Also, do this at the start of every bblock in methods with clauses too,
8246 * to be able to handle instructions with inprecise control flow like
8248 * Backward branches are handled at the end of method-to-ir ().
8250 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8251 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8253 /* Avoid sequence points on empty IL like .volatile */
8254 // FIXME: Enable this
8255 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8256 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8257 if ((sp != stack_start) && !sym_seq_point)
8258 ins->flags |= MONO_INST_NONEMPTY_STACK;
8259 MONO_ADD_INS (cfg->cbb, ins);
8262 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8265 cfg->cbb->real_offset = cfg->real_offset;
8267 if ((cfg->method == method) && cfg->coverage_info) {
8268 guint32 cil_offset = ip - header->code;
8269 cfg->coverage_info->data [cil_offset].cil_code = ip;
8271 /* TODO: Use an increment here */
8272 #if defined(TARGET_X86)
8273 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8274 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8276 MONO_ADD_INS (cfg->cbb, ins);
8278 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8279 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8283 if (cfg->verbose_level > 3)
8284 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8288 if (seq_points && !sym_seq_points && sp != stack_start) {
8290 * The C# compiler uses these nops to notify the JIT that it should
8291 * insert seq points.
8293 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8294 MONO_ADD_INS (cfg->cbb, ins);
8296 if (cfg->keep_cil_nops)
8297 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8299 MONO_INST_NEW (cfg, ins, OP_NOP);
8301 MONO_ADD_INS (cfg->cbb, ins);
8304 if (should_insert_brekpoint (cfg->method)) {
8305 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8307 MONO_INST_NEW (cfg, ins, OP_NOP);
8310 MONO_ADD_INS (cfg->cbb, ins);
8316 CHECK_STACK_OVF (1);
8317 n = (*ip)-CEE_LDARG_0;
8319 EMIT_NEW_ARGLOAD (cfg, ins, n);
8327 CHECK_STACK_OVF (1);
8328 n = (*ip)-CEE_LDLOC_0;
8330 EMIT_NEW_LOCLOAD (cfg, ins, n);
8339 n = (*ip)-CEE_STLOC_0;
8342 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8344 emit_stloc_ir (cfg, sp, header, n);
8351 CHECK_STACK_OVF (1);
8354 EMIT_NEW_ARGLOAD (cfg, ins, n);
8360 CHECK_STACK_OVF (1);
8363 NEW_ARGLOADA (cfg, ins, n);
8364 MONO_ADD_INS (cfg->cbb, ins);
8374 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8376 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8381 CHECK_STACK_OVF (1);
8384 EMIT_NEW_LOCLOAD (cfg, ins, n);
8388 case CEE_LDLOCA_S: {
8389 unsigned char *tmp_ip;
8391 CHECK_STACK_OVF (1);
8392 CHECK_LOCAL (ip [1]);
8394 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8400 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8409 CHECK_LOCAL (ip [1]);
8410 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8412 emit_stloc_ir (cfg, sp, header, ip [1]);
8417 CHECK_STACK_OVF (1);
8418 EMIT_NEW_PCONST (cfg, ins, NULL);
8419 ins->type = STACK_OBJ;
8424 CHECK_STACK_OVF (1);
8425 EMIT_NEW_ICONST (cfg, ins, -1);
8438 CHECK_STACK_OVF (1);
8439 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8445 CHECK_STACK_OVF (1);
8447 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8453 CHECK_STACK_OVF (1);
8454 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8460 CHECK_STACK_OVF (1);
8461 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8462 ins->type = STACK_I8;
8463 ins->dreg = alloc_dreg (cfg, STACK_I8);
8465 ins->inst_l = (gint64)read64 (ip);
8466 MONO_ADD_INS (cfg->cbb, ins);
8472 gboolean use_aotconst = FALSE;
8474 #ifdef TARGET_POWERPC
8475 /* FIXME: Clean this up */
8476 if (cfg->compile_aot)
8477 use_aotconst = TRUE;
8480 /* FIXME: we should really allocate this only late in the compilation process */
8481 f = mono_domain_alloc (cfg->domain, sizeof (float));
8483 CHECK_STACK_OVF (1);
8489 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8491 dreg = alloc_freg (cfg);
8492 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8493 ins->type = cfg->r4_stack_type;
8495 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8496 ins->type = cfg->r4_stack_type;
8497 ins->dreg = alloc_dreg (cfg, STACK_R8);
8499 MONO_ADD_INS (cfg->cbb, ins);
8509 gboolean use_aotconst = FALSE;
8511 #ifdef TARGET_POWERPC
8512 /* FIXME: Clean this up */
8513 if (cfg->compile_aot)
8514 use_aotconst = TRUE;
8517 /* FIXME: we should really allocate this only late in the compilation process */
8518 d = mono_domain_alloc (cfg->domain, sizeof (double));
8520 CHECK_STACK_OVF (1);
8526 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8528 dreg = alloc_freg (cfg);
8529 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8530 ins->type = STACK_R8;
8532 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8533 ins->type = STACK_R8;
8534 ins->dreg = alloc_dreg (cfg, STACK_R8);
8536 MONO_ADD_INS (cfg->cbb, ins);
8545 MonoInst *temp, *store;
8547 CHECK_STACK_OVF (1);
8551 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8552 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8554 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8557 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8570 if (sp [0]->type == STACK_R8)
8571 /* we need to pop the value from the x86 FP stack */
8572 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8578 INLINE_FAILURE ("jmp");
8579 GSHAREDVT_FAILURE (*ip);
8582 if (stack_start != sp)
8584 token = read32 (ip + 1);
8585 /* FIXME: check the signature matches */
8586 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8588 if (!cmethod || mono_loader_get_last_error ())
8591 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8592 GENERIC_SHARING_FAILURE (CEE_JMP);
8594 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8596 if (ARCH_HAVE_OP_TAIL_CALL) {
8597 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8600 /* Handle tail calls similarly to calls */
8601 n = fsig->param_count + fsig->hasthis;
8605 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8606 call->method = cmethod;
8607 call->tail_call = TRUE;
8608 call->signature = mono_method_signature (cmethod);
8609 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8610 call->inst.inst_p0 = cmethod;
8611 for (i = 0; i < n; ++i)
8612 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8614 mono_arch_emit_call (cfg, call);
8615 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8616 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8618 for (i = 0; i < num_args; ++i)
8619 /* Prevent arguments from being optimized away */
8620 arg_array [i]->flags |= MONO_INST_VOLATILE;
8622 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8623 ins = (MonoInst*)call;
8624 ins->inst_p0 = cmethod;
8625 MONO_ADD_INS (cfg->cbb, ins);
8629 start_new_bblock = 1;
8634 MonoMethodSignature *fsig;
8637 token = read32 (ip + 1);
8641 //GSHAREDVT_FAILURE (*ip);
8646 fsig = mini_get_signature (method, token, generic_context);
8648 if (method->dynamic && fsig->pinvoke) {
8652 * This is a call through a function pointer using a pinvoke
8653 * signature. Have to create a wrapper and call that instead.
8654 * FIXME: This is very slow, need to create a wrapper at JIT time
8655 * instead based on the signature.
8657 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8658 EMIT_NEW_PCONST (cfg, args [1], fsig);
8660 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8663 n = fsig->param_count + fsig->hasthis;
8667 //g_assert (!virtual || fsig->hasthis);
8671 inline_costs += 10 * num_calls++;
8674 * Making generic calls out of gsharedvt methods.
8675 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8676 * patching gshared method addresses into a gsharedvt method.
8678 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8680 * We pass the address to the gsharedvt trampoline in the rgctx reg
8682 MonoInst *callee = addr;
8684 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8686 GSHAREDVT_FAILURE (*ip);
8688 addr = emit_get_rgctx_sig (cfg, context_used,
8689 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8690 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8694 /* Prevent inlining of methods with indirect calls */
8695 INLINE_FAILURE ("indirect call");
8697 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8702 * Instead of emitting an indirect call, emit a direct call
8703 * with the contents of the aotconst as the patch info.
8705 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8706 info_type = addr->inst_c1;
8707 info_data = addr->inst_p0;
8709 info_type = addr->inst_right->inst_c1;
8710 info_data = addr->inst_right->inst_left;
8713 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8714 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8719 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8723 /* End of call, INS should contain the result of the call, if any */
8725 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8727 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8730 CHECK_CFG_EXCEPTION;
8734 constrained_class = NULL;
8738 case CEE_CALLVIRT: {
8739 MonoInst *addr = NULL;
8740 MonoMethodSignature *fsig = NULL;
8742 int virtual = *ip == CEE_CALLVIRT;
8743 gboolean pass_imt_from_rgctx = FALSE;
8744 MonoInst *imt_arg = NULL;
8745 MonoInst *keep_this_alive = NULL;
8746 gboolean pass_vtable = FALSE;
8747 gboolean pass_mrgctx = FALSE;
8748 MonoInst *vtable_arg = NULL;
8749 gboolean check_this = FALSE;
8750 gboolean supported_tail_call = FALSE;
8751 gboolean tail_call = FALSE;
8752 gboolean need_seq_point = FALSE;
8753 guint32 call_opcode = *ip;
8754 gboolean emit_widen = TRUE;
8755 gboolean push_res = TRUE;
8756 gboolean skip_ret = FALSE;
8757 gboolean delegate_invoke = FALSE;
8758 gboolean direct_icall = FALSE;
8759 gboolean constrained_partial_call = FALSE;
8760 MonoMethod *cil_method;
8763 token = read32 (ip + 1);
8767 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8768 cil_method = cmethod;
8770 if (constrained_class) {
8771 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8772 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8773 g_assert (!cmethod->klass->valuetype);
8774 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8775 constrained_partial_call = TRUE;
8779 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8780 if (cfg->verbose_level > 2)
8781 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8782 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8783 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8784 cfg->generic_sharing_context)) {
8785 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8789 if (cfg->verbose_level > 2)
8790 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8792 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8794 * This is needed since get_method_constrained can't find
8795 * the method in klass representing a type var.
8796 * The type var is guaranteed to be a reference type in this
8799 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8800 g_assert (!cmethod->klass->valuetype);
8802 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8808 if (!cmethod || mono_loader_get_last_error ())
8810 if (!dont_verify && !cfg->skip_visibility) {
8811 MonoMethod *target_method = cil_method;
8812 if (method->is_inflated) {
8813 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8815 if (!mono_method_can_access_method (method_definition, target_method) &&
8816 !mono_method_can_access_method (method, cil_method))
8817 METHOD_ACCESS_FAILURE (method, cil_method);
8820 if (mono_security_core_clr_enabled ())
8821 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8823 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8824 /* MS.NET seems to silently convert this to a callvirt */
8829 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8830 * converts to a callvirt.
8832 * tests/bug-515884.il is an example of this behavior
8834 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8835 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8836 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8840 if (!cmethod->klass->inited)
8841 if (!mono_class_init (cmethod->klass))
8842 TYPE_LOAD_ERROR (cmethod->klass);
8844 fsig = mono_method_signature (cmethod);
8847 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8848 mini_class_is_system_array (cmethod->klass)) {
8849 array_rank = cmethod->klass->rank;
8850 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8851 direct_icall = TRUE;
8852 } else if (fsig->pinvoke) {
8853 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8854 check_for_pending_exc, cfg->compile_aot);
8855 fsig = mono_method_signature (wrapper);
8856 } else if (constrained_class) {
8858 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8862 mono_save_token_info (cfg, image, token, cil_method);
8864 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8865 need_seq_point = TRUE;
8867 /* Don't support calls made using type arguments for now */
8869 if (cfg->gsharedvt) {
8870 if (mini_is_gsharedvt_signature (cfg, fsig))
8871 GSHAREDVT_FAILURE (*ip);
8875 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8876 g_assert_not_reached ();
8878 n = fsig->param_count + fsig->hasthis;
8880 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8883 if (!cfg->generic_sharing_context)
8884 g_assert (!mono_method_check_context_used (cmethod));
8888 //g_assert (!virtual || fsig->hasthis);
8892 if (constrained_class) {
8893 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8894 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8895 /* The 'Own method' case below */
8896 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8897 /* 'The type parameter is instantiated as a reference type' case below. */
8899 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8900 CHECK_CFG_EXCEPTION;
8907 * We have the `constrained.' prefix opcode.
8909 if (constrained_partial_call) {
8910 gboolean need_box = TRUE;
8913 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8914 * called method is not known at compile time either. The called method could end up being
8915 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8916 * to box the receiver.
8917 * A simple solution would be to box always and make a normal virtual call, but that would
8918 * be bad performance wise.
8920 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8922 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8929 MonoBasicBlock *is_ref_bb, *end_bb;
8930 MonoInst *nonbox_call;
8933 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8935 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8936 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8938 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8940 NEW_BBLOCK (cfg, is_ref_bb);
8941 NEW_BBLOCK (cfg, end_bb);
8943 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8944 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8945 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8948 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8953 MONO_START_BB (cfg, is_ref_bb);
8954 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8955 ins->klass = constrained_class;
8956 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8957 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8961 MONO_START_BB (cfg, end_bb);
8964 nonbox_call->dreg = ins->dreg;
8966 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8967 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8968 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8971 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8973 * The type parameter is instantiated as a valuetype,
8974 * but that type doesn't override the method we're
8975 * calling, so we need to box `this'.
8977 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8978 ins->klass = constrained_class;
8979 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8980 CHECK_CFG_EXCEPTION;
8981 } else if (!constrained_class->valuetype) {
8982 int dreg = alloc_ireg_ref (cfg);
8985 * The type parameter is instantiated as a reference
8986 * type. We have a managed pointer on the stack, so
8987 * we need to dereference it here.
8989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8990 ins->type = STACK_OBJ;
8993 if (cmethod->klass->valuetype) {
8996 /* Interface method */
8999 mono_class_setup_vtable (constrained_class);
9000 CHECK_TYPELOAD (constrained_class);
9001 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9003 TYPE_LOAD_ERROR (constrained_class);
9004 slot = mono_method_get_vtable_slot (cmethod);
9006 TYPE_LOAD_ERROR (cmethod->klass);
9007 cmethod = constrained_class->vtable [ioffset + slot];
9009 if (cmethod->klass == mono_defaults.enum_class) {
9010 /* Enum implements some interfaces, so treat this as the first case */
9011 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9012 ins->klass = constrained_class;
9013 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9014 CHECK_CFG_EXCEPTION;
9019 constrained_class = NULL;
9022 if (check_call_signature (cfg, fsig, sp))
9025 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9026 delegate_invoke = TRUE;
9028 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9029 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9030 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9038 * If the callee is a shared method, then its static cctor
9039 * might not get called after the call was patched.
9041 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9042 emit_generic_class_init (cfg, cmethod->klass);
9043 CHECK_TYPELOAD (cmethod->klass);
9046 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9048 if (cfg->generic_sharing_context) {
9049 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9051 context_used = mini_method_check_context_used (cfg, cmethod);
9053 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9054 /* Generic method interface
9055 calls are resolved via a
9056 helper function and don't
9058 if (!cmethod_context || !cmethod_context->method_inst)
9059 pass_imt_from_rgctx = TRUE;
9063 * If a shared method calls another
9064 * shared method then the caller must
9065 * have a generic sharing context
9066 * because the magic trampoline
9067 * requires it. FIXME: We shouldn't
9068 * have to force the vtable/mrgctx
9069 * variable here. Instead there
9070 * should be a flag in the cfg to
9071 * request a generic sharing context.
9074 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9075 mono_get_vtable_var (cfg);
9080 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9082 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9084 CHECK_TYPELOAD (cmethod->klass);
9085 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9090 g_assert (!vtable_arg);
9092 if (!cfg->compile_aot) {
9094 * emit_get_rgctx_method () calls mono_class_vtable () so check
9095 * for type load errors before.
9097 mono_class_setup_vtable (cmethod->klass);
9098 CHECK_TYPELOAD (cmethod->klass);
9101 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9103 /* !marshalbyref is needed to properly handle generic methods + remoting */
9104 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9105 MONO_METHOD_IS_FINAL (cmethod)) &&
9106 !mono_class_is_marshalbyref (cmethod->klass)) {
9113 if (pass_imt_from_rgctx) {
9114 g_assert (!pass_vtable);
9116 imt_arg = emit_get_rgctx_method (cfg, context_used,
9117 cmethod, MONO_RGCTX_INFO_METHOD);
9121 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9123 /* Calling virtual generic methods */
9124 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9125 !(MONO_METHOD_IS_FINAL (cmethod) &&
9126 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9127 fsig->generic_param_count &&
9128 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9129 MonoInst *this_temp, *this_arg_temp, *store;
9130 MonoInst *iargs [4];
9131 gboolean use_imt = FALSE;
9133 g_assert (fsig->is_inflated);
9135 /* Prevent inlining of methods that contain indirect calls */
9136 INLINE_FAILURE ("virtual generic call");
9138 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9139 GSHAREDVT_FAILURE (*ip);
9141 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9142 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9147 g_assert (!imt_arg);
9149 g_assert (cmethod->is_inflated);
9150 imt_arg = emit_get_rgctx_method (cfg, context_used,
9151 cmethod, MONO_RGCTX_INFO_METHOD);
9152 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9154 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9155 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9156 MONO_ADD_INS (cfg->cbb, store);
9158 /* FIXME: This should be a managed pointer */
9159 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9161 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9162 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9163 cmethod, MONO_RGCTX_INFO_METHOD);
9164 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9165 addr = mono_emit_jit_icall (cfg,
9166 mono_helper_compile_generic_method, iargs);
9168 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9170 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9177 * Implement a workaround for the inherent races involved in locking:
9183 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9184 * try block, the Exit () won't be executed, see:
9185 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9186 * To work around this, we extend such try blocks to include the last x bytes
9187 * of the Monitor.Enter () call.
9189 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9190 MonoBasicBlock *tbb;
9192 GET_BBLOCK (cfg, tbb, ip + 5);
9194 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9195 * from Monitor.Enter like ArgumentNullException.
9197 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9198 /* Mark this bblock as needing to be extended */
9199 tbb->extend_try_block = TRUE;
9203 /* Conversion to a JIT intrinsic */
9204 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9205 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9206 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9213 if ((cfg->opt & MONO_OPT_INLINE) &&
9214 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9215 mono_method_check_inlining (cfg, cmethod)) {
9217 gboolean always = FALSE;
9219 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9220 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9221 /* Prevent inlining of methods that call wrappers */
9222 INLINE_FAILURE ("wrapper call");
9223 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9227 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9229 cfg->real_offset += 5;
9231 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9232 /* *sp is already set by inline_method */
9237 inline_costs += costs;
9243 /* Tail recursion elimination */
9244 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9245 gboolean has_vtargs = FALSE;
9248 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9249 INLINE_FAILURE ("tail call");
9251 /* keep it simple */
9252 for (i = fsig->param_count - 1; i >= 0; i--) {
9253 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9258 for (i = 0; i < n; ++i)
9259 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9260 MONO_INST_NEW (cfg, ins, OP_BR);
9261 MONO_ADD_INS (cfg->cbb, ins);
9262 tblock = start_bblock->out_bb [0];
9263 link_bblock (cfg, cfg->cbb, tblock);
9264 ins->inst_target_bb = tblock;
9265 start_new_bblock = 1;
9267 /* skip the CEE_RET, too */
9268 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9275 inline_costs += 10 * num_calls++;
9278 * Making generic calls out of gsharedvt methods.
9279 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9280 * patching gshared method addresses into a gsharedvt method.
9282 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9283 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9284 MonoRgctxInfoType info_type;
9287 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9288 //GSHAREDVT_FAILURE (*ip);
9289 // disable for possible remoting calls
9290 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9291 GSHAREDVT_FAILURE (*ip);
9292 if (fsig->generic_param_count) {
9293 /* virtual generic call */
9294 g_assert (!imt_arg);
9295 /* Same as the virtual generic case above */
9296 imt_arg = emit_get_rgctx_method (cfg, context_used,
9297 cmethod, MONO_RGCTX_INFO_METHOD);
9298 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9300 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9301 /* This can happen when we call a fully instantiated iface method */
9302 imt_arg = emit_get_rgctx_method (cfg, context_used,
9303 cmethod, MONO_RGCTX_INFO_METHOD);
9308 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9309 keep_this_alive = sp [0];
9311 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9312 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9314 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9315 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9317 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9321 /* Generic sharing */
9324 * Use this if the callee is gsharedvt sharable too, since
9325 * at runtime we might find an instantiation so the call cannot
9326 * be patched (the 'no_patch' code path in mini-trampolines.c).
9328 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9329 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9330 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9331 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9332 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9333 INLINE_FAILURE ("gshared");
9335 g_assert (cfg->generic_sharing_context && cmethod);
9339 * We are compiling a call to a
9340 * generic method from shared code,
9341 * which means that we have to look up
9342 * the method in the rgctx and do an
9346 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9348 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9349 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9353 /* Direct calls to icalls */
9355 MonoMethod *wrapper;
9358 /* Inline the wrapper */
9359 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9361 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9362 g_assert (costs > 0);
9363 cfg->real_offset += 5;
9365 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9366 /* *sp is already set by inline_method */
9371 inline_costs += costs;
9380 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9381 MonoInst *val = sp [fsig->param_count];
9383 if (val->type == STACK_OBJ) {
9384 MonoInst *iargs [2];
9389 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9392 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9393 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9394 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9395 emit_write_barrier (cfg, addr, val);
9396 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9397 GSHAREDVT_FAILURE (*ip);
9398 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9399 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9401 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9402 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9403 if (!cmethod->klass->element_class->valuetype && !readonly)
9404 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9405 CHECK_TYPELOAD (cmethod->klass);
9408 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9411 g_assert_not_reached ();
9418 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9422 /* Tail prefix / tail call optimization */
9424 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9425 /* FIXME: runtime generic context pointer for jumps? */
9426 /* FIXME: handle this for generic sharing eventually */
9427 if ((ins_flag & MONO_INST_TAILCALL) &&
9428 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9429 supported_tail_call = TRUE;
9431 if (supported_tail_call) {
9434 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9435 INLINE_FAILURE ("tail call");
9437 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9439 if (ARCH_HAVE_OP_TAIL_CALL) {
9440 /* Handle tail calls similarly to normal calls */
9443 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9445 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9446 call->tail_call = TRUE;
9447 call->method = cmethod;
9448 call->signature = mono_method_signature (cmethod);
9451 * We implement tail calls by storing the actual arguments into the
9452 * argument variables, then emitting a CEE_JMP.
9454 for (i = 0; i < n; ++i) {
9455 /* Prevent argument from being register allocated */
9456 arg_array [i]->flags |= MONO_INST_VOLATILE;
9457 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9459 ins = (MonoInst*)call;
9460 ins->inst_p0 = cmethod;
9461 ins->inst_p1 = arg_array [0];
9462 MONO_ADD_INS (cfg->cbb, ins);
9463 link_bblock (cfg, cfg->cbb, end_bblock);
9464 start_new_bblock = 1;
9466 // FIXME: Eliminate unreachable epilogs
9469 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9470 * only reachable from this call.
9472 GET_BBLOCK (cfg, tblock, ip + 5);
9473 if (tblock == cfg->cbb || tblock->in_count == 0)
9482 * Synchronized wrappers.
9483 * It's hard to determine where to replace a method with its synchronized
9484 * wrapper without causing an infinite recursion. The current solution is
9485 * to add the synchronized wrapper in the trampolines, and to
9486 * change the called method to a dummy wrapper, and resolve that wrapper
9487 * to the real method in mono_jit_compile_method ().
9489 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9490 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9491 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9492 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9496 INLINE_FAILURE ("call");
9497 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9498 imt_arg, vtable_arg);
9501 link_bblock (cfg, cfg->cbb, end_bblock);
9502 start_new_bblock = 1;
9504 // FIXME: Eliminate unreachable epilogs
9507 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9508 * only reachable from this call.
9510 GET_BBLOCK (cfg, tblock, ip + 5);
9511 if (tblock == cfg->cbb || tblock->in_count == 0)
9518 /* End of call, INS should contain the result of the call, if any */
9520 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9523 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9528 if (keep_this_alive) {
9529 MonoInst *dummy_use;
9531 /* See mono_emit_method_call_full () */
9532 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9535 CHECK_CFG_EXCEPTION;
9539 g_assert (*ip == CEE_RET);
9543 constrained_class = NULL;
9545 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9549 if (cfg->method != method) {
9550 /* return from inlined method */
9552 * If in_count == 0, that means the ret is unreachable due to
9553 * being preceded by a throw. In that case, inline_method () will
9554 * handle setting the return value
9555 * (test case: test_0_inline_throw ()).
9557 if (return_var && cfg->cbb->in_count) {
9558 MonoType *ret_type = mono_method_signature (method)->ret;
9564 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9567 //g_assert (returnvar != -1);
9568 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9569 cfg->ret_var_set = TRUE;
9572 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9574 if (cfg->lmf_var && cfg->cbb->in_count)
9578 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9580 if (seq_points && !sym_seq_points) {
9582 * Place a seq point here too even though the IL stack is not
9583 * empty, so a step over on
9586 * will work correctly.
9588 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9589 MONO_ADD_INS (cfg->cbb, ins);
9592 g_assert (!return_var);
9596 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9599 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9602 if (!cfg->vret_addr) {
9605 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9607 EMIT_NEW_RETLOADA (cfg, ret_addr);
9609 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9610 ins->klass = mono_class_from_mono_type (ret_type);
9613 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9614 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9615 MonoInst *iargs [1];
9619 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9620 mono_arch_emit_setret (cfg, method, conv);
9622 mono_arch_emit_setret (cfg, method, *sp);
9625 mono_arch_emit_setret (cfg, method, *sp);
9630 if (sp != stack_start)
9632 MONO_INST_NEW (cfg, ins, OP_BR);
9634 ins->inst_target_bb = end_bblock;
9635 MONO_ADD_INS (cfg->cbb, ins);
9636 link_bblock (cfg, cfg->cbb, end_bblock);
9637 start_new_bblock = 1;
9641 MONO_INST_NEW (cfg, ins, OP_BR);
9643 target = ip + 1 + (signed char)(*ip);
9645 GET_BBLOCK (cfg, tblock, target);
9646 link_bblock (cfg, cfg->cbb, tblock);
9647 ins->inst_target_bb = tblock;
9648 if (sp != stack_start) {
9649 handle_stack_args (cfg, stack_start, sp - stack_start);
9651 CHECK_UNVERIFIABLE (cfg);
9653 MONO_ADD_INS (cfg->cbb, ins);
9654 start_new_bblock = 1;
9655 inline_costs += BRANCH_COST;
9669 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9671 target = ip + 1 + *(signed char*)ip;
9677 inline_costs += BRANCH_COST;
9681 MONO_INST_NEW (cfg, ins, OP_BR);
9684 target = ip + 4 + (gint32)read32(ip);
9686 GET_BBLOCK (cfg, tblock, target);
9687 link_bblock (cfg, cfg->cbb, tblock);
9688 ins->inst_target_bb = tblock;
9689 if (sp != stack_start) {
9690 handle_stack_args (cfg, stack_start, sp - stack_start);
9692 CHECK_UNVERIFIABLE (cfg);
9695 MONO_ADD_INS (cfg->cbb, ins);
9697 start_new_bblock = 1;
9698 inline_costs += BRANCH_COST;
9705 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9706 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9707 guint32 opsize = is_short ? 1 : 4;
9709 CHECK_OPSIZE (opsize);
9711 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9714 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9719 GET_BBLOCK (cfg, tblock, target);
9720 link_bblock (cfg, cfg->cbb, tblock);
9721 GET_BBLOCK (cfg, tblock, ip);
9722 link_bblock (cfg, cfg->cbb, tblock);
9724 if (sp != stack_start) {
9725 handle_stack_args (cfg, stack_start, sp - stack_start);
9726 CHECK_UNVERIFIABLE (cfg);
9729 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9730 cmp->sreg1 = sp [0]->dreg;
9731 type_from_op (cfg, cmp, sp [0], NULL);
9734 #if SIZEOF_REGISTER == 4
9735 if (cmp->opcode == OP_LCOMPARE_IMM) {
9736 /* Convert it to OP_LCOMPARE */
9737 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9738 ins->type = STACK_I8;
9739 ins->dreg = alloc_dreg (cfg, STACK_I8);
9741 MONO_ADD_INS (cfg->cbb, ins);
9742 cmp->opcode = OP_LCOMPARE;
9743 cmp->sreg2 = ins->dreg;
9746 MONO_ADD_INS (cfg->cbb, cmp);
9748 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9749 type_from_op (cfg, ins, sp [0], NULL);
9750 MONO_ADD_INS (cfg->cbb, ins);
9751 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9752 GET_BBLOCK (cfg, tblock, target);
9753 ins->inst_true_bb = tblock;
9754 GET_BBLOCK (cfg, tblock, ip);
9755 ins->inst_false_bb = tblock;
9756 start_new_bblock = 2;
9759 inline_costs += BRANCH_COST;
9774 MONO_INST_NEW (cfg, ins, *ip);
9776 target = ip + 4 + (gint32)read32(ip);
9782 inline_costs += BRANCH_COST;
9786 MonoBasicBlock **targets;
9787 MonoBasicBlock *default_bblock;
9788 MonoJumpInfoBBTable *table;
9789 int offset_reg = alloc_preg (cfg);
9790 int target_reg = alloc_preg (cfg);
9791 int table_reg = alloc_preg (cfg);
9792 int sum_reg = alloc_preg (cfg);
9793 gboolean use_op_switch;
9797 n = read32 (ip + 1);
9800 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9804 CHECK_OPSIZE (n * sizeof (guint32));
9805 target = ip + n * sizeof (guint32);
9807 GET_BBLOCK (cfg, default_bblock, target);
9808 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9810 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9811 for (i = 0; i < n; ++i) {
9812 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9813 targets [i] = tblock;
9814 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9818 if (sp != stack_start) {
9820 * Link the current bb with the targets as well, so handle_stack_args
9821 * will set their in_stack correctly.
9823 link_bblock (cfg, cfg->cbb, default_bblock);
9824 for (i = 0; i < n; ++i)
9825 link_bblock (cfg, cfg->cbb, targets [i]);
9827 handle_stack_args (cfg, stack_start, sp - stack_start);
9829 CHECK_UNVERIFIABLE (cfg);
9832 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9833 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9835 for (i = 0; i < n; ++i)
9836 link_bblock (cfg, cfg->cbb, targets [i]);
9838 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9839 table->table = targets;
9840 table->table_size = n;
9842 use_op_switch = FALSE;
9844 /* ARM implements SWITCH statements differently */
9845 /* FIXME: Make it use the generic implementation */
9846 if (!cfg->compile_aot)
9847 use_op_switch = TRUE;
9850 if (COMPILE_LLVM (cfg))
9851 use_op_switch = TRUE;
9853 cfg->cbb->has_jump_table = 1;
9855 if (use_op_switch) {
9856 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9857 ins->sreg1 = src1->dreg;
9858 ins->inst_p0 = table;
9859 ins->inst_many_bb = targets;
9860 ins->klass = GUINT_TO_POINTER (n);
9861 MONO_ADD_INS (cfg->cbb, ins);
9863 if (sizeof (gpointer) == 8)
9864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9868 #if SIZEOF_REGISTER == 8
9869 /* The upper word might not be zero, and we add it to a 64 bit address later */
9870 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9873 if (cfg->compile_aot) {
9874 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9876 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9877 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9878 ins->inst_p0 = table;
9879 ins->dreg = table_reg;
9880 MONO_ADD_INS (cfg->cbb, ins);
9883 /* FIXME: Use load_memindex */
9884 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9885 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9886 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9888 start_new_bblock = 1;
9889 inline_costs += (BRANCH_COST * 2);
9909 dreg = alloc_freg (cfg);
9912 dreg = alloc_lreg (cfg);
9915 dreg = alloc_ireg_ref (cfg);
9918 dreg = alloc_preg (cfg);
9921 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9922 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9923 if (*ip == CEE_LDIND_R4)
9924 ins->type = cfg->r4_stack_type;
9925 ins->flags |= ins_flag;
9926 MONO_ADD_INS (cfg->cbb, ins);
9928 if (ins_flag & MONO_INST_VOLATILE) {
9929 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9930 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9946 if (ins_flag & MONO_INST_VOLATILE) {
9947 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9948 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9951 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9952 ins->flags |= ins_flag;
9955 MONO_ADD_INS (cfg->cbb, ins);
9957 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9958 emit_write_barrier (cfg, sp [0], sp [1]);
9967 MONO_INST_NEW (cfg, ins, (*ip));
9969 ins->sreg1 = sp [0]->dreg;
9970 ins->sreg2 = sp [1]->dreg;
9971 type_from_op (cfg, ins, sp [0], sp [1]);
9973 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9975 /* Use the immediate opcodes if possible */
9976 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9977 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9978 if (imm_opcode != -1) {
9979 ins->opcode = imm_opcode;
9980 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9983 NULLIFY_INS (sp [1]);
9987 MONO_ADD_INS ((cfg)->cbb, (ins));
9989 *sp++ = mono_decompose_opcode (cfg, ins);
10006 MONO_INST_NEW (cfg, ins, (*ip));
10008 ins->sreg1 = sp [0]->dreg;
10009 ins->sreg2 = sp [1]->dreg;
10010 type_from_op (cfg, ins, sp [0], sp [1]);
10012 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10013 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10015 /* FIXME: Pass opcode to is_inst_imm */
10017 /* Use the immediate opcodes if possible */
10018 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10021 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10022 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10023 /* Keep emulated opcodes which are optimized away later */
10024 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10025 imm_opcode = mono_op_to_op_imm (ins->opcode);
10028 if (imm_opcode != -1) {
10029 ins->opcode = imm_opcode;
10030 if (sp [1]->opcode == OP_I8CONST) {
10031 #if SIZEOF_REGISTER == 8
10032 ins->inst_imm = sp [1]->inst_l;
10034 ins->inst_ls_word = sp [1]->inst_ls_word;
10035 ins->inst_ms_word = sp [1]->inst_ms_word;
10039 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10042 /* Might be followed by an instruction added by add_widen_op */
10043 if (sp [1]->next == NULL)
10044 NULLIFY_INS (sp [1]);
10047 MONO_ADD_INS ((cfg)->cbb, (ins));
10049 *sp++ = mono_decompose_opcode (cfg, ins);
10062 case CEE_CONV_OVF_I8:
10063 case CEE_CONV_OVF_U8:
10064 case CEE_CONV_R_UN:
10067 /* Special case this earlier so we have long constants in the IR */
10068 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10069 int data = sp [-1]->inst_c0;
10070 sp [-1]->opcode = OP_I8CONST;
10071 sp [-1]->type = STACK_I8;
10072 #if SIZEOF_REGISTER == 8
10073 if ((*ip) == CEE_CONV_U8)
10074 sp [-1]->inst_c0 = (guint32)data;
10076 sp [-1]->inst_c0 = data;
10078 sp [-1]->inst_ls_word = data;
10079 if ((*ip) == CEE_CONV_U8)
10080 sp [-1]->inst_ms_word = 0;
10082 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10084 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10091 case CEE_CONV_OVF_I4:
10092 case CEE_CONV_OVF_I1:
10093 case CEE_CONV_OVF_I2:
10094 case CEE_CONV_OVF_I:
10095 case CEE_CONV_OVF_U:
10098 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10099 ADD_UNOP (CEE_CONV_OVF_I8);
10106 case CEE_CONV_OVF_U1:
10107 case CEE_CONV_OVF_U2:
10108 case CEE_CONV_OVF_U4:
10111 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10112 ADD_UNOP (CEE_CONV_OVF_U8);
10119 case CEE_CONV_OVF_I1_UN:
10120 case CEE_CONV_OVF_I2_UN:
10121 case CEE_CONV_OVF_I4_UN:
10122 case CEE_CONV_OVF_I8_UN:
10123 case CEE_CONV_OVF_U1_UN:
10124 case CEE_CONV_OVF_U2_UN:
10125 case CEE_CONV_OVF_U4_UN:
10126 case CEE_CONV_OVF_U8_UN:
10127 case CEE_CONV_OVF_I_UN:
10128 case CEE_CONV_OVF_U_UN:
10135 CHECK_CFG_EXCEPTION;
10139 case CEE_ADD_OVF_UN:
10141 case CEE_MUL_OVF_UN:
10143 case CEE_SUB_OVF_UN:
10149 GSHAREDVT_FAILURE (*ip);
10152 token = read32 (ip + 1);
10153 klass = mini_get_class (method, token, generic_context);
10154 CHECK_TYPELOAD (klass);
10156 if (generic_class_is_reference_type (cfg, klass)) {
10157 MonoInst *store, *load;
10158 int dreg = alloc_ireg_ref (cfg);
10160 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10161 load->flags |= ins_flag;
10162 MONO_ADD_INS (cfg->cbb, load);
10164 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10165 store->flags |= ins_flag;
10166 MONO_ADD_INS (cfg->cbb, store);
10168 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10169 emit_write_barrier (cfg, sp [0], sp [1]);
10171 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10177 int loc_index = -1;
10183 token = read32 (ip + 1);
10184 klass = mini_get_class (method, token, generic_context);
10185 CHECK_TYPELOAD (klass);
10187 /* Optimize the common ldobj+stloc combination */
10190 loc_index = ip [6];
10197 loc_index = ip [5] - CEE_STLOC_0;
10204 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10205 CHECK_LOCAL (loc_index);
10207 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10208 ins->dreg = cfg->locals [loc_index]->dreg;
10209 ins->flags |= ins_flag;
10212 if (ins_flag & MONO_INST_VOLATILE) {
10213 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10214 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10220 /* Optimize the ldobj+stobj combination */
10221 /* The reference case ends up being a load+store anyway */
10222 /* Skip this if the operation is volatile. */
10223 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10228 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10235 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10236 ins->flags |= ins_flag;
10239 if (ins_flag & MONO_INST_VOLATILE) {
10240 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10241 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10250 CHECK_STACK_OVF (1);
10252 n = read32 (ip + 1);
10254 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10255 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10256 ins->type = STACK_OBJ;
10259 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10260 MonoInst *iargs [1];
10261 char *str = mono_method_get_wrapper_data (method, n);
10263 if (cfg->compile_aot)
10264 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10266 EMIT_NEW_PCONST (cfg, iargs [0], str);
10267 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10269 if (cfg->opt & MONO_OPT_SHARED) {
10270 MonoInst *iargs [3];
10272 if (cfg->compile_aot) {
10273 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10275 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10276 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10277 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10278 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10279 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10281 if (cfg->cbb->out_of_line) {
10282 MonoInst *iargs [2];
10284 if (image == mono_defaults.corlib) {
10286 * Avoid relocations in AOT and save some space by using a
10287 * version of helper_ldstr specialized to mscorlib.
10289 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10290 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10292 /* Avoid creating the string object */
10293 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10294 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10295 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10299 if (cfg->compile_aot) {
10300 NEW_LDSTRCONST (cfg, ins, image, n);
10302 MONO_ADD_INS (cfg->cbb, ins);
10305 NEW_PCONST (cfg, ins, NULL);
10306 ins->type = STACK_OBJ;
10307 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10309 OUT_OF_MEMORY_FAILURE;
10312 MONO_ADD_INS (cfg->cbb, ins);
10321 MonoInst *iargs [2];
10322 MonoMethodSignature *fsig;
10325 MonoInst *vtable_arg = NULL;
10328 token = read32 (ip + 1);
10329 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10330 if (!cmethod || mono_loader_get_last_error ())
10332 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10335 mono_save_token_info (cfg, image, token, cmethod);
10337 if (!mono_class_init (cmethod->klass))
10338 TYPE_LOAD_ERROR (cmethod->klass);
10340 context_used = mini_method_check_context_used (cfg, cmethod);
10342 if (mono_security_core_clr_enabled ())
10343 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10345 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10346 emit_generic_class_init (cfg, cmethod->klass);
10347 CHECK_TYPELOAD (cmethod->klass);
10351 if (cfg->gsharedvt) {
10352 if (mini_is_gsharedvt_variable_signature (sig))
10353 GSHAREDVT_FAILURE (*ip);
10357 n = fsig->param_count;
10361 * Generate smaller code for the common newobj <exception> instruction in
10362 * argument checking code.
10364 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10365 is_exception_class (cmethod->klass) && n <= 2 &&
10366 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10367 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10368 MonoInst *iargs [3];
10372 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10375 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10378 iargs [1] = sp [0];
10379 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10382 iargs [1] = sp [0];
10383 iargs [2] = sp [1];
10384 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10387 g_assert_not_reached ();
10395 /* move the args to allow room for 'this' in the first position */
10401 /* check_call_signature () requires sp[0] to be set */
10402 this_ins.type = STACK_OBJ;
10403 sp [0] = &this_ins;
10404 if (check_call_signature (cfg, fsig, sp))
10409 if (mini_class_is_system_array (cmethod->klass)) {
10410 *sp = emit_get_rgctx_method (cfg, context_used,
10411 cmethod, MONO_RGCTX_INFO_METHOD);
10413 /* Avoid varargs in the common case */
10414 if (fsig->param_count == 1)
10415 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10416 else if (fsig->param_count == 2)
10417 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10418 else if (fsig->param_count == 3)
10419 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10420 else if (fsig->param_count == 4)
10421 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10423 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10424 } else if (cmethod->string_ctor) {
10425 g_assert (!context_used);
10426 g_assert (!vtable_arg);
10427 /* we simply pass a null pointer */
10428 EMIT_NEW_PCONST (cfg, *sp, NULL);
10429 /* now call the string ctor */
10430 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10432 if (cmethod->klass->valuetype) {
10433 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10434 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10435 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10440 * The code generated by mini_emit_virtual_call () expects
10441 * iargs [0] to be a boxed instance, but luckily the vcall
10442 * will be transformed into a normal call there.
10444 } else if (context_used) {
10445 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10448 MonoVTable *vtable = NULL;
10450 if (!cfg->compile_aot)
10451 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10452 CHECK_TYPELOAD (cmethod->klass);
10455 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10456 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10457 * As a workaround, we call class cctors before allocating objects.
10459 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10460 emit_class_init (cfg, cmethod->klass);
10461 if (cfg->verbose_level > 2)
10462 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10463 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10466 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10469 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10472 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10474 /* Now call the actual ctor */
10475 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10476 CHECK_CFG_EXCEPTION;
10479 if (alloc == NULL) {
10481 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10482 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10490 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10491 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10494 case CEE_CASTCLASS:
10498 token = read32 (ip + 1);
10499 klass = mini_get_class (method, token, generic_context);
10500 CHECK_TYPELOAD (klass);
10501 if (sp [0]->type != STACK_OBJ)
10504 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10505 CHECK_CFG_EXCEPTION;
10514 token = read32 (ip + 1);
10515 klass = mini_get_class (method, token, generic_context);
10516 CHECK_TYPELOAD (klass);
10517 if (sp [0]->type != STACK_OBJ)
10520 context_used = mini_class_check_context_used (cfg, klass);
10522 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10523 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10524 MonoInst *args [3];
10531 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10534 if (cfg->compile_aot) {
10535 idx = get_castclass_cache_idx (cfg);
10536 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10538 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10541 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10544 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10545 MonoMethod *mono_isinst;
10546 MonoInst *iargs [1];
10549 mono_isinst = mono_marshal_get_isinst (klass);
10550 iargs [0] = sp [0];
10552 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10553 iargs, ip, cfg->real_offset, TRUE);
10554 CHECK_CFG_EXCEPTION;
10555 g_assert (costs > 0);
10558 cfg->real_offset += 5;
10562 inline_costs += costs;
10565 ins = handle_isinst (cfg, klass, *sp, context_used);
10566 CHECK_CFG_EXCEPTION;
10572 case CEE_UNBOX_ANY: {
10573 MonoInst *res, *addr;
10578 token = read32 (ip + 1);
10579 klass = mini_get_class (method, token, generic_context);
10580 CHECK_TYPELOAD (klass);
10582 mono_save_token_info (cfg, image, token, klass);
10584 context_used = mini_class_check_context_used (cfg, klass);
10586 if (mini_is_gsharedvt_klass (cfg, klass)) {
10587 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10589 } else if (generic_class_is_reference_type (cfg, klass)) {
10590 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10591 CHECK_CFG_EXCEPTION;
10592 } else if (mono_class_is_nullable (klass)) {
10593 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10595 addr = handle_unbox (cfg, klass, sp, context_used);
10597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10608 MonoClass *enum_class;
10609 MonoMethod *has_flag;
10615 token = read32 (ip + 1);
10616 klass = mini_get_class (method, token, generic_context);
10617 CHECK_TYPELOAD (klass);
10619 mono_save_token_info (cfg, image, token, klass);
10621 context_used = mini_class_check_context_used (cfg, klass);
10623 if (generic_class_is_reference_type (cfg, klass)) {
10629 if (klass == mono_defaults.void_class)
10631 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10633 /* frequent check in generic code: box (struct), brtrue */
10638 * <push int/long ptr>
10641 * constrained. MyFlags
10642 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10644 * If we find this sequence and the operand types on box and constrained
10645 * are equal, we can emit a specialized instruction sequence instead of
10646 * the very slow HasFlag () call.
10648 if ((cfg->opt & MONO_OPT_INTRINS) &&
10649 /* Cheap checks first. */
10650 ip + 5 + 6 + 5 < end &&
10651 ip [5] == CEE_PREFIX1 &&
10652 ip [6] == CEE_CONSTRAINED_ &&
10653 ip [11] == CEE_CALLVIRT &&
10654 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10655 mono_class_is_enum (klass) &&
10656 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10657 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10658 has_flag->klass == mono_defaults.enum_class &&
10659 !strcmp (has_flag->name, "HasFlag") &&
10660 has_flag->signature->hasthis &&
10661 has_flag->signature->param_count == 1) {
10662 CHECK_TYPELOAD (enum_class);
10664 if (enum_class == klass) {
10665 MonoInst *enum_this, *enum_flag;
10670 enum_this = sp [0];
10671 enum_flag = sp [1];
10673 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10678 // FIXME: LLVM can't handle the inconsistent bb linking
10679 if (!mono_class_is_nullable (klass) &&
10680 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10681 (ip [5] == CEE_BRTRUE ||
10682 ip [5] == CEE_BRTRUE_S ||
10683 ip [5] == CEE_BRFALSE ||
10684 ip [5] == CEE_BRFALSE_S)) {
10685 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10687 MonoBasicBlock *true_bb, *false_bb;
10691 if (cfg->verbose_level > 3) {
10692 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10693 printf ("<box+brtrue opt>\n");
10698 case CEE_BRFALSE_S:
10701 target = ip + 1 + (signed char)(*ip);
10708 target = ip + 4 + (gint)(read32 (ip));
10712 g_assert_not_reached ();
10716 * We need to link both bblocks, since it is needed for handling stack
10717 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10718 * Branching to only one of them would lead to inconsistencies, so
10719 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10721 GET_BBLOCK (cfg, true_bb, target);
10722 GET_BBLOCK (cfg, false_bb, ip);
10724 mono_link_bblock (cfg, cfg->cbb, true_bb);
10725 mono_link_bblock (cfg, cfg->cbb, false_bb);
10727 if (sp != stack_start) {
10728 handle_stack_args (cfg, stack_start, sp - stack_start);
10730 CHECK_UNVERIFIABLE (cfg);
10733 if (COMPILE_LLVM (cfg)) {
10734 dreg = alloc_ireg (cfg);
10735 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10738 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10740 /* The JIT can't eliminate the iconst+compare */
10741 MONO_INST_NEW (cfg, ins, OP_BR);
10742 ins->inst_target_bb = is_true ? true_bb : false_bb;
10743 MONO_ADD_INS (cfg->cbb, ins);
10746 start_new_bblock = 1;
10750 *sp++ = handle_box (cfg, val, klass, context_used);
10752 CHECK_CFG_EXCEPTION;
10761 token = read32 (ip + 1);
10762 klass = mini_get_class (method, token, generic_context);
10763 CHECK_TYPELOAD (klass);
10765 mono_save_token_info (cfg, image, token, klass);
10767 context_used = mini_class_check_context_used (cfg, klass);
10769 if (mono_class_is_nullable (klass)) {
10772 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10773 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10777 ins = handle_unbox (cfg, klass, sp, context_used);
10790 MonoClassField *field;
10791 #ifndef DISABLE_REMOTING
10795 gboolean is_instance;
10797 gpointer addr = NULL;
10798 gboolean is_special_static;
10800 MonoInst *store_val = NULL;
10801 MonoInst *thread_ins;
10804 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10806 if (op == CEE_STFLD) {
10809 store_val = sp [1];
10814 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10816 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10819 if (op == CEE_STSFLD) {
10822 store_val = sp [0];
10827 token = read32 (ip + 1);
10828 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10829 field = mono_method_get_wrapper_data (method, token);
10830 klass = field->parent;
10833 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10836 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10837 FIELD_ACCESS_FAILURE (method, field);
10838 mono_class_init (klass);
10840 /* if the class is Critical then transparent code cannot access it's fields */
10841 if (!is_instance && mono_security_core_clr_enabled ())
10842 ensure_method_is_allowed_to_access_field (cfg, method, field);
10844 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10845 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10846 if (mono_security_core_clr_enabled ())
10847 ensure_method_is_allowed_to_access_field (cfg, method, field);
10851 * LDFLD etc. is usable on static fields as well, so convert those cases to
10854 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10866 g_assert_not_reached ();
10868 is_instance = FALSE;
10871 context_used = mini_class_check_context_used (cfg, klass);
10873 /* INSTANCE CASE */
10875 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10876 if (op == CEE_STFLD) {
10877 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10879 #ifndef DISABLE_REMOTING
10880 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10881 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10882 MonoInst *iargs [5];
10884 GSHAREDVT_FAILURE (op);
10886 iargs [0] = sp [0];
10887 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10888 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10889 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10891 iargs [4] = sp [1];
10893 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10894 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10895 iargs, ip, cfg->real_offset, TRUE);
10896 CHECK_CFG_EXCEPTION;
10897 g_assert (costs > 0);
10899 cfg->real_offset += 5;
10901 inline_costs += costs;
10903 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10910 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10912 if (mini_is_gsharedvt_klass (cfg, klass)) {
10913 MonoInst *offset_ins;
10915 context_used = mini_class_check_context_used (cfg, klass);
10917 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10918 dreg = alloc_ireg_mp (cfg);
10919 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10920 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10921 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10923 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10925 if (sp [0]->opcode != OP_LDADDR)
10926 store->flags |= MONO_INST_FAULT;
10928 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10929 /* insert call to write barrier */
10933 dreg = alloc_ireg_mp (cfg);
10934 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10935 emit_write_barrier (cfg, ptr, sp [1]);
10938 store->flags |= ins_flag;
10945 #ifndef DISABLE_REMOTING
10946 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10947 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10948 MonoInst *iargs [4];
10950 GSHAREDVT_FAILURE (op);
10952 iargs [0] = sp [0];
10953 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10954 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10955 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10956 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10957 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10958 iargs, ip, cfg->real_offset, TRUE);
10959 CHECK_CFG_EXCEPTION;
10960 g_assert (costs > 0);
10962 cfg->real_offset += 5;
10966 inline_costs += costs;
10968 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10974 if (sp [0]->type == STACK_VTYPE) {
10977 /* Have to compute the address of the variable */
10979 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10981 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10983 g_assert (var->klass == klass);
10985 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10989 if (op == CEE_LDFLDA) {
10990 if (sp [0]->type == STACK_OBJ) {
10991 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10992 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10995 dreg = alloc_ireg_mp (cfg);
10997 if (mini_is_gsharedvt_klass (cfg, klass)) {
10998 MonoInst *offset_ins;
11000 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11001 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11003 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11005 ins->klass = mono_class_from_mono_type (field->type);
11006 ins->type = STACK_MP;
11011 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11013 if (mini_is_gsharedvt_klass (cfg, klass)) {
11014 MonoInst *offset_ins;
11016 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11017 dreg = alloc_ireg_mp (cfg);
11018 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11019 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11021 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11023 load->flags |= ins_flag;
11024 if (sp [0]->opcode != OP_LDADDR)
11025 load->flags |= MONO_INST_FAULT;
11037 context_used = mini_class_check_context_used (cfg, klass);
11039 ftype = mono_field_get_type (field);
11041 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11044 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11045 * to be called here.
11047 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11048 mono_class_vtable (cfg->domain, klass);
11049 CHECK_TYPELOAD (klass);
11051 mono_domain_lock (cfg->domain);
11052 if (cfg->domain->special_static_fields)
11053 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11054 mono_domain_unlock (cfg->domain);
11056 is_special_static = mono_class_field_is_special_static (field);
11058 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11059 thread_ins = mono_get_thread_intrinsic (cfg);
11063 /* Generate IR to compute the field address */
11064 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11066 * Fast access to TLS data
11067 * Inline version of get_thread_static_data () in
11071 int idx, static_data_reg, array_reg, dreg;
11073 GSHAREDVT_FAILURE (op);
11075 MONO_ADD_INS (cfg->cbb, thread_ins);
11076 static_data_reg = alloc_ireg (cfg);
11077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11079 if (cfg->compile_aot) {
11080 int offset_reg, offset2_reg, idx_reg;
11082 /* For TLS variables, this will return the TLS offset */
11083 EMIT_NEW_SFLDACONST (cfg, ins, field);
11084 offset_reg = ins->dreg;
11085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11086 idx_reg = alloc_ireg (cfg);
11087 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11089 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11090 array_reg = alloc_ireg (cfg);
11091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11092 offset2_reg = alloc_ireg (cfg);
11093 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11095 dreg = alloc_ireg (cfg);
11096 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11098 offset = (gsize)addr & 0x7fffffff;
11099 idx = offset & 0x3f;
11101 array_reg = alloc_ireg (cfg);
11102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11103 dreg = alloc_ireg (cfg);
11104 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11106 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11107 (cfg->compile_aot && is_special_static) ||
11108 (context_used && is_special_static)) {
11109 MonoInst *iargs [2];
11111 g_assert (field->parent);
11112 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11113 if (context_used) {
11114 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11115 field, MONO_RGCTX_INFO_CLASS_FIELD);
11117 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11119 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11120 } else if (context_used) {
11121 MonoInst *static_data;
11124 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11125 method->klass->name_space, method->klass->name, method->name,
11126 depth, field->offset);
11129 if (mono_class_needs_cctor_run (klass, method))
11130 emit_generic_class_init (cfg, klass);
11133 * The pointer we're computing here is
11135 * super_info.static_data + field->offset
11137 static_data = emit_get_rgctx_klass (cfg, context_used,
11138 klass, MONO_RGCTX_INFO_STATIC_DATA);
11140 if (mini_is_gsharedvt_klass (cfg, klass)) {
11141 MonoInst *offset_ins;
11143 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11144 dreg = alloc_ireg_mp (cfg);
11145 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11146 } else if (field->offset == 0) {
11149 int addr_reg = mono_alloc_preg (cfg);
11150 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11152 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11153 MonoInst *iargs [2];
11155 g_assert (field->parent);
11156 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11157 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11158 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11160 MonoVTable *vtable = NULL;
11162 if (!cfg->compile_aot)
11163 vtable = mono_class_vtable (cfg->domain, klass);
11164 CHECK_TYPELOAD (klass);
11167 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11168 if (!(g_slist_find (class_inits, klass))) {
11169 emit_class_init (cfg, klass);
11170 if (cfg->verbose_level > 2)
11171 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11172 class_inits = g_slist_prepend (class_inits, klass);
11175 if (cfg->run_cctors) {
11177 /* This makes so that inline cannot trigger */
11178 /* .cctors: too many apps depend on them */
11179 /* running with a specific order... */
11181 if (! vtable->initialized)
11182 INLINE_FAILURE ("class init");
11183 ex = mono_runtime_class_init_full (vtable, FALSE);
11185 set_exception_object (cfg, ex);
11186 goto exception_exit;
11190 if (cfg->compile_aot)
11191 EMIT_NEW_SFLDACONST (cfg, ins, field);
11194 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11196 EMIT_NEW_PCONST (cfg, ins, addr);
11199 MonoInst *iargs [1];
11200 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11201 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11205 /* Generate IR to do the actual load/store operation */
11207 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11208 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11209 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11212 if (op == CEE_LDSFLDA) {
11213 ins->klass = mono_class_from_mono_type (ftype);
11214 ins->type = STACK_PTR;
11216 } else if (op == CEE_STSFLD) {
11219 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11220 store->flags |= ins_flag;
11222 gboolean is_const = FALSE;
11223 MonoVTable *vtable = NULL;
11224 gpointer addr = NULL;
11226 if (!context_used) {
11227 vtable = mono_class_vtable (cfg->domain, klass);
11228 CHECK_TYPELOAD (klass);
11230 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11231 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11232 int ro_type = ftype->type;
11234 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11235 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11236 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11239 GSHAREDVT_FAILURE (op);
11241 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11244 case MONO_TYPE_BOOLEAN:
11246 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11250 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11253 case MONO_TYPE_CHAR:
11255 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11259 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11264 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11268 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11273 case MONO_TYPE_PTR:
11274 case MONO_TYPE_FNPTR:
11275 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11276 type_to_eval_stack_type ((cfg), field->type, *sp);
11279 case MONO_TYPE_STRING:
11280 case MONO_TYPE_OBJECT:
11281 case MONO_TYPE_CLASS:
11282 case MONO_TYPE_SZARRAY:
11283 case MONO_TYPE_ARRAY:
11284 if (!mono_gc_is_moving ()) {
11285 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11286 type_to_eval_stack_type ((cfg), field->type, *sp);
11294 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11299 case MONO_TYPE_VALUETYPE:
11309 CHECK_STACK_OVF (1);
11311 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11312 load->flags |= ins_flag;
11318 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11319 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11320 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11331 token = read32 (ip + 1);
11332 klass = mini_get_class (method, token, generic_context);
11333 CHECK_TYPELOAD (klass);
11334 if (ins_flag & MONO_INST_VOLATILE) {
11335 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11336 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11338 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11339 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11340 ins->flags |= ins_flag;
11341 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11342 generic_class_is_reference_type (cfg, klass)) {
11343 /* insert call to write barrier */
11344 emit_write_barrier (cfg, sp [0], sp [1]);
11356 const char *data_ptr;
11358 guint32 field_token;
11364 token = read32 (ip + 1);
11366 klass = mini_get_class (method, token, generic_context);
11367 CHECK_TYPELOAD (klass);
11369 context_used = mini_class_check_context_used (cfg, klass);
11371 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11372 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11373 ins->sreg1 = sp [0]->dreg;
11374 ins->type = STACK_I4;
11375 ins->dreg = alloc_ireg (cfg);
11376 MONO_ADD_INS (cfg->cbb, ins);
11377 *sp = mono_decompose_opcode (cfg, ins);
11380 if (context_used) {
11381 MonoInst *args [3];
11382 MonoClass *array_class = mono_array_class_get (klass, 1);
11383 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11385 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11388 args [0] = emit_get_rgctx_klass (cfg, context_used,
11389 array_class, MONO_RGCTX_INFO_VTABLE);
11394 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11396 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11398 if (cfg->opt & MONO_OPT_SHARED) {
11399 /* Decompose now to avoid problems with references to the domainvar */
11400 MonoInst *iargs [3];
11402 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11403 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11404 iargs [2] = sp [0];
11406 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11408 /* Decompose later since it is needed by abcrem */
11409 MonoClass *array_type = mono_array_class_get (klass, 1);
11410 mono_class_vtable (cfg->domain, array_type);
11411 CHECK_TYPELOAD (array_type);
11413 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11414 ins->dreg = alloc_ireg_ref (cfg);
11415 ins->sreg1 = sp [0]->dreg;
11416 ins->inst_newa_class = klass;
11417 ins->type = STACK_OBJ;
11418 ins->klass = array_type;
11419 MONO_ADD_INS (cfg->cbb, ins);
11420 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11421 cfg->cbb->has_array_access = TRUE;
11423 /* Needed so mono_emit_load_get_addr () gets called */
11424 mono_get_got_var (cfg);
11434 * we inline/optimize the initialization sequence if possible.
11435 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11436 * for small sizes open code the memcpy
11437 * ensure the rva field is big enough
11439 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11440 MonoMethod *memcpy_method = get_memcpy_method ();
11441 MonoInst *iargs [3];
11442 int add_reg = alloc_ireg_mp (cfg);
11444 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11445 if (cfg->compile_aot) {
11446 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11448 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11450 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11451 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11460 if (sp [0]->type != STACK_OBJ)
11463 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11464 ins->dreg = alloc_preg (cfg);
11465 ins->sreg1 = sp [0]->dreg;
11466 ins->type = STACK_I4;
11467 /* This flag will be inherited by the decomposition */
11468 ins->flags |= MONO_INST_FAULT;
11469 MONO_ADD_INS (cfg->cbb, ins);
11470 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11471 cfg->cbb->has_array_access = TRUE;
11479 if (sp [0]->type != STACK_OBJ)
11482 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11484 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11485 CHECK_TYPELOAD (klass);
11486 /* we need to make sure that this array is exactly the type it needs
11487 * to be for correctness. the wrappers are lax with their usage
11488 * so we need to ignore them here
11490 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11491 MonoClass *array_class = mono_array_class_get (klass, 1);
11492 mini_emit_check_array_type (cfg, sp [0], array_class);
11493 CHECK_TYPELOAD (array_class);
11497 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11502 case CEE_LDELEM_I1:
11503 case CEE_LDELEM_U1:
11504 case CEE_LDELEM_I2:
11505 case CEE_LDELEM_U2:
11506 case CEE_LDELEM_I4:
11507 case CEE_LDELEM_U4:
11508 case CEE_LDELEM_I8:
11510 case CEE_LDELEM_R4:
11511 case CEE_LDELEM_R8:
11512 case CEE_LDELEM_REF: {
11518 if (*ip == CEE_LDELEM) {
11520 token = read32 (ip + 1);
11521 klass = mini_get_class (method, token, generic_context);
11522 CHECK_TYPELOAD (klass);
11523 mono_class_init (klass);
11526 klass = array_access_to_klass (*ip);
11528 if (sp [0]->type != STACK_OBJ)
11531 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11533 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11534 // FIXME-VT: OP_ICONST optimization
11535 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11536 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11537 ins->opcode = OP_LOADV_MEMBASE;
11538 } else if (sp [1]->opcode == OP_ICONST) {
11539 int array_reg = sp [0]->dreg;
11540 int index_reg = sp [1]->dreg;
11541 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11543 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11544 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11546 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11547 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11550 if (*ip == CEE_LDELEM)
11557 case CEE_STELEM_I1:
11558 case CEE_STELEM_I2:
11559 case CEE_STELEM_I4:
11560 case CEE_STELEM_I8:
11561 case CEE_STELEM_R4:
11562 case CEE_STELEM_R8:
11563 case CEE_STELEM_REF:
11568 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11570 if (*ip == CEE_STELEM) {
11572 token = read32 (ip + 1);
11573 klass = mini_get_class (method, token, generic_context);
11574 CHECK_TYPELOAD (klass);
11575 mono_class_init (klass);
11578 klass = array_access_to_klass (*ip);
11580 if (sp [0]->type != STACK_OBJ)
11583 emit_array_store (cfg, klass, sp, TRUE);
11585 if (*ip == CEE_STELEM)
11592 case CEE_CKFINITE: {
11596 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11597 ins->sreg1 = sp [0]->dreg;
11598 ins->dreg = alloc_freg (cfg);
11599 ins->type = STACK_R8;
11600 MONO_ADD_INS (cfg->cbb, ins);
11602 *sp++ = mono_decompose_opcode (cfg, ins);
11607 case CEE_REFANYVAL: {
11608 MonoInst *src_var, *src;
11610 int klass_reg = alloc_preg (cfg);
11611 int dreg = alloc_preg (cfg);
11613 GSHAREDVT_FAILURE (*ip);
11616 MONO_INST_NEW (cfg, ins, *ip);
11619 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11620 CHECK_TYPELOAD (klass);
11622 context_used = mini_class_check_context_used (cfg, klass);
11625 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11627 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11628 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11631 if (context_used) {
11632 MonoInst *klass_ins;
11634 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11635 klass, MONO_RGCTX_INFO_KLASS);
11638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11639 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11641 mini_emit_class_check (cfg, klass_reg, klass);
11643 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11644 ins->type = STACK_MP;
11645 ins->klass = klass;
11650 case CEE_MKREFANY: {
11651 MonoInst *loc, *addr;
11653 GSHAREDVT_FAILURE (*ip);
11656 MONO_INST_NEW (cfg, ins, *ip);
11659 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11660 CHECK_TYPELOAD (klass);
11662 context_used = mini_class_check_context_used (cfg, klass);
11664 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11665 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11667 if (context_used) {
11668 MonoInst *const_ins;
11669 int type_reg = alloc_preg (cfg);
11671 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11675 } else if (cfg->compile_aot) {
11676 int const_reg = alloc_preg (cfg);
11677 int type_reg = alloc_preg (cfg);
11679 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11684 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11685 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11689 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11690 ins->type = STACK_VTYPE;
11691 ins->klass = mono_defaults.typed_reference_class;
11696 case CEE_LDTOKEN: {
11698 MonoClass *handle_class;
11700 CHECK_STACK_OVF (1);
11703 n = read32 (ip + 1);
11705 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11706 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11707 handle = mono_method_get_wrapper_data (method, n);
11708 handle_class = mono_method_get_wrapper_data (method, n + 1);
11709 if (handle_class == mono_defaults.typehandle_class)
11710 handle = &((MonoClass*)handle)->byval_arg;
11713 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11718 mono_class_init (handle_class);
11719 if (cfg->generic_sharing_context) {
11720 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11721 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11722 /* This case handles ldtoken
11723 of an open type, like for
11726 } else if (handle_class == mono_defaults.typehandle_class) {
11727 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11728 } else if (handle_class == mono_defaults.fieldhandle_class)
11729 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11730 else if (handle_class == mono_defaults.methodhandle_class)
11731 context_used = mini_method_check_context_used (cfg, handle);
11733 g_assert_not_reached ();
11736 if ((cfg->opt & MONO_OPT_SHARED) &&
11737 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11738 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11739 MonoInst *addr, *vtvar, *iargs [3];
11740 int method_context_used;
11742 method_context_used = mini_method_check_context_used (cfg, method);
11744 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11746 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11747 EMIT_NEW_ICONST (cfg, iargs [1], n);
11748 if (method_context_used) {
11749 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11750 method, MONO_RGCTX_INFO_METHOD);
11751 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11753 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11754 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11756 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11758 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11760 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11762 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11763 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11764 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11765 (cmethod->klass == mono_defaults.systemtype_class) &&
11766 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11767 MonoClass *tclass = mono_class_from_mono_type (handle);
11769 mono_class_init (tclass);
11770 if (context_used) {
11771 ins = emit_get_rgctx_klass (cfg, context_used,
11772 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11773 } else if (cfg->compile_aot) {
11774 if (method->wrapper_type) {
11775 mono_error_init (&error); //got to do it since there are multiple conditionals below
11776 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11777 /* Special case for static synchronized wrappers */
11778 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11780 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11781 /* FIXME: n is not a normal token */
11783 EMIT_NEW_PCONST (cfg, ins, NULL);
11786 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11789 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11791 ins->type = STACK_OBJ;
11792 ins->klass = cmethod->klass;
11795 MonoInst *addr, *vtvar;
11797 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11799 if (context_used) {
11800 if (handle_class == mono_defaults.typehandle_class) {
11801 ins = emit_get_rgctx_klass (cfg, context_used,
11802 mono_class_from_mono_type (handle),
11803 MONO_RGCTX_INFO_TYPE);
11804 } else if (handle_class == mono_defaults.methodhandle_class) {
11805 ins = emit_get_rgctx_method (cfg, context_used,
11806 handle, MONO_RGCTX_INFO_METHOD);
11807 } else if (handle_class == mono_defaults.fieldhandle_class) {
11808 ins = emit_get_rgctx_field (cfg, context_used,
11809 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11811 g_assert_not_reached ();
11813 } else if (cfg->compile_aot) {
11814 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11816 EMIT_NEW_PCONST (cfg, ins, handle);
11818 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11819 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11820 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11830 MONO_INST_NEW (cfg, ins, OP_THROW);
11832 ins->sreg1 = sp [0]->dreg;
11834 cfg->cbb->out_of_line = TRUE;
11835 MONO_ADD_INS (cfg->cbb, ins);
11836 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11837 MONO_ADD_INS (cfg->cbb, ins);
11840 link_bblock (cfg, cfg->cbb, end_bblock);
11841 start_new_bblock = 1;
11843 case CEE_ENDFINALLY:
11844 /* mono_save_seq_point_info () depends on this */
11845 if (sp != stack_start)
11846 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11847 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11848 MONO_ADD_INS (cfg->cbb, ins);
11850 start_new_bblock = 1;
11853 * Control will leave the method so empty the stack, otherwise
11854 * the next basic block will start with a nonempty stack.
11856 while (sp != stack_start) {
11861 case CEE_LEAVE_S: {
11864 if (*ip == CEE_LEAVE) {
11866 target = ip + 5 + (gint32)read32(ip + 1);
11869 target = ip + 2 + (signed char)(ip [1]);
11872 /* empty the stack */
11873 while (sp != stack_start) {
11878 * If this leave statement is in a catch block, check for a
11879 * pending exception, and rethrow it if necessary.
11880 * We avoid doing this in runtime invoke wrappers, since those are called
11881 * by native code which expects the wrapper to catch all exceptions.
11883 for (i = 0; i < header->num_clauses; ++i) {
11884 MonoExceptionClause *clause = &header->clauses [i];
11887 * Use <= in the final comparison to handle clauses with multiple
11888 * leave statements, like in bug #78024.
11889 * The ordering of the exception clauses guarantees that we find the
11890 * innermost clause.
11892 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11894 MonoBasicBlock *dont_throw;
11899 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11902 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11904 NEW_BBLOCK (cfg, dont_throw);
11907 * Currently, we always rethrow the abort exception, despite the
11908 * fact that this is not correct. See thread6.cs for an example.
11909 * But propagating the abort exception is more important than
11910 * getting the semantics right.
11912 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11913 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11914 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11916 MONO_START_BB (cfg, dont_throw);
11920 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11922 MonoExceptionClause *clause;
11924 for (tmp = handlers; tmp; tmp = tmp->next) {
11925 clause = tmp->data;
11926 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11928 link_bblock (cfg, cfg->cbb, tblock);
11929 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11930 ins->inst_target_bb = tblock;
11931 ins->inst_eh_block = clause;
11932 MONO_ADD_INS (cfg->cbb, ins);
11933 cfg->cbb->has_call_handler = 1;
11934 if (COMPILE_LLVM (cfg)) {
11935 MonoBasicBlock *target_bb;
11938 * Link the finally bblock with the target, since it will
11939 * conceptually branch there.
11940 * FIXME: Have to link the bblock containing the endfinally.
11942 GET_BBLOCK (cfg, target_bb, target);
11943 link_bblock (cfg, tblock, target_bb);
11946 g_list_free (handlers);
11949 MONO_INST_NEW (cfg, ins, OP_BR);
11950 MONO_ADD_INS (cfg->cbb, ins);
11951 GET_BBLOCK (cfg, tblock, target);
11952 link_bblock (cfg, cfg->cbb, tblock);
11953 ins->inst_target_bb = tblock;
11954 start_new_bblock = 1;
11956 if (*ip == CEE_LEAVE)
11965 * Mono specific opcodes
11967 case MONO_CUSTOM_PREFIX: {
11969 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11973 case CEE_MONO_ICALL: {
11975 MonoJitICallInfo *info;
11977 token = read32 (ip + 2);
11978 func = mono_method_get_wrapper_data (method, token);
11979 info = mono_find_jit_icall_by_addr (func);
11981 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11984 CHECK_STACK (info->sig->param_count);
11985 sp -= info->sig->param_count;
11987 ins = mono_emit_jit_icall (cfg, info->func, sp);
11988 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11992 inline_costs += 10 * num_calls++;
11996 case CEE_MONO_LDPTR_CARD_TABLE: {
11998 gpointer card_mask;
11999 CHECK_STACK_OVF (1);
12001 if (cfg->compile_aot)
12002 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12004 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12008 inline_costs += 10 * num_calls++;
12011 case CEE_MONO_LDPTR_NURSERY_START: {
12014 CHECK_STACK_OVF (1);
12016 if (cfg->compile_aot)
12017 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12019 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12023 inline_costs += 10 * num_calls++;
12026 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12027 CHECK_STACK_OVF (1);
12029 if (cfg->compile_aot)
12030 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12032 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12036 inline_costs += 10 * num_calls++;
12039 case CEE_MONO_LDPTR: {
12042 CHECK_STACK_OVF (1);
12044 token = read32 (ip + 2);
12046 ptr = mono_method_get_wrapper_data (method, token);
12047 EMIT_NEW_PCONST (cfg, ins, ptr);
12050 inline_costs += 10 * num_calls++;
12051 /* Can't embed random pointers into AOT code */
12055 case CEE_MONO_JIT_ICALL_ADDR: {
12056 MonoJitICallInfo *callinfo;
12059 CHECK_STACK_OVF (1);
12061 token = read32 (ip + 2);
12063 ptr = mono_method_get_wrapper_data (method, token);
12064 callinfo = mono_find_jit_icall_by_addr (ptr);
12065 g_assert (callinfo);
12066 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12069 inline_costs += 10 * num_calls++;
12072 case CEE_MONO_ICALL_ADDR: {
12073 MonoMethod *cmethod;
12076 CHECK_STACK_OVF (1);
12078 token = read32 (ip + 2);
12080 cmethod = mono_method_get_wrapper_data (method, token);
12082 if (cfg->compile_aot) {
12083 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12085 ptr = mono_lookup_internal_call (cmethod);
12087 EMIT_NEW_PCONST (cfg, ins, ptr);
12093 case CEE_MONO_VTADDR: {
12094 MonoInst *src_var, *src;
12100 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12101 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12106 case CEE_MONO_NEWOBJ: {
12107 MonoInst *iargs [2];
12109 CHECK_STACK_OVF (1);
12111 token = read32 (ip + 2);
12112 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12113 mono_class_init (klass);
12114 NEW_DOMAINCONST (cfg, iargs [0]);
12115 MONO_ADD_INS (cfg->cbb, iargs [0]);
12116 NEW_CLASSCONST (cfg, iargs [1], klass);
12117 MONO_ADD_INS (cfg->cbb, iargs [1]);
12118 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12120 inline_costs += 10 * num_calls++;
12123 case CEE_MONO_OBJADDR:
12126 MONO_INST_NEW (cfg, ins, OP_MOVE);
12127 ins->dreg = alloc_ireg_mp (cfg);
12128 ins->sreg1 = sp [0]->dreg;
12129 ins->type = STACK_MP;
12130 MONO_ADD_INS (cfg->cbb, ins);
12134 case CEE_MONO_LDNATIVEOBJ:
12136 * Similar to LDOBJ, but instead load the unmanaged
12137 * representation of the vtype to the stack.
12142 token = read32 (ip + 2);
12143 klass = mono_method_get_wrapper_data (method, token);
12144 g_assert (klass->valuetype);
12145 mono_class_init (klass);
12148 MonoInst *src, *dest, *temp;
12151 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12152 temp->backend.is_pinvoke = 1;
12153 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12154 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12156 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12157 dest->type = STACK_VTYPE;
12158 dest->klass = klass;
12164 case CEE_MONO_RETOBJ: {
12166 * Same as RET, but return the native representation of a vtype
12169 g_assert (cfg->ret);
12170 g_assert (mono_method_signature (method)->pinvoke);
12175 token = read32 (ip + 2);
12176 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12178 if (!cfg->vret_addr) {
12179 g_assert (cfg->ret_var_is_local);
12181 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12183 EMIT_NEW_RETLOADA (cfg, ins);
12185 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12187 if (sp != stack_start)
12190 MONO_INST_NEW (cfg, ins, OP_BR);
12191 ins->inst_target_bb = end_bblock;
12192 MONO_ADD_INS (cfg->cbb, ins);
12193 link_bblock (cfg, cfg->cbb, end_bblock);
12194 start_new_bblock = 1;
12198 case CEE_MONO_CISINST:
12199 case CEE_MONO_CCASTCLASS: {
12204 token = read32 (ip + 2);
12205 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12206 if (ip [1] == CEE_MONO_CISINST)
12207 ins = handle_cisinst (cfg, klass, sp [0]);
12209 ins = handle_ccastclass (cfg, klass, sp [0]);
12214 case CEE_MONO_SAVE_LMF:
12215 case CEE_MONO_RESTORE_LMF:
12216 #ifdef MONO_ARCH_HAVE_LMF_OPS
12217 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12218 MONO_ADD_INS (cfg->cbb, ins);
12219 cfg->need_lmf_area = TRUE;
12223 case CEE_MONO_CLASSCONST:
12224 CHECK_STACK_OVF (1);
12226 token = read32 (ip + 2);
12227 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12230 inline_costs += 10 * num_calls++;
12232 case CEE_MONO_NOT_TAKEN:
12233 cfg->cbb->out_of_line = TRUE;
12236 case CEE_MONO_TLS: {
12239 CHECK_STACK_OVF (1);
12241 key = (gint32)read32 (ip + 2);
12242 g_assert (key < TLS_KEY_NUM);
12244 ins = mono_create_tls_get (cfg, key);
12246 if (cfg->compile_aot) {
12248 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12249 ins->dreg = alloc_preg (cfg);
12250 ins->type = STACK_PTR;
12252 g_assert_not_reached ();
12255 ins->type = STACK_PTR;
12256 MONO_ADD_INS (cfg->cbb, ins);
12261 case CEE_MONO_DYN_CALL: {
12262 MonoCallInst *call;
12264 /* It would be easier to call a trampoline, but that would put an
12265 * extra frame on the stack, confusing exception handling. So
12266 * implement it inline using an opcode for now.
12269 if (!cfg->dyn_call_var) {
12270 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12271 /* prevent it from being register allocated */
12272 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12275 /* Has to use a call inst since local regalloc expects it */
12276 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12277 ins = (MonoInst*)call;
12279 ins->sreg1 = sp [0]->dreg;
12280 ins->sreg2 = sp [1]->dreg;
12281 MONO_ADD_INS (cfg->cbb, ins);
12283 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12286 inline_costs += 10 * num_calls++;
12290 case CEE_MONO_MEMORY_BARRIER: {
12292 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12296 case CEE_MONO_JIT_ATTACH: {
12297 MonoInst *args [16], *domain_ins;
12298 MonoInst *ad_ins, *jit_tls_ins;
12299 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12301 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12303 EMIT_NEW_PCONST (cfg, ins, NULL);
12304 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12306 ad_ins = mono_get_domain_intrinsic (cfg);
12307 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12309 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12310 NEW_BBLOCK (cfg, next_bb);
12311 NEW_BBLOCK (cfg, call_bb);
12313 if (cfg->compile_aot) {
12314 /* AOT code is only used in the root domain */
12315 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12317 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12319 MONO_ADD_INS (cfg->cbb, ad_ins);
12320 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12323 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12324 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12328 MONO_START_BB (cfg, call_bb);
12331 if (cfg->compile_aot) {
12332 /* AOT code is only used in the root domain */
12333 EMIT_NEW_PCONST (cfg, args [0], NULL);
12335 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12337 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12338 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12341 MONO_START_BB (cfg, next_bb);
12345 case CEE_MONO_JIT_DETACH: {
12346 MonoInst *args [16];
12348 /* Restore the original domain */
12349 dreg = alloc_ireg (cfg);
12350 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12351 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12356 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12362 case CEE_PREFIX1: {
12365 case CEE_ARGLIST: {
12366 /* somewhat similar to LDTOKEN */
12367 MonoInst *addr, *vtvar;
12368 CHECK_STACK_OVF (1);
12369 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12371 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12372 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12374 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12375 ins->type = STACK_VTYPE;
12376 ins->klass = mono_defaults.argumenthandle_class;
12386 MonoInst *cmp, *arg1, *arg2;
12394 * The following transforms:
12395 * CEE_CEQ into OP_CEQ
12396 * CEE_CGT into OP_CGT
12397 * CEE_CGT_UN into OP_CGT_UN
12398 * CEE_CLT into OP_CLT
12399 * CEE_CLT_UN into OP_CLT_UN
12401 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12403 MONO_INST_NEW (cfg, ins, cmp->opcode);
12404 cmp->sreg1 = arg1->dreg;
12405 cmp->sreg2 = arg2->dreg;
12406 type_from_op (cfg, cmp, arg1, arg2);
12408 add_widen_op (cfg, cmp, &arg1, &arg2);
12409 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12410 cmp->opcode = OP_LCOMPARE;
12411 else if (arg1->type == STACK_R4)
12412 cmp->opcode = OP_RCOMPARE;
12413 else if (arg1->type == STACK_R8)
12414 cmp->opcode = OP_FCOMPARE;
12416 cmp->opcode = OP_ICOMPARE;
12417 MONO_ADD_INS (cfg->cbb, cmp);
12418 ins->type = STACK_I4;
12419 ins->dreg = alloc_dreg (cfg, ins->type);
12420 type_from_op (cfg, ins, arg1, arg2);
12422 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12424 * The backends expect the fceq opcodes to do the
12427 ins->sreg1 = cmp->sreg1;
12428 ins->sreg2 = cmp->sreg2;
12431 MONO_ADD_INS (cfg->cbb, ins);
12437 MonoInst *argconst;
12438 MonoMethod *cil_method;
12440 CHECK_STACK_OVF (1);
12442 n = read32 (ip + 2);
12443 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12444 if (!cmethod || mono_loader_get_last_error ())
12446 mono_class_init (cmethod->klass);
12448 mono_save_token_info (cfg, image, n, cmethod);
12450 context_used = mini_method_check_context_used (cfg, cmethod);
12452 cil_method = cmethod;
12453 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12454 METHOD_ACCESS_FAILURE (method, cil_method);
12456 if (mono_security_core_clr_enabled ())
12457 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12460 * Optimize the common case of ldftn+delegate creation
12462 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12463 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12464 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12465 MonoInst *target_ins, *handle_ins;
12466 MonoMethod *invoke;
12467 int invoke_context_used;
12469 invoke = mono_get_delegate_invoke (ctor_method->klass);
12470 if (!invoke || !mono_method_signature (invoke))
12473 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12475 target_ins = sp [-1];
12477 if (mono_security_core_clr_enabled ())
12478 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12480 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12481 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12482 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12483 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12484 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12488 /* FIXME: SGEN support */
12489 if (invoke_context_used == 0) {
12491 if (cfg->verbose_level > 3)
12492 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12493 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12496 CHECK_CFG_EXCEPTION;
12506 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12507 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12511 inline_costs += 10 * num_calls++;
12514 case CEE_LDVIRTFTN: {
12515 MonoInst *args [2];
12519 n = read32 (ip + 2);
12520 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12521 if (!cmethod || mono_loader_get_last_error ())
12523 mono_class_init (cmethod->klass);
12525 context_used = mini_method_check_context_used (cfg, cmethod);
12527 if (mono_security_core_clr_enabled ())
12528 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12531 * Optimize the common case of ldvirtftn+delegate creation
12533 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12534 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12535 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12536 MonoInst *target_ins, *handle_ins;
12537 MonoMethod *invoke;
12538 int invoke_context_used;
12539 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12541 invoke = mono_get_delegate_invoke (ctor_method->klass);
12542 if (!invoke || !mono_method_signature (invoke))
12545 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12547 target_ins = sp [-1];
12549 if (mono_security_core_clr_enabled ())
12550 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12552 /* FIXME: SGEN support */
12553 if (invoke_context_used == 0) {
12555 if (cfg->verbose_level > 3)
12556 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12557 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12560 CHECK_CFG_EXCEPTION;
12573 args [1] = emit_get_rgctx_method (cfg, context_used,
12574 cmethod, MONO_RGCTX_INFO_METHOD);
12577 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12579 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12582 inline_costs += 10 * num_calls++;
12586 CHECK_STACK_OVF (1);
12588 n = read16 (ip + 2);
12590 EMIT_NEW_ARGLOAD (cfg, ins, n);
12595 CHECK_STACK_OVF (1);
12597 n = read16 (ip + 2);
12599 NEW_ARGLOADA (cfg, ins, n);
12600 MONO_ADD_INS (cfg->cbb, ins);
12608 n = read16 (ip + 2);
12610 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12612 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12616 CHECK_STACK_OVF (1);
12618 n = read16 (ip + 2);
12620 EMIT_NEW_LOCLOAD (cfg, ins, n);
12625 unsigned char *tmp_ip;
12626 CHECK_STACK_OVF (1);
12628 n = read16 (ip + 2);
12631 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12637 EMIT_NEW_LOCLOADA (cfg, ins, n);
12646 n = read16 (ip + 2);
12648 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12650 emit_stloc_ir (cfg, sp, header, n);
12657 if (sp != stack_start)
12659 if (cfg->method != method)
12661 * Inlining this into a loop in a parent could lead to
12662 * stack overflows which is different behavior than the
12663 * non-inlined case, thus disable inlining in this case.
12665 INLINE_FAILURE("localloc");
12667 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12668 ins->dreg = alloc_preg (cfg);
12669 ins->sreg1 = sp [0]->dreg;
12670 ins->type = STACK_PTR;
12671 MONO_ADD_INS (cfg->cbb, ins);
12673 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12675 ins->flags |= MONO_INST_INIT;
12680 case CEE_ENDFILTER: {
12681 MonoExceptionClause *clause, *nearest;
12686 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12688 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12689 ins->sreg1 = (*sp)->dreg;
12690 MONO_ADD_INS (cfg->cbb, ins);
12691 start_new_bblock = 1;
12695 for (cc = 0; cc < header->num_clauses; ++cc) {
12696 clause = &header->clauses [cc];
12697 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12698 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12699 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12702 g_assert (nearest);
12703 if ((ip - header->code) != nearest->handler_offset)
12708 case CEE_UNALIGNED_:
12709 ins_flag |= MONO_INST_UNALIGNED;
12710 /* FIXME: record alignment? we can assume 1 for now */
12714 case CEE_VOLATILE_:
12715 ins_flag |= MONO_INST_VOLATILE;
12719 ins_flag |= MONO_INST_TAILCALL;
12720 cfg->flags |= MONO_CFG_HAS_TAIL;
12721 /* Can't inline tail calls at this time */
12722 inline_costs += 100000;
12729 token = read32 (ip + 2);
12730 klass = mini_get_class (method, token, generic_context);
12731 CHECK_TYPELOAD (klass);
12732 if (generic_class_is_reference_type (cfg, klass))
12733 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12735 mini_emit_initobj (cfg, *sp, NULL, klass);
12739 case CEE_CONSTRAINED_:
12741 token = read32 (ip + 2);
12742 constrained_class = mini_get_class (method, token, generic_context);
12743 CHECK_TYPELOAD (constrained_class);
12747 case CEE_INITBLK: {
12748 MonoInst *iargs [3];
12752 /* Skip optimized paths for volatile operations. */
12753 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12754 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12755 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12756 /* emit_memset only works when val == 0 */
12757 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12760 iargs [0] = sp [0];
12761 iargs [1] = sp [1];
12762 iargs [2] = sp [2];
12763 if (ip [1] == CEE_CPBLK) {
12765 * FIXME: It's unclear whether we should be emitting both the acquire
12766 * and release barriers for cpblk. It is technically both a load and
12767 * store operation, so it seems like that's the sensible thing to do.
12769 * FIXME: We emit full barriers on both sides of the operation for
12770 * simplicity. We should have a separate atomic memcpy method instead.
12772 MonoMethod *memcpy_method = get_memcpy_method ();
12774 if (ins_flag & MONO_INST_VOLATILE)
12775 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12777 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12778 call->flags |= ins_flag;
12780 if (ins_flag & MONO_INST_VOLATILE)
12781 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12783 MonoMethod *memset_method = get_memset_method ();
12784 if (ins_flag & MONO_INST_VOLATILE) {
12785 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12786 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12788 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12789 call->flags |= ins_flag;
12800 ins_flag |= MONO_INST_NOTYPECHECK;
12802 ins_flag |= MONO_INST_NORANGECHECK;
12803 /* we ignore the no-nullcheck for now since we
12804 * really do it explicitly only when doing callvirt->call
12808 case CEE_RETHROW: {
12810 int handler_offset = -1;
12812 for (i = 0; i < header->num_clauses; ++i) {
12813 MonoExceptionClause *clause = &header->clauses [i];
12814 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12815 handler_offset = clause->handler_offset;
12820 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12822 if (handler_offset == -1)
12825 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12826 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12827 ins->sreg1 = load->dreg;
12828 MONO_ADD_INS (cfg->cbb, ins);
12830 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12831 MONO_ADD_INS (cfg->cbb, ins);
12834 link_bblock (cfg, cfg->cbb, end_bblock);
12835 start_new_bblock = 1;
12843 CHECK_STACK_OVF (1);
12845 token = read32 (ip + 2);
12846 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12847 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12850 val = mono_type_size (type, &ialign);
12852 MonoClass *klass = mini_get_class (method, token, generic_context);
12853 CHECK_TYPELOAD (klass);
12855 val = mono_type_size (&klass->byval_arg, &ialign);
12857 if (mini_is_gsharedvt_klass (cfg, klass))
12858 GSHAREDVT_FAILURE (*ip);
12860 EMIT_NEW_ICONST (cfg, ins, val);
12865 case CEE_REFANYTYPE: {
12866 MonoInst *src_var, *src;
12868 GSHAREDVT_FAILURE (*ip);
12874 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12876 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12877 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12878 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12883 case CEE_READONLY_:
12896 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12906 g_warning ("opcode 0x%02x not handled", *ip);
12910 if (start_new_bblock != 1)
12913 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12914 if (cfg->cbb->next_bb) {
12915 /* This could already be set because of inlining, #693905 */
12916 MonoBasicBlock *bb = cfg->cbb;
12918 while (bb->next_bb)
12920 bb->next_bb = end_bblock;
12922 cfg->cbb->next_bb = end_bblock;
12925 if (cfg->method == method && cfg->domainvar) {
12927 MonoInst *get_domain;
12929 cfg->cbb = init_localsbb;
12931 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12932 MONO_ADD_INS (cfg->cbb, get_domain);
12934 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12936 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12937 MONO_ADD_INS (cfg->cbb, store);
12940 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12941 if (cfg->compile_aot)
12942 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12943 mono_get_got_var (cfg);
12946 if (cfg->method == method && cfg->got_var)
12947 mono_emit_load_got_addr (cfg);
12949 if (init_localsbb) {
12950 cfg->cbb = init_localsbb;
12952 for (i = 0; i < header->num_locals; ++i) {
12953 emit_init_local (cfg, i, header->locals [i], init_locals);
12957 if (cfg->init_ref_vars && cfg->method == method) {
12958 /* Emit initialization for ref vars */
12959 // FIXME: Avoid duplication initialization for IL locals.
12960 for (i = 0; i < cfg->num_varinfo; ++i) {
12961 MonoInst *ins = cfg->varinfo [i];
12963 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12964 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12968 if (cfg->lmf_var && cfg->method == method) {
12969 cfg->cbb = init_localsbb;
12970 emit_push_lmf (cfg);
12973 cfg->cbb = init_localsbb;
12974 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12977 MonoBasicBlock *bb;
12980 * Make seq points at backward branch targets interruptable.
12982 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12983 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12984 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12987 /* Add a sequence point for method entry/exit events */
12988 if (seq_points && cfg->gen_sdb_seq_points) {
12989 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12990 MONO_ADD_INS (init_localsbb, ins);
12991 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12992 MONO_ADD_INS (cfg->bb_exit, ins);
12996 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12997 * the code they refer to was dead (#11880).
12999 if (sym_seq_points) {
13000 for (i = 0; i < header->code_size; ++i) {
13001 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13004 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13005 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13012 if (cfg->method == method) {
13013 MonoBasicBlock *bb;
13014 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13015 bb->region = mono_find_block_region (cfg, bb->real_offset);
13017 mono_create_spvar_for_region (cfg, bb->region);
13018 if (cfg->verbose_level > 2)
13019 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13023 if (inline_costs < 0) {
13026 /* Method is too large */
13027 mname = mono_method_full_name (method, TRUE);
13028 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13029 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13033 if ((cfg->verbose_level > 2) && (cfg->method == method))
13034 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13039 g_assert (!mono_error_ok (&cfg->error));
13043 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13047 set_exception_type_from_invalid_il (cfg, method, ip);
13051 g_slist_free (class_inits);
13052 mono_basic_block_free (original_bb);
13053 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13054 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13055 if (cfg->exception_type)
13058 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode (store a register to [basereg+offset])
 * to its OP_STORE*_MEMBASE_IMM counterpart (store an immediate instead).
 * Asserts on any opcode outside the handled set.
 */
13062 store_membase_reg_to_store_membase_imm (int opcode)
13065 case OP_STORE_MEMBASE_REG:
13066 return OP_STORE_MEMBASE_IMM;
13067 case OP_STOREI1_MEMBASE_REG:
13068 return OP_STOREI1_MEMBASE_IMM;
13069 case OP_STOREI2_MEMBASE_REG:
13070 return OP_STOREI2_MEMBASE_IMM;
13071 case OP_STOREI4_MEMBASE_REG:
13072 return OP_STOREI4_MEMBASE_IMM;
13073 case OP_STOREI8_MEMBASE_REG:
13074 return OP_STOREI8_MEMBASE_IMM;
/* Unhandled store opcode: caller passed something outside the mapping */
13076 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to the variant which takes an
 * immediate as its second operand (e.g. IADD -> IADD_IMM), so a constant
 * operand can be folded directly into the instruction. Includes x86/amd64
 * specific memory-operand forms under the corresponding TARGET_* ifdefs.
 */
13083 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
13087 return OP_IADD_IMM;
13089 return OP_ISUB_IMM;
13091 return OP_IDIV_IMM;
13093 return OP_IDIV_UN_IMM;
13095 return OP_IREM_IMM;
13097 return OP_IREM_UN_IMM;
13099 return OP_IMUL_IMM;
13101 return OP_IAND_IMM;
13105 return OP_IXOR_IMM;
13107 return OP_ISHL_IMM;
13109 return OP_ISHR_IMM;
13111 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU ops */
13114 return OP_LADD_IMM;
13116 return OP_LSUB_IMM;
13118 return OP_LAND_IMM;
13122 return OP_LXOR_IMM;
13124 return OP_LSHL_IMM;
13126 return OP_LSHR_IMM;
13128 return OP_LSHR_UN_IMM;
/* LREM by immediate is only native on 64 bit registers */
13129 #if SIZEOF_REGISTER == 8
13131 return OP_LREM_IMM;
/* compares */
13135 return OP_COMPARE_IMM;
13137 return OP_ICOMPARE_IMM;
13139 return OP_LCOMPARE_IMM;
/* stores: immediate value instead of source register */
13141 case OP_STORE_MEMBASE_REG:
13142 return OP_STORE_MEMBASE_IMM;
13143 case OP_STOREI1_MEMBASE_REG:
13144 return OP_STOREI1_MEMBASE_IMM;
13145 case OP_STOREI2_MEMBASE_REG:
13146 return OP_STOREI2_MEMBASE_IMM;
13147 case OP_STOREI4_MEMBASE_REG:
13148 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific forms: x86/amd64 allow immediates in push/compare */
13150 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13152 return OP_X86_PUSH_IMM;
13153 case OP_X86_COMPARE_MEMBASE_REG:
13154 return OP_X86_COMPARE_MEMBASE_IMM;
13156 #if defined(TARGET_AMD64)
13157 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13158 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* a call through a constant address becomes a direct call */
13160 case OP_VOIDCALL_REG:
13161 return OP_VOIDCALL;
13169 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the IR OP_LOAD*_MEMBASE
 * opcode with the matching size/signedness. Native-int and object-reference
 * loads both map to the pointer-sized OP_LOAD_MEMBASE. Asserts on anything
 * outside the LDIND family.
 */
13176 ldind_to_load_membase (int opcode)
13180 return OP_LOADI1_MEMBASE;
13182 return OP_LOADU1_MEMBASE;
13184 return OP_LOADI2_MEMBASE;
13186 return OP_LOADU2_MEMBASE;
13188 return OP_LOADI4_MEMBASE;
13190 return OP_LOADU4_MEMBASE;
13192 return OP_LOAD_MEMBASE;
13193 case CEE_LDIND_REF:
13194 return OP_LOAD_MEMBASE;
13196 return OP_LOADI8_MEMBASE;
13198 return OP_LOADR4_MEMBASE;
13200 return OP_LOADR8_MEMBASE;
/* not a CEE_LDIND_* opcode */
13202 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the IR
 * OP_STORE*_MEMBASE_REG opcode of the matching width. Object-reference
 * stores use the pointer-sized OP_STORE_MEMBASE_REG. Asserts on anything
 * outside the STIND family.
 */
13209 stind_to_store_membase (int opcode)
13213 return OP_STOREI1_MEMBASE_REG;
13215 return OP_STOREI2_MEMBASE_REG;
13217 return OP_STOREI4_MEMBASE_REG;
13219 case CEE_STIND_REF:
13220 return OP_STORE_MEMBASE_REG;
13222 return OP_STOREI8_MEMBASE_REG;
13224 return OP_STORER4_MEMBASE_REG;
13226 return OP_STORER8_MEMBASE_REG;
/* not a CEE_STIND_* opcode */
13228 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (load from [basereg+offset]) opcode to the
 * OP_LOAD*_MEM form which loads from an absolute address, so a constant base
 * address can be folded in. Only done on x86/amd64, which support absolute
 * addressing; 8-byte loads only when registers are 64 bit wide.
 */
13235 mono_load_membase_to_load_mem (int opcode)
13237 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13238 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13240 case OP_LOAD_MEMBASE:
13241 return OP_LOAD_MEM;
13242 case OP_LOADU1_MEMBASE:
13243 return OP_LOADU1_MEM;
13244 case OP_LOADU2_MEMBASE:
13245 return OP_LOADU2_MEM;
13246 case OP_LOADI4_MEMBASE:
13247 return OP_LOADI4_MEM;
13248 case OP_LOADU4_MEMBASE:
13249 return OP_LOADU4_MEM;
13250 #if SIZEOF_REGISTER == 8
13251 case OP_LOADI8_MEMBASE:
13252 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Map an ALU opcode to the x86/amd64 read-modify-write form whose
 * destination is a memory location ([basereg+offset]), given that the result
 * was being stored with STORE_OPCODE. The store opcode must be one of the
 * pointer-sized/4-byte (and on amd64 also 8-byte) register-store forms,
 * since only those match the memory operand width of the RMW instructions.
 */
13261 op_to_op_dest_membase (int store_opcode, int opcode)
13263 #if defined(TARGET_X86)
/* only fold into stores whose width matches the 32 bit RMW forms */
13264 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register second operand */
13269 return OP_X86_ADD_MEMBASE_REG;
13271 return OP_X86_SUB_MEMBASE_REG;
13273 return OP_X86_AND_MEMBASE_REG;
13275 return OP_X86_OR_MEMBASE_REG;
13277 return OP_X86_XOR_MEMBASE_REG;
/* immediate second operand */
13280 return OP_X86_ADD_MEMBASE_IMM;
13283 return OP_X86_SUB_MEMBASE_IMM;
13286 return OP_X86_AND_MEMBASE_IMM;
13289 return OP_X86_OR_MEMBASE_IMM;
13292 return OP_X86_XOR_MEMBASE_IMM;
13298 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8-byte stores */
13299 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* memory-destination opcodes */
13304 return OP_X86_ADD_MEMBASE_REG;
13306 return OP_X86_SUB_MEMBASE_REG;
13308 return OP_X86_AND_MEMBASE_REG;
13310 return OP_X86_OR_MEMBASE_REG;
13312 return OP_X86_XOR_MEMBASE_REG;
13314 return OP_X86_ADD_MEMBASE_IMM;
13316 return OP_X86_SUB_MEMBASE_IMM;
13318 return OP_X86_AND_MEMBASE_IMM;
13320 return OP_X86_OR_MEMBASE_IMM;
13322 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops use the AMD64_* memory-destination opcodes */
13324 return OP_AMD64_ADD_MEMBASE_REG;
13326 return OP_AMD64_SUB_MEMBASE_REG;
13328 return OP_AMD64_AND_MEMBASE_REG;
13330 return OP_AMD64_OR_MEMBASE_REG;
13332 return OP_AMD64_XOR_MEMBASE_REG;
13335 return OP_AMD64_ADD_MEMBASE_IMM;
13338 return OP_AMD64_SUB_MEMBASE_IMM;
13341 return OP_AMD64_AND_MEMBASE_IMM;
13344 return OP_AMD64_OR_MEMBASE_IMM;
13347 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-and-set opcode whose 1-byte result is immediately stored
 * into a combined setcc-to-memory opcode (x86/amd64 only). Only fires when
 * the store is the 1-byte OP_STOREI1_MEMBASE_REG, matching the setcc width.
 */
13357 op_to_op_store_membase (int store_opcode, int opcode)
13359 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13362 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13363 return OP_X86_SETEQ_MEMBASE;
13365 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13366 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the FIRST source operand of
 * OPCODE into a single instruction with a memory operand (x86/amd64 only).
 * The load width must match the operation width, so only pointer-sized /
 * 4-byte (and on amd64, 8-byte) loads are considered.
 */
13374 op_to_op_src1_membase (int load_opcode, int opcode)
13377 /* FIXME: This has sign extension issues */
/* special case: 1-byte unsigned load compared against an immediate */
13379 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13380 return OP_X86_COMPARE_MEMBASE8_IMM;
13383 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13388 return OP_X86_PUSH_MEMBASE;
13389 case OP_COMPARE_IMM:
13390 case OP_ICOMPARE_IMM:
13391 return OP_X86_COMPARE_MEMBASE_IMM;
13394 return OP_X86_COMPARE_MEMBASE_REG;
13398 #ifdef TARGET_AMD64
13399 /* FIXME: This has sign extension issues */
13401 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13402 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32) pointers are 4 bytes, so 8-byte loads can't be pushed */
13407 #ifdef __mono_ilp32__
13408 if (load_opcode == OP_LOADI8_MEMBASE)
13410 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13412 return OP_X86_PUSH_MEMBASE;
13414 /* FIXME: This only works for 32 bit immediates
13415 case OP_COMPARE_IMM:
13416 case OP_LCOMPARE_IMM:
13417 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13418 return OP_AMD64_COMPARE_MEMBASE_IMM;
13420 case OP_ICOMPARE_IMM:
13421 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13422 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* register compares: pick the 4 vs 8 byte form to match the load width */
13426 #ifdef __mono_ilp32__
13427 if (load_opcode == OP_LOAD_MEMBASE)
13428 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13429 if (load_opcode == OP_LOADI8_MEMBASE)
13431 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13433 return OP_AMD64_COMPARE_MEMBASE_REG;
13436 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13437 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the SECOND source operand of
 * OPCODE into a single reg-op-memory instruction (x86/amd64 only). As with
 * src1 folding, the load width must match the operation width.
 */
13446 op_to_op_src2_membase (int load_opcode, int opcode)
13449 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13455 return OP_X86_COMPARE_REG_MEMBASE;
13457 return OP_X86_ADD_REG_MEMBASE;
13459 return OP_X86_SUB_REG_MEMBASE;
13461 return OP_X86_AND_REG_MEMBASE;
13463 return OP_X86_OR_REG_MEMBASE;
13465 return OP_X86_XOR_REG_MEMBASE;
13469 #ifdef TARGET_AMD64
/* under ILP32, pointer-sized loads are 4 bytes and take the 32 bit forms */
13470 #ifdef __mono_ilp32__
13471 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13473 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13477 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13479 return OP_X86_ADD_REG_MEMBASE;
13481 return OP_X86_SUB_REG_MEMBASE;
13483 return OP_X86_AND_REG_MEMBASE;
13485 return OP_X86_OR_REG_MEMBASE;
13487 return OP_X86_XOR_REG_MEMBASE;
/* 8-byte (and on LP64, pointer-sized) loads take the 64 bit forms */
13489 #ifdef __mono_ilp32__
13490 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13492 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13497 return OP_AMD64_COMPARE_REG_MEMBASE;
13499 return OP_AMD64_ADD_REG_MEMBASE;
13501 return OP_AMD64_SUB_REG_MEMBASE;
13503 return OP_AMD64_AND_REG_MEMBASE;
13505 return OP_AMD64_OR_REG_MEMBASE;
13507 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which are
 * emulated in software on the current architecture (long shifts on 32 bit
 * targets without native support, emulated mul/div/rem), since the emulation
 * helpers take register operands. Falls through to mono_op_to_op_imm () for
 * everything else.
 */
13516 mono_op_to_op_imm_noemul (int opcode)
13519 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13525 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13532 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13537 return mono_op_to_op_imm (opcode);
13542 * mono_handle_global_vregs:
13544 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13548 mono_handle_global_vregs (MonoCompile *cfg)
13550 gint32 *vreg_to_bb;
13551 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps each vreg to (block_num + 1) of the single bblock using it,
 * or -1 once it has been seen in more than one bblock.
 * NOTE(review): element type is gint32 but the size is computed with
 * sizeof (gint32*), and the "+ 1" is not scaled by the element size — this
 * over-allocates on 64 bit but looks like it was meant to be
 * sizeof (gint32) * (cfg->next_vreg + 1). Harmless, worth cleaning up.
 */
13554 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13556 #ifdef MONO_ARCH_SIMD_INTRINSICS
13557 if (cfg->uses_simd_intrinsics)
13558 mono_simd_simplify_indirection (cfg);
13561 /* Find local vregs used in more than one bb */
13562 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13563 MonoInst *ins = bb->code;
13564 int block_num = bb->block_num;
13566 if (cfg->verbose_level > 2)
13567 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13570 for (; ins; ins = ins->next) {
13571 const char *spec = INS_INFO (ins->opcode);
13572 int regtype = 0, regindex;
13575 if (G_UNLIKELY (cfg->verbose_level > 2))
13576 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
13578 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dest, src1, src2, src3 of the instruction in turn */
13580 for (regindex = 0; regindex < 4; regindex ++) {
13583 if (regindex == 0) {
13584 regtype = spec [MONO_INST_DEST];
13585 if (regtype == ' ')
13588 } else if (regindex == 1) {
13589 regtype = spec [MONO_INST_SRC1];
13590 if (regtype == ' ')
13593 } else if (regindex == 2) {
13594 regtype = spec [MONO_INST_SRC2];
13595 if (regtype == ' ')
13598 } else if (regindex == 3) {
13599 regtype = spec [MONO_INST_SRC3];
13600 if (regtype == ' ')
13605 #if SIZEOF_REGISTER == 4
13606 /* In the LLVM case, the long opcodes are not decomposed */
13607 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13609 * Since some instructions reference the original long vreg,
13610 * and some reference the two component vregs, it is quite hard
13611 * to determine when it needs to be global. So be conservative.
13613 if (!get_vreg_to_inst (cfg, vreg)) {
13614 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13616 if (cfg->verbose_level > 2)
13617 printf ("LONG VREG R%d made global.\n", vreg);
13621 * Make the component vregs volatile since the optimizations can
13622 * get confused otherwise.
13624 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13625 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13629 g_assert (vreg != -1);
13631 prev_bb = vreg_to_bb [vreg];
13632 if (prev_bb == 0) {
13633 /* 0 is a valid block num */
13634 vreg_to_bb [vreg] = block_num + 1;
13635 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are global by definition, skip them */
13636 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* first time this vreg is seen crossing bblocks: create a variable for it */
13639 if (!get_vreg_to_inst (cfg, vreg)) {
13640 if (G_UNLIKELY (cfg->verbose_level > 2))
13641 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick a variable type matching the regtype recorded in the ins spec */
13645 if (vreg_is_ref (cfg, vreg))
13646 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13648 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13651 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13654 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13657 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13660 g_assert_not_reached ();
13664 /* Flag as having been used in more than one bb */
13665 vreg_to_bb [vreg] = -1;
13671 /* If a variable is used in only one bblock, convert it into a local vreg */
13672 for (i = 0; i < cfg->num_varinfo; i++) {
13673 MonoInst *var = cfg->varinfo [i];
13674 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13676 switch (var->type) {
13682 #if SIZEOF_REGISTER == 8
13685 #if !defined(TARGET_X86)
13686 /* Enabling this screws up the fp stack on x86 */
13689 if (mono_arch_is_soft_float ())
13692 /* Arguments are implicitly global */
13693 /* Putting R4 vars into registers doesn't work currently */
13694 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13695 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13697 * Make that the variable's liveness interval doesn't contain a call, since
13698 * that would cause the lvreg to be spilled, making the whole optimization
13701 /* This is too slow for JIT compilation */
13703 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13705 int def_index, call_index, ins_index;
13706 gboolean spilled = FALSE;
/* scan the single bblock using the var, looking for a call between def and use */
13711 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13712 const char *spec = INS_INFO (ins->opcode);
13714 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13715 def_index = ins_index;
/*
 * NOTE(review): both sides of this || test SRC1/sreg1 — the second
 * clause is a duplicate and almost certainly should be
 * (spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg),
 * so uses through the second source operand are currently missed
 * by the spill check. Verify against upstream before changing.
 */
13717 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13718 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13719 if (call_index > def_index) {
13725 if (MONO_IS_CALL (ins))
13726 call_index = ins_index;
/* safe: demote the variable back to a plain local vreg */
13736 if (G_UNLIKELY (cfg->verbose_level > 2))
13737 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13738 var->flags |= MONO_INST_IS_DEAD;
13739 cfg->vreg_to_inst [var->dreg] = NULL;
13746 * Compress the varinfo and vars tables so the liveness computation is faster and
13747 * takes up less space.
13750 for (i = 0; i < cfg->num_varinfo; ++i) {
13751 MonoInst *var = cfg->varinfo [i];
13752 if (pos < i && cfg->locals_start == i)
13753 cfg->locals_start = pos;
13754 if (!(var->flags & MONO_INST_IS_DEAD)) {
13756 cfg->varinfo [pos] = cfg->varinfo [i];
13757 cfg->varinfo [pos]->inst_c0 = pos;
13758 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13759 cfg->vars [pos].idx = pos;
13760 #if SIZEOF_REGISTER == 4
13761 if (cfg->varinfo [pos]->type == STACK_I8) {
13762 /* Modify the two component vars too */
13765 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13766 var1->inst_c0 = pos;
13767 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13768 var1->inst_c0 = pos;
13775 cfg->num_varinfo = pos;
13776 if (cfg->locals_start > cfg->num_varinfo)
13777 cfg->locals_start = cfg->num_varinfo;
13781 * mono_spill_global_vars:
13783 * Generate spill code for variables which are not allocated to registers,
13784 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13785 * code is generated which could be optimized by the local optimization passes.
 * cfg: the method being compiled. need_local_opts: out parameter; initialized to
 * FALSE and set to TRUE when newly emitted code (e.g. decomposed LDADDR) would
 * benefit from another run of the local optimization passes.
13788 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13790 MonoBasicBlock *bb;
13792 int orig_next_vreg;
13793 guint32 *vreg_to_lvreg;
13795 guint32 i, lvregs_len;
13796 gboolean dest_has_lvreg = FALSE;
13797 guint32 stacktypes [128];
13798 MonoInst **live_range_start, **live_range_end;
13799 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13800 int *gsharedvt_vreg_to_idx = NULL;
13802 *need_local_opts = FALSE;
13804 memset (spec2, 0, sizeof (spec2));
13806 /* FIXME: Move this function to mini.c */
/* Map the ins-spec register-type characters to the stack type used when
 * allocating a replacement dreg/sreg for a spilled variable. */
13807 stacktypes ['i'] = STACK_PTR;
13808 stacktypes ['l'] = STACK_I8;
13809 stacktypes ['f'] = STACK_R8;
13810 #ifdef MONO_ARCH_SIMD_INTRINSICS
13811 stacktypes ['x'] = STACK_VTYPE;
13814 #if SIZEOF_REGISTER == 4
13815 /* Create MonoInsts for longs */
/* On 32-bit targets a 64-bit variable lives in two word-sized slots:
 * dreg + 1 addresses the least-significant word (MINI_LS_WORD_OFFSET) and
 * dreg + 2 the most-significant word (MINI_MS_WORD_OFFSET). */
13816 for (i = 0; i < cfg->num_varinfo; i++) {
13817 MonoInst *ins = cfg->varinfo [i];
13819 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13820 switch (ins->type) {
13825 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13828 g_assert (ins->opcode == OP_REGOFFSET);
13830 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13832 tree->opcode = OP_REGOFFSET;
13833 tree->inst_basereg = ins->inst_basereg;
13834 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13836 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13838 tree->opcode = OP_REGOFFSET;
13839 tree->inst_basereg = ins->inst_basereg;
13840 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13850 if (cfg->compute_gc_maps) {
13851 /* variables allocated to registers need liveness info even when they hold non-reference values */
13852 for (i = 0; i < cfg->num_varinfo; i++) {
13853 MonoInst *ins = cfg->varinfo [i];
13855 if (ins->opcode == OP_REGVAR)
13856 ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, classify each gsharedvt-typed variable.
 * gsharedvt_vreg_to_idx is zero-filled, so for locals the info-slot index is
 * stored as idx + 1 (0 means "not a gsharedvt variable"); arguments passed by
 * reference are marked with -1. */
13860 if (cfg->gsharedvt) {
13861 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13863 for (i = 0; i < cfg->num_varinfo; ++i) {
13864 MonoInst *ins = cfg->varinfo [i];
13867 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13868 if (i >= cfg->locals_start) {
13870 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13871 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13872 ins->opcode = OP_GSHAREDVT_LOCAL;
13873 ins->inst_imm = idx;
13876 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13877 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13883 /* FIXME: widening and truncation */
13886 * As an optimization, when a variable allocated to the stack is first loaded into
13887 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13888 * the variable again.
13890 orig_next_vreg = cfg->next_vreg;
13891 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13892 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13896 * These arrays contain the first and last instructions accessing a given
13898 * Since we emit bblocks in the same order we process them here, and we
13899 * don't split live ranges, these will precisely describe the live range of
13900 * the variable, i.e. the instruction range where a valid value can be found
13901 * in the variables location.
13902 * The live range is computed using the liveness info computed by the liveness pass.
13903 * We can't use vmv->range, since that is an abstract live range, and we need
13904 * one which is instruction precise.
13905 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13907 /* FIXME: Only do this if debugging info is requested */
13908 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13909 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13910 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13911 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13913 /* Add spill loads/stores */
13914 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13917 if (cfg->verbose_level > 2)
13918 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13920 /* Clear vreg_to_lvreg array */
/* Cached lvregs are only valid within one bblock, so reset the cache at each
 * bblock boundary. */
13921 for (i = 0; i < lvregs_len; i++)
13922 vreg_to_lvreg [lvregs [i]] = 0;
13926 MONO_BB_FOR_EACH_INS (bb, ins) {
13927 const char *spec = INS_INFO (ins->opcode);
13928 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13929 gboolean store, no_lvreg;
13930 int sregs [MONO_MAX_SRC_REGS];
13932 if (G_UNLIKELY (cfg->verbose_level > 2))
13933 mono_print_ins (ins);
13935 if (ins->opcode == OP_NOP)
13939 * We handle LDADDR here as well, since it can only be decomposed
13940 * when variable addresses are known.
13942 if (ins->opcode == OP_LDADDR) {
13943 MonoInst *var = ins->inst_p0;
13945 if (var->opcode == OP_VTARG_ADDR) {
13946 /* Happens on SPARC/S390 where vtypes are passed by reference */
13947 MonoInst *vtaddr = var->inst_left;
13948 if (vtaddr->opcode == OP_REGVAR) {
13949 ins->opcode = OP_MOVE;
13950 ins->sreg1 = vtaddr->dreg;
13952 else if (var->inst_left->opcode == OP_REGOFFSET) {
13953 ins->opcode = OP_LOAD_MEMBASE;
13954 ins->inst_basereg = vtaddr->inst_basereg;
13955 ins->inst_offset = vtaddr->inst_offset;
13958 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13959 /* gsharedvt arg passed by ref */
13960 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13962 ins->opcode = OP_LOAD_MEMBASE;
13963 ins->inst_basereg = var->inst_basereg;
13964 ins->inst_offset = var->inst_offset;
13965 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13966 MonoInst *load, *load2, *load3;
13967 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13968 int reg1, reg2, reg3;
13969 MonoInst *info_var = cfg->gsharedvt_info_var;
13970 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13974 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13977 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13979 g_assert (info_var);
13980 g_assert (locals_var);
13982 /* Mark the instruction used to compute the locals var as used */
13983 cfg->gsharedvt_locals_var_ins = NULL;
13985 /* Load the offset */
13986 if (info_var->opcode == OP_REGOFFSET) {
13987 reg1 = alloc_ireg (cfg);
13988 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13989 } else if (info_var->opcode == OP_REGVAR) {
13991 reg1 = info_var->dreg;
13993 g_assert_not_reached ();
13995 reg2 = alloc_ireg (cfg);
13996 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13997 /* Load the locals area address */
13998 reg3 = alloc_ireg (cfg);
13999 if (locals_var->opcode == OP_REGOFFSET) {
14000 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14001 } else if (locals_var->opcode == OP_REGVAR) {
14002 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14004 g_assert_not_reached ();
14006 /* Compute the address */
14007 ins->opcode = OP_PADD;
/* Insert the three helper loads before 'ins' so they execute in
 * load -> load2 -> load3 order. */
14011 mono_bblock_insert_before_ins (bb, ins, load3);
14012 mono_bblock_insert_before_ins (bb, load3, load2);
14014 mono_bblock_insert_before_ins (bb, load2, load);
14016 g_assert (var->opcode == OP_REGOFFSET);
14018 ins->opcode = OP_ADD_IMM;
14019 ins->sreg1 = var->inst_basereg;
14020 ins->inst_imm = var->inst_offset;
14023 *need_local_opts = TRUE;
14024 spec = INS_INFO (ins->opcode);
/* By this point every IL-level opcode should have been lowered away. */
14027 if (ins->opcode < MONO_CEE_LAST) {
14028 mono_print_ins (ins);
14029 g_assert_not_reached ();
14033 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 of store_membase opcodes so the base register
 * can be processed like an ordinary source; the swap is undone further below. */
14037 if (MONO_IS_STORE_MEMBASE (ins)) {
14038 tmp_reg = ins->dreg;
14039 ins->dreg = ins->sreg2;
14040 ins->sreg2 = tmp_reg;
14043 spec2 [MONO_INST_DEST] = ' ';
14044 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14045 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14046 spec2 [MONO_INST_SRC3] = ' ';
14048 } else if (MONO_IS_STORE_MEMINDEX (ins))
14049 g_assert_not_reached ();
14054 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14055 printf ("\t %.3s %d", spec, ins->dreg);
14056 num_sregs = mono_inst_get_src_registers (ins, sregs);
14057 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14058 printf (" %d", sregs [srcindex]);
/* --- Handle the destination register: if dreg is a global variable, either
 * rename it to the variable's hreg (OP_REGVAR) or emit/fuse a store to its
 * stack slot (OP_REGOFFSET). --- */
14065 regtype = spec [MONO_INST_DEST];
14066 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14069 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14070 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14071 MonoInst *store_ins;
14073 MonoInst *def_ins = ins;
14074 int dreg = ins->dreg; /* The original vreg */
14076 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14078 if (var->opcode == OP_REGVAR) {
14079 ins->dreg = var->dreg;
14080 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14082 * Instead of emitting a load+store, use a _membase opcode.
14084 g_assert (var->opcode == OP_REGOFFSET);
14085 if (ins->opcode == OP_MOVE) {
14089 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14090 ins->inst_basereg = var->inst_basereg;
14091 ins->inst_offset = var->inst_offset;
14094 spec = INS_INFO (ins->opcode);
14098 g_assert (var->opcode == OP_REGOFFSET);
14100 prev_dreg = ins->dreg;
14102 /* Invalidate any previous lvreg for this vreg */
14103 vreg_to_lvreg [ins->dreg] = 0;
/* Under soft-float, R8 values are stored as I8 bit patterns. */
14107 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14109 store_opcode = OP_STOREI8_MEMBASE_REG;
14112 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14114 #if SIZEOF_REGISTER != 8
14115 if (regtype == 'l') {
/* 64-bit store on a 32-bit target: spill the two word halves separately. */
14116 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14117 mono_bblock_insert_after_ins (bb, ins, store_ins);
14118 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14119 mono_bblock_insert_after_ins (bb, ins, store_ins);
14120 def_ins = store_ins;
14125 g_assert (store_opcode != OP_STOREV_MEMBASE);
14127 /* Try to fuse the store into the instruction itself */
14128 /* FIXME: Add more instructions */
14129 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14130 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14131 ins->inst_imm = ins->inst_c0;
14132 ins->inst_destbasereg = var->inst_basereg;
14133 ins->inst_offset = var->inst_offset;
14134 spec = INS_INFO (ins->opcode);
14135 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14136 ins->opcode = store_opcode;
14137 ins->inst_destbasereg = var->inst_basereg;
14138 ins->inst_offset = var->inst_offset;
/* The instruction became a store_membase; apply the same dreg/sreg2 swap
 * used for other store_membase opcodes above. */
14142 tmp_reg = ins->dreg;
14143 ins->dreg = ins->sreg2;
14144 ins->sreg2 = tmp_reg;
14147 spec2 [MONO_INST_DEST] = ' ';
14148 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14149 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14150 spec2 [MONO_INST_SRC3] = ' ';
14152 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14153 // FIXME: The backends expect the base reg to be in inst_basereg
14154 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14156 ins->inst_basereg = var->inst_basereg;
14157 ins->inst_offset = var->inst_offset;
14158 spec = INS_INFO (ins->opcode);
14160 /* printf ("INS: "); mono_print_ins (ins); */
14161 /* Create a store instruction */
14162 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14164 /* Insert it after the instruction */
14165 mono_bblock_insert_after_ins (bb, ins, store_ins);
14167 def_ins = store_ins;
14170 * We can't assign ins->dreg to var->dreg here, since the
14171 * sregs could use it. So set a flag, and do it after
14174 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14175 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for live-range tracking. */
14180 if (def_ins && !live_range_start [dreg]) {
14181 live_range_start [dreg] = def_ins;
14182 live_range_start_bb [dreg] = bb;
14185 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14188 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14189 tmp->inst_c1 = dreg;
14190 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* --- Handle source registers: rename to hregs, reuse cached lvregs, fuse
 * loads into the instruction, or emit explicit loads from the stack slot. --- */
14197 num_sregs = mono_inst_get_src_registers (ins, sregs);
14198 for (srcindex = 0; srcindex < 3; ++srcindex) {
14199 regtype = spec [MONO_INST_SRC1 + srcindex];
14200 sreg = sregs [srcindex];
14202 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14203 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14204 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14205 MonoInst *use_ins = ins;
14206 MonoInst *load_ins;
14207 guint32 load_opcode;
14209 if (var->opcode == OP_REGVAR) {
14210 sregs [srcindex] = var->dreg;
14211 //mono_inst_set_src_registers (ins, sregs);
14212 live_range_end [sreg] = use_ins;
14213 live_range_end_bb [sreg] = bb;
14215 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14218 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14219 /* var->dreg is a hreg */
14220 tmp->inst_c1 = sreg;
14221 mono_bblock_insert_after_ins (bb, ins, tmp);
14227 g_assert (var->opcode == OP_REGOFFSET);
14229 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14231 g_assert (load_opcode != OP_LOADV_MEMBASE);
14233 if (vreg_to_lvreg [sreg]) {
14234 g_assert (vreg_to_lvreg [sreg] != -1);
14236 /* The variable is already loaded to an lvreg */
14237 if (G_UNLIKELY (cfg->verbose_level > 2))
14238 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14239 sregs [srcindex] = vreg_to_lvreg [sreg];
14240 //mono_inst_set_src_registers (ins, sregs);
14244 /* Try to fuse the load into the instruction */
14245 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14246 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14247 sregs [0] = var->inst_basereg;
14248 //mono_inst_set_src_registers (ins, sregs);
14249 ins->inst_offset = var->inst_offset;
14250 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14251 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14252 sregs [1] = var->inst_basereg;
14253 //mono_inst_set_src_registers (ins, sregs);
14254 ins->inst_offset = var->inst_offset;
14256 if (MONO_IS_REAL_MOVE (ins)) {
14257 ins->opcode = OP_NOP;
14260 //printf ("%d ", srcindex); mono_print_ins (ins);
14262 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded value as an lvreg for later uses, unless the
 * variable is volatile/indirect or lives on the fp stack. */
14264 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14265 if (var->dreg == prev_dreg) {
14267 * sreg refers to the value loaded by the load
14268 * emitted below, but we need to use ins->dreg
14269 * since it refers to the store emitted earlier.
14273 g_assert (sreg != -1);
14274 vreg_to_lvreg [var->dreg] = sreg;
14275 g_assert (lvregs_len < 1024);
14276 lvregs [lvregs_len ++] = var->dreg;
14280 sregs [srcindex] = sreg;
14281 //mono_inst_set_src_registers (ins, sregs);
14283 #if SIZEOF_REGISTER != 8
14284 if (regtype == 'l') {
/* 64-bit load on a 32-bit target: load MS word then LS word. */
14285 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14286 mono_bblock_insert_before_ins (bb, ins, load_ins);
14287 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14288 mono_bblock_insert_before_ins (bb, ins, load_ins);
14289 use_ins = load_ins;
14294 #if SIZEOF_REGISTER == 4
14295 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14297 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14298 mono_bblock_insert_before_ins (bb, ins, load_ins);
14299 use_ins = load_ins;
14303 if (var->dreg < orig_next_vreg) {
14304 live_range_end [var->dreg] = use_ins;
14305 live_range_end_bb [var->dreg] = bb;
14308 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14311 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14312 tmp->inst_c1 = var->dreg;
14313 mono_bblock_insert_after_ins (bb, ins, tmp);
14317 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs are processed it
 * is safe to cache ins->dreg as the lvreg for the stored variable. */
14319 if (dest_has_lvreg) {
14320 g_assert (ins->dreg != -1);
14321 vreg_to_lvreg [prev_dreg] = ins->dreg;
14322 g_assert (lvregs_len < 1024);
14323 lvregs [lvregs_len ++] = prev_dreg;
14324 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap applied to store_membase opcodes above. */
14328 tmp_reg = ins->dreg;
14329 ins->dreg = ins->sreg2;
14330 ins->sreg2 = tmp_reg;
/* Calls can clobber registers, so cached lvregs are invalid afterwards. */
14333 if (MONO_IS_CALL (ins)) {
14334 /* Clear vreg_to_lvreg array */
14335 for (i = 0; i < lvregs_len; i++)
14336 vreg_to_lvreg [lvregs [i]] = 0;
14338 } else if (ins->opcode == OP_NOP) {
14340 MONO_INST_NULLIFY_SREGS (ins);
14343 if (cfg->verbose_level > 2)
14344 mono_print_ins_index (1, ins);
14347 /* Extend the live range based on the liveness info */
14348 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14349 for (i = 0; i < cfg->num_varinfo; i ++) {
14350 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14352 if (vreg_is_volatile (cfg, vi->vreg))
14353 /* The liveness info is incomplete */
14356 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14357 /* Live from at least the first ins of this bb */
14358 live_range_start [vi->vreg] = bb->code;
14359 live_range_start_bb [vi->vreg] = bb;
14362 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14363 /* Live at least until the last ins of this bb */
14364 live_range_end [vi->vreg] = bb->last_ins;
14365 live_range_end_bb [vi->vreg] = bb;
14371 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14373 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14374 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14376 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14377 for (i = 0; i < cfg->num_varinfo; ++i) {
14378 int vreg = MONO_VARINFO (cfg, i)->vreg;
14381 if (live_range_start [vreg]) {
14382 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14384 ins->inst_c1 = vreg;
14385 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14387 if (live_range_end [vreg]) {
14388 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14390 ins->inst_c1 = vreg;
14391 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14392 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14394 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14400 if (cfg->gsharedvt_locals_var_ins) {
14401 /* Nullify if unused */
14402 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14403 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14406 g_free (live_range_start);
14407 g_free (live_range_end);
14408 g_free (live_range_start_bb);
14409 g_free (live_range_end_bb);
14414 * - use 'iadd' instead of 'int_add'
14415 * - handling ovf opcodes: decompose in method_to_ir.
14416 * - unify iregs/fregs
14417 * -> partly done, the missing parts are:
14418 * - a more complete unification would involve unifying the hregs as well, so
14419 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14420 * would no longer map to the machine hregs, so the code generators would need to
14421 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14422 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14423 * fp/non-fp branches speeds it up by about 15%.
14424 * - use sext/zext opcodes instead of shifts
14426 * - get rid of TEMPLOADs if possible and use vregs instead
14427 * - clean up usage of OP_P/OP_ opcodes
14428 * - cleanup usage of DUMMY_USE
14429 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14431 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14432 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14433 * - make sure handle_stack_args () is called before the branch is emitted
14434 * - when the new IR is done, get rid of all unused stuff
14435 * - COMPARE/BEQ as separate instructions or unify them ?
14436 * - keeping them separate allows specialized compare instructions like
14437 * compare_imm, compare_membase
14438 * - most back ends unify fp compare+branch, fp compare+ceq
14439 * - integrate mono_save_args into inline_method
14440 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14441 * - handle long shift opts on 32 bit platforms somehow: they require
14442 * 3 sregs (2 for arg1 and 1 for arg2)
14443 * - make byref a 'normal' type.
14444 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14445 * variable if needed.
14446 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14447 * like inline_method.
14448 * - remove inlining restrictions
14449 * - fix LNEG and enable cfold of INEG
14450 * - generalize x86 optimizations like ldelema as a peephole optimization
14451 * - add store_mem_imm for amd64
14452 * - optimize the loading of the interruption flag in the managed->native wrappers
14453 * - avoid special handling of OP_NOP in passes
14454 * - move code inserting instructions into one function/macro.
14455 * - try a coalescing phase after liveness analysis
14456 * - add float -> vreg conversion + local optimizations on !x86
14457 * - figure out how to handle decomposed branches during optimizations, ie.
14458 * compare+branch, op_jump_table+op_br etc.
14459 * - promote RuntimeXHandles to vregs
14460 * - vtype cleanups:
14461 * - add a NEW_VARLOADA_VREG macro
14462 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14463 * accessing vtype fields.
14464 * - get rid of I8CONST on 64 bit platforms
14465 * - dealing with the increase in code size due to branches created during opcode
14467 * - use extended basic blocks
14468 * - all parts of the JIT
14469 * - handle_global_vregs () && local regalloc
14470 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14471 * - sources of increase in code size:
14474 * - isinst and castclass
14475 * - lvregs not allocated to global registers even if used multiple times
14476 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14478 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14479 * - add all micro optimizations from the old JIT
14480 * - put tree optimizations into the deadce pass
14481 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14482 * specific function.
14483 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14484 * fcompare + branchCC.
14485 * - create a helper function for allocating a stack slot, taking into account
14486 * MONO_CFG_HAS_SPILLUP.
14488 * - merge the ia64 switch changes.
14489 * - optimize mono_regstate2_alloc_int/float.
14490 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14491 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14492 * parts of the tree could be separated by other instructions, killing the tree
14493 * arguments, or stores killing loads etc. Also, should we fold loads into other
14494 * instructions if the result of the load is used multiple times ?
14495 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14496 * - LAST MERGE: 108395.
14497 * - when returning vtypes in registers, generate IR and append it to the end of the
14498 * last bb instead of doing it in the epilog.
14499 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14507 - When to decompose opcodes:
14508 - earlier: this makes some optimizations hard to implement, since the low level IR
14509 no longer contains the necessary information. But it is easier to do.
14510 - later: harder to implement, enables more optimizations.
14511 - Branches inside bblocks:
14512 - created when decomposing complex opcodes.
14513 - branches to another bblock: harmless, but not tracked by the branch
14514 optimizations, so need to branch to a label at the start of the bblock.
14515 - branches to inside the same bblock: very problematic, trips up the local
14516 reg allocator. Can be fixed by splitting the current bblock, but that is a
14517 complex operation, since some local vregs can become global vregs etc.
14518 - Local/global vregs:
14519 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14520 local register allocator.
14521 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14522 structure, created by mono_create_var (). Assigned to hregs or the stack by
14523 the global register allocator.
14524 - When to do optimizations like alu->alu_imm:
14525 - earlier -> saves work later on since the IR will be smaller/simpler
14526 - later -> can work on more instructions
14527 - Handling of valuetypes:
14528 - When a vtype is pushed on the stack, a new temporary is created, an
14529 instruction computing its address (LDADDR) is emitted and pushed on
14530 the stack. Need to optimize cases when the vtype is used immediately as in
14531 argument passing, stloc etc.
14532 - Instead of the to_end stuff in the old JIT, simply call the function handling
14533 the values on the stack before emitting the last instruction of the bb.
14536 #endif /* DISABLE_JIT */