2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/monitor.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
69 #include "jit-icalls.h"
71 #include "debugger-agent.h"
72 #include "seq-points.h"
/* Inliner tuning constants: relative cost of a branch and the IL-length cap for inlining. */
74 #define BRANCH_COST 10
75 #define INLINE_LENGTH_LIMIT 20
77 /* These have 'cfg' as an implicit argument */
/* Each *_FAILURE macro records a failure on 'cfg' and jumps to the method-local exception_exit label. */
78 #define INLINE_FAILURE(msg) do { \
79 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
80 inline_failure (cfg, msg); \
81 goto exception_exit; /* abandon IR generation for this method */ \
84 #define CHECK_CFG_EXCEPTION do {\
85 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
86 goto exception_exit; \
88 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
89 method_access_failure ((cfg), (method), (cmethod)); \
90 goto exception_exit; \
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
112 #define DISABLE_AOT(cfg) do { \
113 if ((cfg)->verbose_level >= 2) \
114 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
115 (cfg)->disable_aot = TRUE; \
117 #define LOAD_ERROR do { \
118 break_on_unverified (); \
119 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
120 goto exception_exit; \
123 #define TYPE_LOAD_ERROR(klass) do { \
124 cfg->exception_ptr = klass; \
128 #define CHECK_CFG_ERROR do {\
129 if (!mono_error_ok (&cfg->error)) { \
130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
131 goto mono_error_exit; \
135 /* Determine whether 'ins' represents a load of the 'this' argument */
136 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations; the definitions appear later in this file. */
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
149 /* helper methods signatures */
/* Lazily filled in by mono_create_helper_signatures () below. */
150 static MonoMethodSignature *helper_sig_class_init_trampoline;
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
155 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
158 * Instruction metadata
/* First expansion of mini-ops.h: per-opcode dreg/sreg1/sreg2 register specifiers. */
166 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
167 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
173 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
178 /* keep in sync with the enum in mini.h */
181 #include "mini-ops.h"
/* Second expansion of mini-ops.h: number of source registers per opcode. */
186 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
187 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
189 * This should contain the index of the last sreg + 1. This is not the same
190 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
192 const gint8 ins_sreg_counts[] = {
193 #include "mini-ops.h"
198 #define MONO_INIT_VARINFO(vi,id) do { \
199 (vi)->range.first_use.pos.bid = 0xffff; /* sentinel: presumably "no first use yet" — confirm against liveness code */ \
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
/* The wrappers above and below are thin exported shims over the static alloc_*reg helpers. */
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode used for values of that type (OP_MOVE/OP_LMOVE/OP_FMOVE/...). */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (cfg, type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 type = &type->data.generic_class->container_class->byval_arg;
327 g_assert (cfg->generic_sharing_context);
328 if (mini_type_var_is_vt (cfg, type))
331 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
/* NOTE(review): message says "type_to_regstore" but this function is mono_type_to_regmove. */
333 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debugging aid: dump a basic block's in/out edges and its instruction list to stdout. */
339 mono_print_bb (MonoBasicBlock *bb, const char *msg)
344 printf ("\n%s %d: [IN: ", msg, bb->block_num);
345 for (i = 0; i < bb->in_count; ++i)
346 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
348 for (i = 0; i < bb->out_count; ++i)
349 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
351 for (tree = bb->code; tree; tree = tree->next)
352 mono_print_ins_index (-1, tree);
/* Build the icall signatures for the helper trampolines declared above. */
356 mono_create_helper_signatures (void)
358 helper_sig_domain_get = mono_create_icall_signature ("ptr");
359 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
360 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
361 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
362 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
363 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/* Hook for the --break-on-unverified debug option; a breakpoint can be set on this function. */
366 static MONO_NEVER_INLINE void
367 break_on_unverified (void)
369 if (mini_get_debug_options ()->break_on_unverified)
/* Record a method-access failure on 'cfg' with a descriptive message. */
373 static MONO_NEVER_INLINE void
374 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
376 char *method_fname = mono_method_full_name (method, TRUE);
377 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
379 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
380 g_free (method_fname);
381 g_free (cil_method_fname);
/* Record a field-access failure on 'cfg' with a descriptive message. */
384 static MONO_NEVER_INLINE void
385 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
387 char *method_fname = mono_method_full_name (method, TRUE);
388 char *field_fname = mono_field_full_name (field);
389 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
390 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
391 g_free (method_fname);
392 g_free (field_fname);
/* Record an inline failure on 'cfg'; 'msg' names the reason (traced at verbose_level >= 2). */
395 static MONO_NEVER_INLINE void
396 inline_failure (MonoCompile *cfg, const char *msg)
398 if (cfg->verbose_level >= 2)
399 printf ("inline failed: %s\n", msg);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
403 static MONO_NEVER_INLINE void
404 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* Mark the compile as failed due to generic sharing; optionally trace the
 * offending opcode (verbose_level > 2).  Fixed: the 'if' line ended with a
 * stray ' \' line-continuation — a leftover from when this lived in the
 * GENERIC_SHARING_FAILURE macro.  In a plain function it needlessly spliced
 * the 'if' with the printf line and would silently swallow any line inserted
 * between them.  NOTE(review): 'file' is accepted for symmetry with
 * gsharedvt_failure but is not printed here — confirm that is intentional. */
406 if (cfg->verbose_level > 2)
407 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Mark the compile as failed due to gsharedvt; stores a detailed message on the cfg. */
411 static MONO_NEVER_INLINE void
412 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
414 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
415 if (cfg->verbose_level >= 2)
416 printf ("%s\n", cfg->exception_message);
417 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
421 * When using gsharedvt, some instantiations might be verifiable and some might not be, i.e.
422 * foo<T> (int i) { ldarg.0; box T; }
424 #define UNVERIFIED do { \
425 if (cfg->gsharedvt) { \
426 if (cfg->verbose_level > 2) \
427 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
429 goto exception_exit; \
431 break_on_unverified (); \
435 #define GET_BBLOCK(cfg,tblock,ip) do { \
436 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
438 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
439 NEW_BBLOCK (cfg, (tblock)); \
440 (tblock)->cil_code = (ip); \
441 ADD_BBLOCK (cfg, (tblock)); /* register the newly created block with the cfg */ \
445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
446 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
447 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
448 (dest)->dreg = alloc_ireg_mp ((cfg)); \
449 (dest)->sreg1 = (sr1); \
450 (dest)->sreg2 = (sr2); \
451 (dest)->inst_imm = (imm); \
452 (dest)->backend.shift_amount = (shift); \
453 MONO_ADD_INS ((cfg)->cbb, (dest)); \
457 /* Emit conversions so both operands of a binary opcode are of the same type */
459 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
461 MonoInst *arg1 = *arg1_ref;
462 MonoInst *arg2 = *arg2_ref;
465 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
466 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
469 /* Mixing r4/r8 is allowed by the spec */
/* Widen whichever side is r4 to r8 so the binop sees matching operand types. */
470 if (arg1->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
474 conv->type = STACK_R8;
478 if (arg2->type == STACK_R4) {
479 int dreg = alloc_freg (cfg);
481 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
482 conv->type = STACK_R8;
488 #if SIZEOF_REGISTER == 8
489 /* FIXME: Need to add many more cases */
/* On 64-bit: sign-extend an i4 operand when the other side is pointer-sized. */
490 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
493 int dr = alloc_preg (cfg);
494 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
495 (ins)->sreg2 = widen->dreg;
/* The ADD_* macros below build an instruction from the top of the eval stack
 * ('sp'), resolve its type-specific opcode via type_from_op (), and push the
 * result back; they rely on locals of mono_method_to_ir (sp, ins, bblock, ...). */
500 #define ADD_BINOP(op) do { \
501 MONO_INST_NEW (cfg, ins, (op)); \
503 ins->sreg1 = sp [0]->dreg; \
504 ins->sreg2 = sp [1]->dreg; \
505 type_from_op (cfg, ins, sp [0], sp [1]); \
507 /* Have to insert a widening op */ \
508 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
509 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
510 MONO_ADD_INS ((cfg)->cbb, (ins)); \
511 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
514 #define ADD_UNOP(op) do { \
515 MONO_INST_NEW (cfg, ins, (op)); \
517 ins->sreg1 = sp [0]->dreg; \
518 type_from_op (cfg, ins, sp [0], NULL); \
520 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
521 MONO_ADD_INS ((cfg)->cbb, (ins)); \
522 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
525 #define ADD_BINCOND(next_block) do { \
528 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
529 cmp->sreg1 = sp [0]->dreg; \
530 cmp->sreg2 = sp [1]->dreg; \
531 type_from_op (cfg, cmp, sp [0], sp [1]); \
533 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
534 type_from_op (cfg, ins, sp [0], sp [1]); \
535 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
536 GET_BBLOCK (cfg, tblock, target); \
537 link_bblock (cfg, bblock, tblock); \
538 ins->inst_true_bb = tblock; \
539 if ((next_block)) { \
540 link_bblock (cfg, bblock, (next_block)); \
541 ins->inst_false_bb = (next_block); \
542 start_new_bblock = 1; \
544 GET_BBLOCK (cfg, tblock, ip); \
545 link_bblock (cfg, bblock, tblock); \
546 ins->inst_false_bb = tblock; \
547 start_new_bblock = 2; \
549 if (sp != stack_start) { \
550 handle_stack_args (cfg, stack_start, sp - stack_start); \
551 CHECK_UNVERIFIABLE (cfg); \
553 MONO_ADD_INS (bblock, cmp); \
554 MONO_ADD_INS (bblock, ins); \
558 * link_bblock: Links two basic blocks
560 * links two basic blocks in the control flow graph, the 'from'
561 * argument is the starting block and the 'to' argument is the block
562 * the control flow continues to after 'from'.
565 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
567 MonoBasicBlock **newa;
571 if (from->cil_code) {
573 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
575 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
578 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
580 printf ("edge from entry to exit\n");
/* Deduplicate: bail out early if the edge already exists. */
585 for (i = 0; i < from->out_count; ++i) {
586 if (to == from->out_bb [i]) {
/* Grow the successor list by one (mempool arrays are append-by-copy). */
592 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
593 for (i = 0; i < from->out_count; ++i) {
594 newa [i] = from->out_bb [i];
/* Same for the predecessor list on the 'to' side. */
602 for (i = 0; i < to->in_count; ++i) {
603 if (from == to->in_bb [i]) {
609 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
610 for (i = 0; i < to->in_count; ++i) {
611 newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock () above. */
620 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
622 link_bblock (cfg, from, to);
626 * mono_find_block_region:
628 * We mark each basic block with a region ID. We use that to avoid BB
629 * optimizations when blocks are in different regions.
632 * A region token that encodes where this region is, and information
633 * about the clause owner for this block.
635 * The region encodes the try/catch/filter clause that owns this block
636 * as well as the type. -1 is a special value that represents a block
637 * that is in none of try/catch/filter.
640 mono_find_block_region (MonoCompile *cfg, int offset)
642 MonoMethodHeader *header = cfg->header;
643 MonoExceptionClause *clause;
/* First pass: handler/filter ranges take priority over try ranges. */
646 for (i = 0; i < header->num_clauses; ++i) {
647 clause = &header->clauses [i];
648 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
649 (offset < (clause->handler_offset)))
650 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
652 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
653 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
654 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
655 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
656 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
658 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: try ranges. */
661 for (i = 0; i < header->num_clauses; ++i) {
662 clause = &header->clauses [i];
664 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
665 return ((i + 1) << 8) | clause->flags;
/* Collect the clauses of kind 'type' that a branch from 'ip' to 'target' would leave
 * (ip inside the clause, target outside); returned as a GList. */
672 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
674 MonoMethodHeader *header = cfg->header;
675 MonoExceptionClause *clause;
679 for (i = 0; i < header->num_clauses; ++i) {
680 clause = &header->clauses [i];
681 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
682 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
683 if (clause->flags == type)
684 res = g_list_append (res, clause);
/* Get (or lazily create) the per-EH-region stack-pointer save variable; kept
 * volatile so the register allocator leaves it on the stack. */
691 mono_create_spvar_for_region (MonoCompile *cfg, int region)
695 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
699 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
700 /* prevent it from being register allocated */
701 var->flags |= MONO_INST_VOLATILE;
703 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler offset (NULL if none). */
707 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
709 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get (or lazily create) the exception-object variable for a handler offset. */
713 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
717 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
721 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
722 /* prevent it from being register allocated */
723 var->flags |= MONO_INST_VOLATILE;
725 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
731 * Returns the type used in the eval stack when @type is loaded.
732 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
735 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
739 type = mini_get_underlying_type (cfg, type);
740 inst->klass = klass = mono_class_from_mono_type (type);
742 inst->type = STACK_MP;
747 switch (type->type) {
749 inst->type = STACK_INV;
757 inst->type = STACK_I4;
762 case MONO_TYPE_FNPTR:
763 inst->type = STACK_PTR;
765 case MONO_TYPE_CLASS:
766 case MONO_TYPE_STRING:
767 case MONO_TYPE_OBJECT:
768 case MONO_TYPE_SZARRAY:
769 case MONO_TYPE_ARRAY:
770 inst->type = STACK_OBJ;
774 inst->type = STACK_I8;
777 inst->type = cfg->r4_stack_type;
780 inst->type = STACK_R8;
782 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
783 if (type->data.klass->enumtype) {
784 type = mono_class_enum_basetype (type->data.klass);
788 inst->type = STACK_VTYPE;
791 case MONO_TYPE_TYPEDBYREF:
792 inst->klass = mono_defaults.typed_reference_class;
793 inst->type = STACK_VTYPE;
795 case MONO_TYPE_GENERICINST:
796 type = &type->data.generic_class->container_class->byval_arg;
800 g_assert (cfg->generic_sharing_context);
801 if (mini_is_gsharedvt_type (cfg, type)) {
802 g_assert (cfg->gsharedvt);
803 inst->type = STACK_VTYPE;
805 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
809 g_error ("unknown type 0x%02x in eval stack type", type->type);
814 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Binary numeric ops: indexed [src1->type][src2->type], yields the result
 * stack type or STACK_INV for invalid IL.  Row/column order follows the
 * MonoStackType enum (see the Inv/i/L/p/F/&/O/vt/r4 legend further below). */
817 bin_num_table [STACK_MAX] [STACK_MAX] = {
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
820 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
821 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
823 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
831 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
834 /* reduce the size of this table */
836 bin_int_table [STACK_MAX] [STACK_MAX] = {
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison table: nonzero means comparable; values > 1 appear to encode
 * restricted (unverifiable) combinations — confirm against callers that
 * test '& 1'. */
848 bin_comp_table [STACK_MAX] [STACK_MAX] = {
849 /* Inv i L p F & O vt r4 */
851 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
852 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
853 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
854 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
855 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
856 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
857 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
861 /* reduce the size of this table */
863 shift_table [STACK_MAX] [STACK_MAX] = {
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
867 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
875 * Tables to map from the non-specific opcode to the matching
876 * type-specific opcode.
878 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each *_op_map entry is an opcode delta added to the generic CEE_/OP_ opcode,
 * indexed by the operand's stack type. */
880 binops_op_map [STACK_MAX] = {
881 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
884 /* handles from CEE_NEG to CEE_CONV_U8 */
886 unops_op_map [STACK_MAX] = {
887 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
890 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
892 ovfops_op_map [STACK_MAX] = {
893 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
896 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
898 ovf2ops_op_map [STACK_MAX] = {
899 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
902 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
904 ovf3ops_op_map [STACK_MAX] = {
905 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
908 /* handles from CEE_BEQ to CEE_BLT_UN */
910 beqops_op_map [STACK_MAX] = {
911 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
914 /* handles from CEE_CEQ to CEE_CLT_UN */
916 ceqops_op_map [STACK_MAX] = {
917 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
921 * Sets ins->type (the type on the eval stack) according to the
922 * type of the opcode and the arguments to it.
923 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
925 * FIXME: this function sets ins->type unconditionally in some cases, but
926 * it should set it to invalid for some types (a conv.x on an object)
929 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
931 switch (ins->opcode) {
938 /* FIXME: check unverifiable args for STACK_MP */
939 ins->type = bin_num_table [src1->type] [src2->type];
940 ins->opcode += binops_op_map [ins->type];
947 ins->type = bin_int_table [src1->type] [src2->type];
948 ins->opcode += binops_op_map [ins->type];
953 ins->type = shift_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
/* Compare: select the L/R/F/I variant from the first operand's stack type. */
959 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
960 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
961 ins->opcode = OP_LCOMPARE;
962 else if (src1->type == STACK_R4)
963 ins->opcode = OP_RCOMPARE;
964 else if (src1->type == STACK_R8)
965 ins->opcode = OP_FCOMPARE;
967 ins->opcode = OP_ICOMPARE;
969 case OP_ICOMPARE_IMM:
970 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
971 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
972 ins->opcode = OP_LCOMPARE_IMM;
984 ins->opcode += beqops_op_map [src1->type];
987 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
994 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
995 ins->opcode += ceqops_op_map [src1->type];
999 ins->type = neg_table [src1->type];
1000 ins->opcode += unops_op_map [ins->type];
1003 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1004 ins->type = src1->type;
1006 ins->type = STACK_INV;
1007 ins->opcode += unops_op_map [ins->type];
1013 ins->type = STACK_I4;
1014 ins->opcode += unops_op_map [src1->type];
1017 ins->type = STACK_R8;
1018 switch (src1->type) {
1021 ins->opcode = OP_ICONV_TO_R_UN;
1024 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to the 32-bit range. */
1028 case CEE_CONV_OVF_I1:
1029 case CEE_CONV_OVF_U1:
1030 case CEE_CONV_OVF_I2:
1031 case CEE_CONV_OVF_U2:
1032 case CEE_CONV_OVF_I4:
1033 case CEE_CONV_OVF_U4:
1034 ins->type = STACK_I4;
1035 ins->opcode += ovf3ops_op_map [src1->type];
1037 case CEE_CONV_OVF_I_UN:
1038 case CEE_CONV_OVF_U_UN:
1039 ins->type = STACK_PTR;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I1_UN:
1043 case CEE_CONV_OVF_I2_UN:
1044 case CEE_CONV_OVF_I4_UN:
1045 case CEE_CONV_OVF_U1_UN:
1046 case CEE_CONV_OVF_U2_UN:
1047 case CEE_CONV_OVF_U4_UN:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf2ops_op_map [src1->type];
1052 ins->type = STACK_PTR;
1053 switch (src1->type) {
1055 ins->opcode = OP_ICONV_TO_U;
1059 #if SIZEOF_VOID_P == 8
1060 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_MOVE;
1066 ins->opcode = OP_LCONV_TO_U;
1069 ins->opcode = OP_FCONV_TO_U;
1075 ins->type = STACK_I8;
1076 ins->opcode += unops_op_map [src1->type];
1078 case CEE_CONV_OVF_I8:
1079 case CEE_CONV_OVF_U8:
1080 ins->type = STACK_I8;
1081 ins->opcode += ovf3ops_op_map [src1->type];
1083 case CEE_CONV_OVF_U8_UN:
1084 case CEE_CONV_OVF_I8_UN:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf2ops_op_map [src1->type];
1089 ins->type = cfg->r4_stack_type;
1090 ins->opcode += unops_op_map [src1->type];
1093 ins->type = STACK_R8;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1101 ins->type = STACK_I4;
1102 ins->opcode += ovfops_op_map [src1->type];
1105 case CEE_CONV_OVF_I:
1106 case CEE_CONV_OVF_U:
1107 ins->type = STACK_PTR;
1108 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floating point. */
1111 case CEE_ADD_OVF_UN:
1113 case CEE_MUL_OVF_UN:
1115 case CEE_SUB_OVF_UN:
1116 ins->type = bin_num_table [src1->type] [src2->type];
1117 ins->opcode += ovfops_op_map [src1->type];
1118 if (ins->type == STACK_R8)
1119 ins->type = STACK_INV;
/* Membase loads: the result stack type follows the load width. */
1121 case OP_LOAD_MEMBASE:
1122 ins->type = STACK_PTR;
1124 case OP_LOADI1_MEMBASE:
1125 case OP_LOADU1_MEMBASE:
1126 case OP_LOADI2_MEMBASE:
1127 case OP_LOADU2_MEMBASE:
1128 case OP_LOADI4_MEMBASE:
1129 case OP_LOADU4_MEMBASE:
1130 ins->type = STACK_PTR;
1132 case OP_LOADI8_MEMBASE:
1133 ins->type = STACK_I8;
1135 case OP_LOADR4_MEMBASE:
1136 ins->type = cfg->r4_stack_type;
1138 case OP_LOADR8_MEMBASE:
1139 ins->type = STACK_R8;
1142 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1146 if (ins->type == STACK_MP)
1147 ins->klass = mono_defaults.object_class;
1152 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1158 param_table [STACK_MAX] [STACK_MAX] = {
/* Best-effort check that the stack types in 'args' are compatible with 'sig';
 * NOTE(review): appears to return 0 on mismatch — return paths are not all
 * visible here, confirm against callers. */
1163 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1167 switch (args->type) {
1177 for (i = 0; i < sig->param_count; ++i) {
1178 switch (args [i].type) {
1182 if (!sig->params [i]->byref)
1186 if (sig->params [i]->byref)
1188 switch (sig->params [i]->type) {
1189 case MONO_TYPE_CLASS:
1190 case MONO_TYPE_STRING:
1191 case MONO_TYPE_OBJECT:
1192 case MONO_TYPE_SZARRAY:
1193 case MONO_TYPE_ARRAY:
1200 if (sig->params [i]->byref)
1202 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1211 /*if (!param_table [args [i].type] [sig->params [i]->type])
1219 * When we need a pointer to the current domain many times in a method, we
1220 * call mono_domain_get() once and we store the result in a local variable.
1221 * This function returns the variable that represents the MonoDomain*.
1223 inline static MonoInst *
1224 mono_get_domainvar (MonoCompile *cfg)
1226 if (!cfg->domainvar)
1227 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1228 return cfg->domainvar;
1232 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily created; only meaningful when MONO_ARCH_NEED_GOT_VAR and compiling AOT. */
1236 mono_get_got_var (MonoCompile *cfg)
1238 #ifdef MONO_ARCH_NEED_GOT_VAR
1239 if (!cfg->compile_aot)
1241 if (!cfg->got_var) {
1242 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1244 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable; only valid under generic sharing. */
1251 mono_get_vtable_var (MonoCompile *cfg)
1253 g_assert (cfg->generic_sharing_context);
1255 if (!cfg->rgctx_var) {
1256 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 /* force the var to be stack allocated */
1258 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1261 return cfg->rgctx_var;
/* Map an eval-stack type (plus ins->klass for MP/OBJ/VTYPE) back to a MonoType. */
1265 type_from_stack_type (MonoInst *ins) {
1266 switch (ins->type) {
1267 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1268 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1269 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1270 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1271 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1273 return &ins->klass->this_arg;
1274 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1275 case STACK_VTYPE: return &ins->klass->byval_arg;
1277 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse of type_from_stack_type (): map a MonoType to its STACK_* evaluation-stack type. */
1282 static G_GNUC_UNUSED int
1283 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Strip enum wrappers etc. so the switch sees the underlying type. */
1285 t = mono_type_get_underlying_type (t);
1297 case MONO_TYPE_FNPTR:
1299 case MONO_TYPE_CLASS:
1300 case MONO_TYPE_STRING:
1301 case MONO_TYPE_OBJECT:
1302 case MONO_TYPE_SZARRAY:
1303 case MONO_TYPE_ARRAY:
/* R4 handling depends on the configuration (cfg->r4_stack_type). */
1309 return cfg->r4_stack_type;
1312 case MONO_TYPE_VALUETYPE:
1313 case MONO_TYPE_TYPEDBYREF:
1315 case MONO_TYPE_GENERICINST:
/* Generic instances over valuetypes are treated like valuetypes. */
1316 if (mono_type_generic_inst_is_valuetype (t))
1322 g_assert_not_reached ();
/* Return the element class accessed by a CIL ldelem/stelem-style array opcode. */
1329 array_access_to_klass (int opcode)
1333 return mono_defaults.byte_class;
1335 return mono_defaults.uint16_class;
1338 return mono_defaults.int_class;
1341 return mono_defaults.sbyte_class;
1344 return mono_defaults.int16_class;
1347 return mono_defaults.int32_class;
1349 return mono_defaults.uint32_class;
1352 return mono_defaults.int64_class;
1355 return mono_defaults.single_class;
1358 return mono_defaults.double_class;
1359 case CEE_LDELEM_REF:
1360 case CEE_STELEM_REF:
1361 return mono_defaults.object_class;
/* Any other opcode is not an array access. */
1363 g_assert_not_reached ();
1369 * We try to share variables when possible
/* Return (creating if needed) the interface variable for stack slot 'slot' holding a
 * value of ins's stack type; variables are shared per (slot, stack type) via cfg->intvars. */
1372 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1377 /* inlining can result in deeper stacks */
1378 if (slot >= cfg->header->max_stack)
1379 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per stack type per slot. */
1381 pos = ins->type - 1 + slot * STACK_MAX;
1383 switch (ins->type) {
1390 if ((vnum = cfg->intvars [pos]))
1391 return cfg->varinfo [vnum];
1392 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1393 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' in cfg->token_info_hash so AOT can later
 * resolve the key back to metadata. */
1402 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1405 * Don't use this if a generic_context is set, since that means AOT can't
1406 * look up the method using just the image+token.
1407 * table == 0 means this is a reference made from a wrapper.
1409 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1410 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1411 jump_info_token->image = image;
1412 jump_info_token->token = token;
1413 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1418 * This function is called to handle items that are left on the evaluation stack
1419 * at basic block boundaries. What happens is that we save the values to local variables
1420 * and we reload them later when first entering the target basic block (with the
1421 * handle_loaded_temps () function).
1422 * A single join point will use the same variables (stored in the array bb->out_stack or
1423 * bb->in_stack, if the basic block is before or after the join point).
1425 * This function needs to be called _before_ emitting the last instruction of
1426 * the bb (i.e. before emitting a branch).
1427 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* Spill the 'count' items in 'sp' left on the evaluation stack at a bblock boundary
 * into shared locals, so the successor bblocks can reload them; sets cfg->unverifiable
 * if the stack depth does not match at a join point. Must be called before emitting
 * the bblock's final branch. */
1430 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1433 MonoBasicBlock *bb = cfg->cbb;
1434 MonoBasicBlock *outb;
1435 MonoInst *inst, **locals;
1440 if (cfg->verbose_level > 3)
1441 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bblock: pick or create the out_stack variables. */
1442 if (!bb->out_scount) {
1443 bb->out_scount = count;
1444 //printf ("bblock %d has out:", bb->block_num);
/* Reuse a successor's in_stack when one already exists. */
1446 for (i = 0; i < bb->out_count; ++i) {
1447 outb = bb->out_bb [i];
1448 /* exception handlers are linked, but they should not be considered for stack args */
1449 if (outb->flags & BB_EXCEPTION_HANDLER)
1451 //printf (" %d", outb->block_num);
1452 if (outb->in_stack) {
1454 bb->out_stack = outb->in_stack;
1460 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1461 for (i = 0; i < count; ++i) {
1463 * try to reuse temps already allocated for this purpose, if they occupy the same
1464 * stack slot and if they are of the same type.
1465 * This won't cause conflicts since if 'local' is used to
1466 * store one of the values in the in_stack of a bblock, then
1467 * the same variable will be used for the same outgoing stack
1469 * This doesn't work when inlining methods, since the bblocks
1470 * in the inlined methods do not inherit their in_stack from
1471 * the bblock they are inlined to. See bug #58863 for an
1474 if (cfg->inlined_method)
1475 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1477 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as each successor's in_stack; mismatched depths are unverifiable. */
1482 for (i = 0; i < bb->out_count; ++i) {
1483 outb = bb->out_bb [i];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER)
1487 if (outb->in_scount) {
1488 if (outb->in_scount != bb->out_scount) {
1489 cfg->unverifiable = TRUE;
1492 continue; /* check they are the same locals */
1494 outb->in_scount = count;
1495 outb->in_stack = bb->out_stack;
1498 locals = bb->out_stack;
/* Store each stack item into its shared local and replace it on the stack. */
1500 for (i = 0; i < count; ++i) {
1501 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1502 inst->cil_code = sp [i]->cil_code;
1503 sp [i] = locals [i];
1504 if (cfg->verbose_level > 3)
1505 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1509 * It is possible that the out bblocks already have in_stack assigned, and
1510 * the in_stacks differ. In this case, we will store to all the different
1517 /* Find a bblock which has a different in_stack */
1519 while (bindex < bb->out_count) {
1520 outb = bb->out_bb [bindex];
1521 /* exception handlers are linked, but they should not be considered for stack args */
1522 if (outb->flags & BB_EXCEPTION_HANDLER) {
1526 if (outb->in_stack != locals) {
1527 for (i = 0; i < count; ++i) {
1528 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1529 inst->cil_code = sp [i]->cil_code;
1530 sp [i] = locals [i];
1531 if (cfg->verbose_level > 3)
1532 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1534 locals = outb->in_stack;
/* Emit IR that loads into intf_bit_reg a nonzero value iff the interface bitmap found at
 * base_reg+offset has the bit for klass's interface id set. Handles both compressed and
 * uncompressed bitmaps, and both AOT (interface id as a patch) and JIT (id known now). */
1544 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1546 int ibitmap_reg = alloc_preg (cfg);
1547 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to the mono_class_interface_match icall. */
1549 MonoInst *res, *ins;
1550 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1551 MONO_ADD_INS (cfg->cbb, ins);
1553 if (cfg->compile_aot)
1554 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1556 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1557 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1558 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1560 int ibitmap_byte_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1564 if (cfg->compile_aot) {
/* AOT: interface id is a patch-time constant, so compute byte index (id >> 3)
 * and bit mask (1 << (id & 7)) in registers at run time. */
1565 int iid_reg = alloc_preg (cfg);
1566 int shifted_iid_reg = alloc_preg (cfg);
1567 int ibitmap_byte_address_reg = alloc_preg (cfg);
1568 int masked_iid_reg = alloc_preg (cfg);
1569 int iid_one_bit_reg = alloc_preg (cfg);
1570 int iid_bit_reg = alloc_preg (cfg);
1571 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1573 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1576 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are compile-time immediates. */
1580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1587 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1588 * stored in "klass_reg" implements the interface "klass".
/* Interface-bitmap check against the MonoClass in klass_reg. */
1591 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1593 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1597 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1598 * stored in "vtable_reg" implements the interface "klass".
/* Interface-bitmap check against the MonoVTable in vtable_reg. */
1601 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1603 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1607 * Emit code which checks whether the interface id of @klass is smaller
1608 * than the value given by max_iid_reg.
/* Compare max_iid_reg against klass's interface id; on failure either branch to
 * false_target (when given) or throw InvalidCastException. */
1611 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 if (cfg->compile_aot) {
/* AOT: interface id is only known at patch time, so load it into a register. */
1615 int iid_reg = alloc_preg (cfg);
1616 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1617 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 /* Same as above, but obtains max_iid from a vtable */
/* Load max_interface_id from the vtable in vtable_reg, then run the max-iid check. */
1629 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1630 MonoBasicBlock *false_target)
1632 int max_iid_reg = alloc_preg (cfg);
1634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1635 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1638 /* Same as above, but obtains max_iid from a klass */
/* Load max_interface_id from the MonoClass in klass_reg, then run the max-iid check. */
1640 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1641 MonoBasicBlock *false_target)
1643 int max_iid_reg = alloc_preg (cfg);
1645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1646 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style supertype test: compare the supertypes[idepth-1] entry of the
 * class in klass_reg against 'klass' (from klass_ins, an AOT class constant, or an
 * immediate) and branch to true_target on match; a too-small idepth goes to false_target. */
1650 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1652 int idepth_reg = alloc_preg (cfg);
1653 int stypes_reg = alloc_preg (cfg);
1654 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes / klass->idepth are initialized before we read them. */
1656 mono_class_setup_supertypes (klass);
1658 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchies: verify the candidate's idepth is large enough first. */
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1666 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1667 } else if (cfg->compile_aot) {
1668 int const_reg = alloc_preg (cfg);
1669 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1670 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst cast with no explicit klass instruction. */
1678 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1680 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Check that the vtable in vtable_reg implements interface 'klass': branch to
 * true_target if the interface bit is set, otherwise throw InvalidCastException. */
1684 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1686 int intf_reg = alloc_preg (cfg);
1688 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1689 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1694 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1698 * Variant of the above that takes a register to the class, not the vtable.
/* Same interface check as mini_emit_iface_cast (), but klass_reg holds a MonoClass*. */
1701 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1703 int intf_bit_reg = alloc_preg (cfg);
1705 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1706 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1711 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-equality check of klass_reg against 'klass' (via klass_inst,
 * an AOT class constant, or an immediate); throws InvalidCastException on mismatch. */
1715 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1718 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1719 } else if (cfg->compile_aot) {
1720 int const_reg = alloc_preg (cfg);
1721 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1722 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: class-equality check with no explicit klass instruction. */
1730 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1732 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg against 'klass' and branch to 'target' using branch_op
 * instead of throwing; handles the AOT case with a class constant. */
1736 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1738 if (cfg->compile_aot) {
1739 int const_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1745 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1749 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check of the class in klass_reg against 'klass'; throws
 * InvalidCastException on mismatch. For arrays this checks rank and element
 * class (with special handling for enum/object element types); otherwise it
 * walks the supertypes table like mini_emit_isninst_cast_inst () but throws
 * instead of branching to false_target. */
1752 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1755 int rank_reg = alloc_preg (cfg);
1756 int eclass_reg = alloc_preg (cfg);
1758 g_assert (!klass_inst);
/* Array cast: the rank must match exactly. */
1759 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1761 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1762 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1764 if (klass->cast_class == mono_defaults.object_class) {
/* Element type 'object': reference-element arrays are fine, enum-element arrays need a check. */
1765 int parent_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1767 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1768 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1769 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1770 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1771 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1772 } else if (klass->cast_class == mono_defaults.enum_class) {
1773 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1774 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1775 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1777 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1778 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1781 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1782 /* Check that the object is a vector too */
1783 int bounds_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1786 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array cast: walk the supertypes table. */
1789 int idepth_reg = alloc_preg (cfg);
1790 int stypes_reg = alloc_preg (cfg);
1791 int stype = alloc_preg (cfg);
1793 mono_class_setup_supertypes (klass);
1795 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1796 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1798 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1801 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1802 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no explicit klass instruction. */
1807 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1809 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit inline IR that fills 'size' bytes at destreg+offset with 'val', using the
 * widest stores that 'align' (and NO_UNALIGNED_ACCESS) allow; falls back to byte
 * stores for the remainder. */
1813 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is supported on at least one path below. */
1817 g_assert (val == 0);
/* Small, sufficiently aligned fills can use a single immediate store. */
1822 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1825 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1828 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1833 #if SIZEOF_REGISTER == 8
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val once and emit a run of register stores. */
1841 val_reg = alloc_preg (cfg);
1843 if (SIZEOF_REGISTER == 8)
1844 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1846 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1849 /* This could be optimized further if necessary */
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1858 #if !NO_UNALIGNED_ACCESS
1859 if (SIZEOF_REGISTER == 8) {
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1874 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline load/store pairs copying 'size' bytes from srcreg+soffset to
 * destreg+doffset, using the widest transfers that 'align' (and
 * NO_UNALIGNED_ACCESS) allow; byte copies handle the remainder. */
1891 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1898 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1899 g_assert (size < 10000);
1902 /* This could be optimized further if necessary */
/* Unaligned prefix: copy byte by byte. */
1904 cur_reg = alloc_preg (cfg);
1905 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1913 #if !NO_UNALIGNED_ACCESS
1914 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets that tolerate unaligned access. */
1916 cur_reg = alloc_preg (cfg);
1917 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1918 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Then 4-, 2-, and 1-byte chunks for the tail. */
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1943 cur_reg = alloc_preg (cfg);
1944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Emit IR storing sreg1 into the TLS slot for tls_key: AOT uses OP_TLS_SET_REG
 * with a patch-time offset constant, JIT resolves the offset immediately. */
1953 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1957 if (cfg->compile_aot) {
1958 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1959 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1961 ins->sreg2 = c->dreg;
1962 MONO_ADD_INS (cfg->cbb, ins);
1964 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1966 ins->inst_offset = mini_get_tls_offset (tls_key);
1967 MONO_ADD_INS (cfg->cbb, ins);
1974 * Emit IR to push the current LMF onto the LMF stack.
/* Emit IR pushing cfg->lmf_var onto the thread's LMF stack. Fast path stores it
 * directly through the TLS_KEY_LMF intrinsic; otherwise the lmf_addr is obtained
 * (via jit_tls intrinsic, inlined pthread_getspecific, or the mono_get_lmf_addr
 * icall), cached in cfg->lmf_addr_var, and linked through previous_lmf. */
1977 emit_push_lmf (MonoCompile *cfg)
1980 * Emit IR to push the LMF:
1981 * lmf_addr = <lmf_addr from tls>
1982 * lmf->lmf_addr = lmf_addr
1983 * lmf->prev_lmf = *lmf_addr
1986 int lmf_reg, prev_lmf_reg;
1987 MonoInst *ins, *lmf_ins;
1992 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1993 /* Load current lmf */
1994 lmf_ins = mono_get_lmf_intrinsic (cfg);
1996 MONO_ADD_INS (cfg->cbb, lmf_ins);
1997 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1998 lmf_reg = ins->dreg;
1999 /* Save previous_lmf */
2000 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make our LMF the current one in TLS. */
2002 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2005 * Store lmf_addr in a variable, so it can be allocated to a global register.
2007 if (!cfg->lmf_addr_var)
2008 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr = &jit_tls->lmf via the jit_tls intrinsic when available. */
2011 ins = mono_get_jit_tls_intrinsic (cfg);
2013 int jit_tls_dreg = ins->dreg;
2015 MONO_ADD_INS (cfg->cbb, ins);
2016 lmf_reg = alloc_preg (cfg);
2017 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2019 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2022 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2024 MONO_ADD_INS (cfg->cbb, lmf_ins);
2027 MonoInst *args [16], *jit_tls_ins, *ins;
2029 /* Inline mono_get_lmf_addr () */
2030 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2032 /* Load mono_jit_tls_id */
2033 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2034 /* call pthread_getspecific () */
2035 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2036 /* lmf_addr = &jit_tls->lmf */
2037 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Fallback: plain icall to mono_get_lmf_addr. */
2040 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2044 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2046 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2047 lmf_reg = ins->dreg;
2049 prev_lmf_reg = alloc_preg (cfg);
2050 /* Save previous_lmf */
2051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2052 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = our LMF, completing the push. */
2054 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2061 * Emit IR to pop the current LMF from the LMF stack.
/* Emit IR popping cfg->lmf_var off the thread's LMF stack: restore previous_lmf
 * either directly into TLS (fast path) or through the cached lmf_addr variable. */
2064 emit_pop_lmf (MonoCompile *cfg)
2066 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2072 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2073 lmf_reg = ins->dreg;
2075 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2076 /* Load previous_lmf */
2077 prev_lmf_reg = alloc_preg (cfg);
2078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Restore previous_lmf as the current LMF in TLS. */
2080 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2083 * Emit IR to pop the LMF:
2084 * *(lmf->lmf_addr) = lmf->prev_lmf
2086 /* This could be called before emit_push_lmf () */
2087 if (!cfg->lmf_addr_var)
2088 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2089 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2091 prev_lmf_reg = alloc_preg (cfg);
2092 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2093 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave icall to 'func' with the current method as the
 * argument; skipped for inlined methods and when enter/leave profiling is off. */
2098 emit_instrumentation_call (MonoCompile *cfg, void *func)
2100 MonoInst *iargs [1];
2103 * Avoid instrumenting inlined methods since it can
2104 * distort profiling results.
2106 if (cfg->method != cfg->current_method)
2109 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2110 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2111 mono_emit_jit_icall (cfg, func, iargs);
/* Map a method's return type to the matching call opcode, choosing among the
 * direct / indirect (calli) / virtual (membase) variants of OP_*CALL. */
2116 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2119 type = mini_get_underlying_type (cfg, type);
2120 switch (type->type) {
2121 case MONO_TYPE_VOID:
2122 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2129 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2133 case MONO_TYPE_FNPTR:
2134 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2135 case MONO_TYPE_CLASS:
2136 case MONO_TYPE_STRING:
2137 case MONO_TYPE_OBJECT:
2138 case MONO_TYPE_SZARRAY:
2139 case MONO_TYPE_ARRAY:
2140 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2143 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2146 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2148 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2150 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2151 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying basetype and are re-dispatched. */
2152 if (type->data.klass->enumtype) {
2153 type = mono_class_enum_basetype (type->data.klass);
2156 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2157 case MONO_TYPE_TYPEDBYREF:
2158 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2159 case MONO_TYPE_GENERICINST:
/* Generic instances are re-dispatched on the container class's type. */
2160 type = &type->data.generic_class->container_class->byval_arg;
2163 case MONO_TYPE_MVAR:
2165 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2167 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2173 * target_type_is_incompatible:
2174 * @cfg: MonoCompile context
2176 * Check that the item @arg on the evaluation stack can be stored
2177 * in the target type (can be a local, or field, etc).
2178 * The cfg arg can be used to check if we need verification or just
2181 * Returns: non-0 value if arg can't be stored on a target.
/* Return nonzero when the stack item 'arg' cannot legally be stored into a slot of
 * type 'target' (local, field, etc.); compares the target's underlying type against
 * arg's evaluation-stack type (and klass for valuetypes). */
2184 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2186 MonoType *simple_type;
2189 if (target->byref) {
2190 /* FIXME: check that the pointed to types match */
2191 if (arg->type == STACK_MP)
2192 return arg->klass != mono_class_from_mono_type (target);
2193 if (arg->type == STACK_PTR)
2198 simple_type = mini_get_underlying_type (cfg, target);
2199 switch (simple_type->type) {
2200 case MONO_TYPE_VOID:
2208 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2212 /* STACK_MP is needed when setting pinned locals */
2213 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2218 case MONO_TYPE_FNPTR:
2220 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2221 * in native int. (#688008).
2223 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2226 case MONO_TYPE_CLASS:
2227 case MONO_TYPE_STRING:
2228 case MONO_TYPE_OBJECT:
2229 case MONO_TYPE_SZARRAY:
2230 case MONO_TYPE_ARRAY:
2231 if (arg->type != STACK_OBJ)
2233 /* FIXME: check type compatibility */
2237 if (arg->type != STACK_I8)
2241 if (arg->type != cfg->r4_stack_type)
2245 if (arg->type != STACK_R8)
2248 case MONO_TYPE_VALUETYPE:
/* Valuetypes must match both the stack type and the exact klass. */
2249 if (arg->type != STACK_VTYPE)
2251 klass = mono_class_from_mono_type (simple_type);
2252 if (klass != arg->klass)
2255 case MONO_TYPE_TYPEDBYREF:
2256 if (arg->type != STACK_VTYPE)
2258 klass = mono_class_from_mono_type (simple_type);
2259 if (klass != arg->klass)
2262 case MONO_TYPE_GENERICINST:
2263 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2266 klass = mono_class_from_mono_type (simple_type);
2267 /* The second cases is needed when doing partial sharing */
2268 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2272 if (arg->type != STACK_OBJ)
2274 /* FIXME: check type compatibility */
2278 case MONO_TYPE_MVAR:
/* Type variables only appear under generic sharing; vtype-constrained vars need STACK_VTYPE. */
2279 g_assert (cfg->generic_sharing_context);
2280 if (mini_type_var_is_vt (cfg, simple_type)) {
2281 if (arg->type != STACK_VTYPE)
2284 if (arg->type != STACK_OBJ)
2289 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2295 * Prepare arguments for passing to a function call.
2296 * Return a non-zero value if the arguments can't be passed to the given
2298 * The type checks are not yet complete and some conversions may need
2299 * casts on 32 or 64 bit architectures.
2301 * FIXME: implement this using target_type_is_incompatible ()
/* Return nonzero when 'args' cannot be passed to a call with signature 'sig':
 * each argument's evaluation-stack type is checked against the declared
 * parameter type (byref params accept STACK_MP/STACK_PTR). */
2304 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2306 MonoType *simple_type;
/* Instance calls: the receiver must be an object, managed pointer, or native pointer. */
2310 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2314 for (i = 0; i < sig->param_count; ++i) {
2315 if (sig->params [i]->byref) {
2316 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2320 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2322 switch (simple_type->type) {
2323 case MONO_TYPE_VOID:
2332 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2338 case MONO_TYPE_FNPTR:
2339 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2342 case MONO_TYPE_CLASS:
2343 case MONO_TYPE_STRING:
2344 case MONO_TYPE_OBJECT:
2345 case MONO_TYPE_SZARRAY:
2346 case MONO_TYPE_ARRAY:
2347 if (args [i]->type != STACK_OBJ)
2352 if (args [i]->type != STACK_I8)
2356 if (args [i]->type != cfg->r4_stack_type)
2360 if (args [i]->type != STACK_R8)
2363 case MONO_TYPE_VALUETYPE:
/* Enums are re-checked as their underlying basetype. */
2364 if (simple_type->data.klass->enumtype) {
2365 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2368 if (args [i]->type != STACK_VTYPE)
2371 case MONO_TYPE_TYPEDBYREF:
2372 if (args [i]->type != STACK_VTYPE)
2375 case MONO_TYPE_GENERICINST:
/* Generic instances are re-checked as the container class's type. */
2376 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2379 case MONO_TYPE_MVAR:
2381 if (args [i]->type != STACK_VTYPE)
2385 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart. */
2393 callvirt_to_call (int opcode)
2396 case OP_CALL_MEMBASE:
2398 case OP_VOIDCALL_MEMBASE:
2400 case OP_FCALL_MEMBASE:
2402 case OP_RCALL_MEMBASE:
2404 case OP_VCALL_MEMBASE:
2406 case OP_LCALL_MEMBASE:
/* Only membase call opcodes may reach here. */
2409 g_assert_not_reached ();
2415 /* Either METHOD or IMT_ARG needs to be set */
/* Either METHOD or IMT_ARG needs to be set */
/* Materialize the IMT argument (imt_arg's value, an AOT method constant, or a
 * direct method pointer) into a register and attach it to 'call' — via
 * call->imt_arg_reg under LLVM, or MONO_ARCH_IMT_REG otherwise. */
2417 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2421 if (COMPILE_LLVM (cfg)) {
2422 method_reg = alloc_preg (cfg);
2425 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2426 } else if (cfg->compile_aot) {
2427 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2430 MONO_INST_NEW (cfg, ins, OP_PCONST);
2431 ins->inst_p0 = method;
2432 ins->dreg = method_reg;
2433 MONO_ADD_INS (cfg->cbb, ins);
/* LLVM handles the register assignment itself via imt_arg_reg. */
2437 call->imt_arg_reg = method_reg;
2439 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same materialization, then pass in the architecture's IMT register. */
2443 method_reg = alloc_preg (cfg);
2446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2447 } else if (cfg->compile_aot) {
2448 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2451 MONO_INST_NEW (cfg, ins, OP_PCONST);
2452 ins->inst_p0 = method;
2453 ins->dreg = method_reg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2457 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo patch record from 'mp' for the given ip/type/target. */
2460 static MonoJumpInfo *
2461 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2463 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2467 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Return the generic-context usage flags for KLASS, but only when this
 *   compile performs generic sharing; presumably returns 0 otherwise
 *   (the fallback return is elided from this extract).
 */
2473 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2475 if (cfg->generic_sharing_context)
2476 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Same as mini_class_check_context_used, but for METHOD: only meaningful
 *   under generic sharing (non-sharing fallback elided from this extract).
 */
2482 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2484 if (cfg->generic_sharing_context)
2485 return mono_method_check_context_used (method);
2491 * check_method_sharing:
2493 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are written through the optional OUT_PASS_VTABLE / OUT_PASS_MRGCTX
 * pointers: pass_vtable is for shared static/valuetype methods on generic
 * classes without a method instantiation; pass_mrgctx is for generic methods
 * (method_inst != NULL) — the two are mutually exclusive (see the assert).
 */
2496 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2498 gboolean pass_vtable = FALSE;
2499 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2501 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2502 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2503 gboolean sharable = FALSE;
2505 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2508 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2509 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2510 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2512 sharable = sharing_enabled && context_sharable;
2516 * Pass vtable iff target method might
2517 * be shared, which means that sharing
2518 * is enabled for its class and its
2519 * context is sharable (and it's not a
2522 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method instantiation) need an mrgctx instead. */
2526 if (mini_method_get_context (cmethod) &&
2527 mini_method_get_context (cmethod)->method_inst) {
2528 g_assert (!pass_vtable);
2530 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2533 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2534 MonoGenericContext *context = mini_method_get_context (cmethod);
2535 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2537 if (sharing_enabled && context_sharable)
/* gsharedvt signatures also require the mrgctx. */
2539 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2544 if (out_pass_vtable)
2545 *out_pass_vtable = pass_vtable;
2546 if (out_pass_mrgctx)
2547 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Central helper that builds a MonoCallInst for a call with signature SIG
 *   and arguments ARGS.  CALLI selects an indirect call, VIRTUAL a virtual
 *   one, TAIL a tail call (OP_TAILCALL); RGCTX / UNBOX_TRAMPOLINE are
 *   recorded on the call instruction.  Handles valuetype returns (via
 *   vret_addr or OP_OUTARG_VTRETADDR), soft-float r4 argument conversion,
 *   and dispatches arg emission to LLVM or the architecture backend.
 *   NOTE(review): several guard lines (e.g. the 'if (tail)' around
 *   OP_TAILCALL and the condition distinguishing the two vtype-return
 *   branches) are elided from this extract.
 */
2550 inline static MonoCallInst *
2551 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2552 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2556 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the leave instrumentation first. */
2561 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2565 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2568 call->signature = sig;
2569 call->rgctx_reg = rgctx;
2570 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2572 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2575 if (mini_type_is_vtype (cfg, sig_ret)) {
2576 call->vret_var = cfg->vret_addr;
2577 //g_assert_not_reached ();
2579 } else if (mini_type_is_vtype (cfg, sig_ret)) {
/* Vtype return without a pre-allocated vret_addr: return into a temp. */
2580 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2583 temp->backend.is_pinvoke = sig->pinvoke;
2586 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2587 * address of return value to increase optimization opportunities.
2588 * Before vtype decomposition, the dreg of the call ins itself represents the
2589 * fact the call modifies the return value. After decomposition, the call will
2590 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2591 * will be transformed into an LDADDR.
2593 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2594 loada->dreg = alloc_preg (cfg);
2595 loada->inst_p0 = temp;
2596 /* We reference the call too since call->dreg could change during optimization */
2597 loada->inst_p1 = call;
2598 MONO_ADD_INS (cfg->cbb, loada);
2600 call->inst.dreg = temp->dreg;
2602 call->vret_var = loada;
2603 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2604 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2606 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2607 if (COMPILE_SOFT_FLOAT (cfg)) {
2609 * If the call has a float argument, we would need to do an r8->r4 conversion using
2610 * an icall, but that cannot be done during the call sequence since it would clobber
2611 * the call registers + the stack. So we do it before emitting the call.
2613 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2615 MonoInst *in = call->args [i];
/* Index 0 is 'this' when sig->hasthis; map it to the native int type. */
2617 if (i >= sig->hasthis)
2618 t = sig->params [i - sig->hasthis];
2620 t = &mono_defaults.int_class->byval_arg;
2621 t = mono_type_get_underlying_type (t);
2623 if (!t->byref && t->type == MONO_TYPE_R4) {
2624 MonoInst *iargs [1];
2628 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2630 /* The result will be in an int vreg */
2631 call->args [i] = conv;
2637 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (LLVM or native) emit the out-args for this call. */
2640 if (COMPILE_LLVM (cfg))
2641 mono_llvm_emit_call (cfg, call);
2643 mono_arch_emit_call (cfg, call);
2645 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing param area and mark the method as having calls. */
2648 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2649 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument (already moved into RGCTX_REG) to CALL.
 *   On architectures with a dedicated rgctx register the value is bound to
 *   MONO_ARCH_RGCTX_REG; otherwise (the #else branch, partly elided here)
 *   only the vreg is recorded in call->rgctx_arg_reg.
 */
2655 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2657 #ifdef MONO_ARCH_RGCTX_REG
2658 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2659 cfg->uses_rgctx_reg = TRUE;
2660 call->rgctx_reg = TRUE;
2662 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG and args ARGS,
 *   optionally passing IMT_ARG and RGCTX_ARG.  When checking pinvoke
 *   calling conventions, the stack pointer is captured before the call
 *   (OP_GET_SP) and compared afterwards; a mismatch restores SP and throws
 *   ExecutionEngineException.
 */
2669 inline static MonoInst*
2670 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2675 gboolean check_sp = FALSE;
/* Only verify SP balance inside managed->native pinvoke wrappers. */
2677 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2678 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2680 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh vreg so it survives until the call. */
2685 rgctx_reg = mono_alloc_preg (cfg);
2686 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2690 if (!cfg->stack_inbalance_var)
2691 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call for the post-call comparison below. */
2693 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2694 ins->dreg = cfg->stack_inbalance_var->dreg;
2695 MONO_ADD_INS (cfg->cbb, ins);
2698 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2700 call->inst.sreg1 = addr->dreg;
2703 emit_imt_argument (cfg, call, NULL, imt_arg);
2705 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Capture SP after the call and compare against the saved value. */
2710 sp_reg = mono_alloc_preg (cfg);
2712 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 MONO_ADD_INS (cfg->cbb, ins);
2716 /* Restore the stack so we don't crash when throwing the exception */
2717 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2718 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2719 MONO_ADD_INS (cfg->cbb, ins);
2721 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2722 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2726 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2728 return (MonoInst*)call;
2732 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2735 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2737 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a managed call to METHOD.  Handles, in order: string ctor
 *   signature fixup, remoting proxies (unless DISABLE_REMOTING),
 *   the multicast-delegate Invoke fast path, static dispatch of
 *   non-virtual/final methods, and true virtual/interface dispatch via
 *   the vtable and the IMT.  THIS being non-NULL selects virtual dispatch.
 *   Returns the call instruction cast to MonoInst*.
 *   NOTE(review): many guard lines are elided from this extract (e.g. the
 *   'if (rgctx_arg)' around the rgctx move and the 'if (virtual)' around
 *   the dispatch logic) — confirm against the full source before editing.
 */
2740 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2741 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2743 #ifndef DISABLE_REMOTING
2744 gboolean might_be_remote = FALSE;
2746 gboolean virtual = this != NULL;
2747 gboolean enable_for_aot = TRUE;
2751 gboolean need_unbox_trampoline;
2754 sig = mono_method_signature (method);
/* Preserve the rgctx value across the argument setup below. */
2757 rgctx_reg = mono_alloc_preg (cfg);
2758 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2761 if (method->string_ctor) {
2762 /* Create the real signature */
2763 /* FIXME: Cache these */
2764 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2765 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2770 context_used = mini_method_check_context_used (cfg, method);
2772 #ifndef DISABLE_REMOTING
/* A call through 'this' on a MarshalByRef (or object) class may hit a proxy. */
2773 might_be_remote = this && sig->hasthis &&
2774 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2775 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2777 if (might_be_remote && context_used) {
2780 g_assert (cfg->generic_sharing_context);
/* Shared code cannot use remoting wrappers; fetch the checked-invoke
 * address from the rgctx and call indirectly instead. */
2782 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2784 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Valuetype methods reached through object/interface need unboxing. */
2788 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2790 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2792 #ifndef DISABLE_REMOTING
2793 if (might_be_remote)
2794 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2797 call->method = method;
2798 call->inst.flags |= MONO_INST_HAS_METHOD;
2799 call->inst.inst_left = this;
2800 call->tail_call = tail;
2803 int vtable_reg, slot_reg, this_reg;
2806 this_reg = this->dreg;
/* Fast path: MulticastDelegate.Invoke calls through delegate->invoke_impl. */
2808 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2809 MonoInst *dummy_use;
2811 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2813 /* Make a call to delegate->invoke_impl */
2814 call->inst.inst_basereg = this_reg;
2815 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2816 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2818 /* We must emit a dummy use here because the delegate trampoline will
2819 replace the 'this' argument with the delegate target making this activation
2820 no longer a root for the delegate.
2821 This is an issue for delegates that target collectible code such as dynamic
2822 methods of GC'able assemblies.
2824 For a test case look into #667921.
2826 FIXME: a dummy use is not the best way to do it as the local register allocator
2827 will put it on a caller save register and spil it around the call.
2828 Ideally, we would either put it on a callee save register or only do the store part.
2830 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2832 return (MonoInst*)call;
/* Statically dispatch non-virtual methods: just a null check + direct call. */
2835 if ((!cfg->compile_aot || enable_for_aot) &&
2836 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2837 (MONO_METHOD_IS_FINAL (method) &&
2838 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2839 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2841 * the method is not virtual, we just need to ensure this is not null
2842 * and then we can call the method directly.
2844 #ifndef DISABLE_REMOTING
2845 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2847 * The check above ensures method is not gshared, this is needed since
2848 * gshared methods can't have wrappers.
2850 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2854 if (!method->string_ctor)
2855 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2857 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2858 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2860 * the method is virtual, but we can statically dispatch since either
2861 * it's class or the method itself are sealed.
2862 * But first we need to ensure it's not a null reference.
2864 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2866 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'). */
2868 vtable_reg = alloc_preg (cfg);
2869 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2870 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: IMT slots sit at negative offsets before the vtable. */
2871 guint32 imt_slot = mono_method_get_imt_slot (method);
2872 emit_imt_argument (cfg, call, call->method, imt_arg);
2873 slot_reg = vtable_reg;
2874 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Class virtual call: index into the vtable by the method's slot. */
2876 slot_reg = vtable_reg;
2877 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2878 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2880 g_assert (mono_method_signature (method)->generic_param_count);
2881 emit_imt_argument (cfg, call, call->method, imt_arg);
2885 call->inst.sreg1 = slot_reg;
2886 call->inst.inst_offset = offset;
2887 call->virtual = TRUE;
2891 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2894 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2896 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own signature and
 *   no IMT/rgctx arguments.
 */
2900 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2902 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 *   NOTE(review): the line storing FUNC into the call (call->fptr) is
 *   elided from this extract.
 */
2906 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2913 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2916 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2918 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the JIT icall registered for FUNC and emit a call to its
 *   wrapper (which provides the managed<->native transition).
 */
2922 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2924 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2928 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2932 * mono_emit_abs_call:
2934 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the call address; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can map it back to the
 * real target at patch time.  fptr_is_patch marks the address as such.
 */
2936 inline static MonoInst*
2937 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2938 MonoMethodSignature *sig, MonoInst **args)
2940 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2944 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2947 if (cfg->abs_patches == NULL)
2948 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2949 g_hash_table_insert (cfg->abs_patches, ji, ji);
2950 ins = mono_emit_native_call (cfg, ji, sig, args);
2951 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without a wrapper): disabled
 *   under LLVM (see comment), when emitting sdb sequence points, or when
 *   explicitly disabled on the compile.  (Return statements elided from
 *   this extract.)
 */
2956 direct_icalls_enabled (MonoCompile *cfg)
2958 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2960 if (cfg->compile_llvm)
2963 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot
 *   raise (info->no_raise) and direct icalls are enabled, the icall wrapper
 *   is inlined instead of called, avoiding the wrapper overhead; otherwise
 *   fall back to a normal call through the wrapper.
 */
2969 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
2972 * Call the jit icall without a wrapper if possible.
2973 * The wrapper is needed for the following reasons:
2974 * - to handle exceptions thrown using mono_raise_exceptions () from the
2975 * icall function. The EH code needs the lmf frame pushed by the
2976 * wrapper to be able to unwind back to managed code.
2977 * - to be able to do stack walks for asynchronously suspended
2978 * threads when debugging.
2980 if (info->no_raise && direct_icalls_enabled (cfg)) {
2984 if (!info->wrapper_method) {
/* Lazily create and cache the wrapper; barrier publishes it safely. */
2985 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2986 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2988 mono_memory_barrier ();
2992 * Inline the wrapper method, which is basically a call to the C icall, and
2993 * an exception check.
2995 costs = inline_method (cfg, info->wrapper_method, NULL,
2996 args, NULL, cfg->real_offset, TRUE, out_cbb);
2997 g_assert (costs > 0);
2998 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3002 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small integer return values of pinvoke (or
 *   LLVM-compiled) calls, since native code may leave the upper bits of the
 *   return register uninitialized.  Returns the (possibly replaced) result
 *   instruction; non-small types pass through unchanged.
 */
3007 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3009 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3010 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3014 * Native code might return non register sized integers
3015 * without initializing the upper bits.
3017 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3018 case OP_LOADI1_MEMBASE:
3019 widen_op = OP_ICONV_TO_I1;
3021 case OP_LOADU1_MEMBASE:
3022 widen_op = OP_ICONV_TO_U1;
3024 case OP_LOADI2_MEMBASE:
3025 widen_op = OP_ICONV_TO_I2;
3027 case OP_LOADU2_MEMBASE:
3028 widen_op = OP_ICONV_TO_U2;
3034 if (widen_op != -1) {
3035 int dreg = alloc_preg (cfg);
3038 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* The widened value keeps the original's eval-stack type. */
3039 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the managed String.memcpy(3) helper from
 *   corlib; aborts if the installed corlib is too old to have it.
 */
3049 get_memcpy_method (void)
3051 static MonoMethod *memcpy_method = NULL;
3052 if (!memcpy_method) {
3053 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3055 g_error ("Old corlib found. Install a new one");
3057 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively build, in WB_BITMAP, a bitmap with one bit per
 *   pointer-sized slot of KLASS (starting at OFFSET) that holds a managed
 *   reference and therefore needs a write barrier.  Static fields are
 *   skipped; nested valuetypes with references recurse.
 */
3061 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3063 MonoClassField *field;
3064 gpointer iter = NULL;
3066 while ((field = mono_class_get_fields (klass, &iter))) {
3069 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the object header; strip it. */
3071 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3072 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3073 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3074 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3076 MonoClass *field_class = mono_class_from_mono_type (field->type);
3077 if (field_class->has_references)
3078 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR, choosing the
 *   cheapest available strategy: the arch-specific card-table barrier
 *   opcode, inline card-table marking, or a call to the GC's write-barrier
 *   method.  No-op when write barriers are disabled for this compile.
 */
3084 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3086 int card_table_shift_bits;
3087 gpointer card_table_mask;
3089 MonoInst *dummy_use;
3090 int nursery_shift_bits;
3091 size_t nursery_size;
3092 gboolean has_card_table_wb = FALSE;
3094 if (!cfg->gen_write_barriers)
3097 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3099 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3101 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3102 has_card_table_wb = TRUE;
/* Preferred: single arch opcode implementing the card-table barrier. */
3105 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3108 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3109 wbarrier->sreg1 = ptr->dreg;
3110 wbarrier->sreg2 = value->dreg;
3111 MONO_ADD_INS (cfg->cbb, wbarrier);
3112 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
/* Inline card marking: card = card_table[ptr >> shift]; *card = 1. */
3113 int offset_reg = alloc_preg (cfg);
3114 int card_reg = alloc_preg (cfg);
3117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3118 if (card_table_mask)
3119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3121 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3122 * IMM's larger than 32bits.
3124 if (cfg->compile_aot) {
3125 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3127 MONO_INST_NEW (cfg, ins, OP_PCONST);
3128 ins->inst_p0 = card_table;
3129 ins->dreg = card_reg;
3130 MONO_ADD_INS (cfg->cbb, ins);
3133 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3134 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write-barrier method. */
3136 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3137 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the GC's sake. */
3140 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Copy a valuetype of KLASS (SIZE bytes, ALIGN alignment) from
 *   iargs[1] to iargs[0] while honoring GC write barriers.  Small copies
 *   (<= 5 words) are unrolled inline with per-slot barriers; larger ones
 *   call mono_gc_wbarrier_value_copy_bitmap with a reference bitmap.
 *   Presumably returns FALSE when it cannot handle the copy (alignment or
 *   size limits) so the caller falls back — the return statements are
 *   elided from this extract.
 */
3144 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3146 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3147 unsigned need_wb = 0;
3152 /*types with references can't have alignment smaller than sizeof(void*) */
3153 if (align < SIZEOF_VOID_P)
3156 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3157 if (size > 32 * SIZEOF_VOID_P)
3160 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3162 /* We don't unroll more than 5 stores to avoid code bloat. */
3163 if (size > 5 * SIZEOF_VOID_P) {
3164 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3165 size += (SIZEOF_VOID_P - 1);
3166 size &= ~(SIZEOF_VOID_P - 1);
3168 EMIT_NEW_ICONST (cfg, iargs [2], size);
3169 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3170 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3174 destreg = iargs [0]->dreg;
3175 srcreg = iargs [1]->dreg;
3178 dest_ptr_reg = alloc_preg (cfg);
3179 tmp_reg = alloc_preg (cfg);
3182 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word-by-word copy; barrier each slot flagged in need_wb. */
3184 while (size >= SIZEOF_VOID_P) {
3185 MonoInst *load_inst;
3186 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3187 load_inst->dreg = tmp_reg;
3188 load_inst->inst_basereg = srcreg;
3189 load_inst->inst_offset = offset;
3190 MONO_ADD_INS (cfg->cbb, load_inst);
3192 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3195 emit_write_barrier (cfg, iargs [0], load_inst);
3197 offset += SIZEOF_VOID_P;
3198 size -= SIZEOF_VOID_P;
3201 /*tmp += sizeof (void*)*/
3202 if (size >= SIZEOF_VOID_P) {
3203 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3204 MONO_ADD_INS (cfg->cbb, iargs [0]);
3208 /* Those cannot be references since size < sizeof (void*) */
3210 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3211 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3217 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3218 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3224 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3225 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3234 * Emit code to copy a valuetype of type @klass whose address is stored in
3235 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy, in order: write-barrier-aware unrolled copy (when intrinsics
 * are on and the type is not gsharedvt), GC value-copy icall (when the type
 * has references and barriers are needed), inline memcpy for small types,
 * and finally a call to the managed memcpy helper (indirect, via the rgctx
 * memcpy entry, for gsharedvt types whose size is only known at runtime).
 * NATIVE selects native layout/size (no references allowed then).
 */
3238 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3240 MonoInst *iargs [4];
3243 MonoMethod *memcpy_method;
3244 MonoInst *size_ins = NULL;
3245 MonoInst *memcpy_ins = NULL;
3248 if (cfg->generic_sharing_context)
3249 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3252 * This check breaks with spilled vars... need to handle it during verification anyway.
3253 * g_assert (klass && klass == src->klass && klass == dest->klass);
3256 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size and memcpy routine are only known at runtime; fetch from rgctx. */
3258 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3259 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3263 n = mono_class_native_size (klass, &align);
3265 n = mono_class_value_size (klass, &align);
3267 /* if native is true there should be no references in the struct */
3268 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3269 /* Avoid barriers when storing to the stack */
3270 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3271 (dest->opcode == OP_LDADDR))) {
3277 context_used = mini_class_check_context_used (cfg, klass);
3279 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3280 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3282 } else if (context_used) {
3283 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3285 if (cfg->compile_aot) {
3286 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3288 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* The GC descriptor is needed by the value-copy icall below. */
3289 mono_class_compute_gc_descriptor (klass);
3294 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3296 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small, statically-sized types. */
3301 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3302 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3303 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3308 iargs [2] = size_ins;
3310 EMIT_NEW_ICONST (cfg, iargs [2], n);
3312 memcpy_method = get_memcpy_method ();
3314 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3316 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the managed String.memset(3) helper from
 *   corlib; aborts if the installed corlib is too old to have it.
 */
3321 get_memset_method (void)
3323 static MonoMethod *memset_method = NULL;
3324 if (!memset_method) {
3325 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3327 g_error ("Old corlib found. Install a new one");
3329 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of KLASS at the address in
 *   DEST.  gsharedvt types (runtime-sized) call a bzero helper through the
 *   rgctx; small statically-sized types use an inline memset; everything
 *   else calls the managed memset helper with a zero fill byte.
 */
3333 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3335 MonoInst *iargs [3];
3338 MonoMethod *memset_method;
3339 MonoInst *size_ins = NULL;
3340 MonoInst *bzero_ins = NULL;
3341 static MonoMethod *bzero_method;
3343 /* FIXME: Optimize this for the case when dest is an LDADDR */
3344 mono_class_init (klass);
3345 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size and bzero routine come from the runtime generic context. */
3346 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3347 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3349 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3350 g_assert (bzero_method);
3352 iargs [1] = size_ins;
3353 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3357 n = mono_class_value_size (klass, &align);
3359 if (n <= sizeof (gpointer) * 8) {
3360 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3363 memset_method = get_memset_method ();
3365 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3366 EMIT_NEW_ICONST (cfg, iargs [2], n);
3367 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for METHOD.  The source
 *   depends on how the method is shared: the mrgctx variable for generic
 *   methods, the vtable variable for static/valuetype methods (possibly
 *   dereferencing the mrgctx's class_vtable), or the vtable loaded from
 *   'this' otherwise.  Only valid under generic sharing (asserted).
 */
3372 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3374 MonoInst *this = NULL;
3376 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can reach the rgctx via 'this'. */
3378 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3379 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3380 !method->klass->valuetype)
3381 EMIT_NEW_ARGLOAD (cfg, this, 0);
3383 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3384 MonoInst *mrgctx_loc, *mrgctx_var;
3387 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3389 mrgctx_loc = mono_get_vtable_var (cfg);
3390 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3393 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3394 MonoInst *vtable_loc, *vtable_var;
3398 vtable_loc = mono_get_vtable_var (cfg);
3399 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3401 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable holds an mrgctx; load its class_vtable field. */
3402 MonoInst *mrgctx_var = vtable_var;
3405 vtable_reg = alloc_preg (cfg);
3406 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3407 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable directly from the 'this' object. */
3415 vtable_reg = alloc_preg (cfg);
3416 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate and fill a MonoJumpInfoRgctxEntry (plus its embedded
 *   MonoJumpInfo) from MP, describing an rgctx slot lookup of INFO_TYPE
 *   for PATCH_TYPE/PATCH_DATA.  (The return statement is elided from this
 *   extract.)
 */
3421 static MonoJumpInfoRgctxEntry *
3422 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3424 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3425 res->method = method;
3426 res->in_mrgctx = in_mrgctx;
3427 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3428 res->data->type = patch_type;
3429 res->data->data.target = patch_data;
3430 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline (via an abs patch) that
 *   resolves ENTRY against the rgctx value in RGCTX.
 */
3435 static inline MonoInst*
3436 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3438 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that fetches the RGCTX_TYPE datum for KLASS from the current
 *   method's runtime generic context.
 */
3442 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3443 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3445 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3446 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3448 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR that fetches the RGCTX_TYPE datum for signature SIG from the
 *   current method's runtime generic context.
 */
3452 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3453 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3455 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3456 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3458 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR that fetches the RGCTX_TYPE datum for a gsharedvt call
 *   (SIG + CMETHOD pair) from the current method's runtime generic context.
 */
3462 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3463 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3465 MonoJumpInfoGSharedVtCall *call_info;
3466 MonoJumpInfoRgctxEntry *entry;
3469 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3470 call_info->sig = sig;
3471 call_info->method = cmethod;
3473 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3474 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3476 return emit_rgctx_fetch (cfg, rgctx, entry);
3480 * emit_get_rgctx_virt_method:
3482 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3485 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3486 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3488 MonoJumpInfoVirtMethod *info;
3489 MonoJumpInfoRgctxEntry *entry;
/* Package the (klass, virt_method) pair for the rgctx fetch below. */
3492 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3493 info->klass = klass;
3494 info->method = virt_method;
3496 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3497 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3499 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR that fetches the gsharedvt info blob (INFO) for CMETHOD from
 *   the current method's runtime generic context.
 */
3503 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3504 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3506 MonoJumpInfoRgctxEntry *entry;
3509 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3510 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3512 return emit_rgctx_fetch (cfg, rgctx, entry);
3516 * emit_get_rgctx_method:
3518 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3519 * normal constants, else emit a load from the rgctx.
3522 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3523 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No context in use: the value is a compile-time constant. */
3525 if (!context_used) {
3528 switch (rgctx_type) {
3529 case MONO_RGCTX_INFO_METHOD:
3530 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3532 case MONO_RGCTX_INFO_METHOD_RGCTX:
3533 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3536 g_assert_not_reached ();
/* Shared code: fetch the value through the runtime generic context. */
3539 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3540 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3542 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR that fetches the RGCTX_TYPE datum for FIELD from the current
 *   method's runtime generic context.
 */
3547 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3548 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3550 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3551 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3553 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the gsharedvt info entry for (DATA, RGCTX_TYPE),
 *   reusing an existing entry when possible (except for LOCAL_OFFSET
 *   entries, which are never shared).  Grows the entry array on demand.
 *   NOTE(review): 'template' is a plain identifier here — fine in C, but
 *   it would clash with the keyword if this file were ever built as C++.
 */
3557 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3559 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3560 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing, shareable entry first. */
3565 for (i = 0; i < info->num_entries; ++i) {
3566 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3568 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Full: double the capacity (starting at 16) and copy the old entries. */
3572 if (info->num_entries == info->count_entries) {
3573 MonoRuntimeGenericContextInfoTemplate *new_entries;
3574 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3576 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3578 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3579 info->entries = new_entries;
3580 info->count_entries = new_count_entries;
3583 idx = info->num_entries;
3584 template = &info->entries [idx];
3585 template->info_type = rgctx_type;
3586 template->data = data;
3588 info->num_entries ++;
3594 * emit_get_gsharedvt_info:
3596 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3599 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or create) the slot for (data, rgctx_type) in the method's info template. */
3604 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3605 /* Load info->entries [idx] from the runtime info structure pointed to by the gsharedvt info var */
3606 dreg = alloc_preg (cfg);
3607 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class (its byval type). */
3613 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3615 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3619 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit IR which runs the class initializer (cctor) for KLASS if it has not
 * run yet, resolving the vtable through the rgctx when the class is shared.
 * *OUT_BBLOCK receives the basic block IR emission continues in.
 */
3622 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass, MonoBasicBlock **out_bblock)
3624 MonoInst *vtable_arg;
3627 *out_bblock = cfg->cbb;
3629 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx; otherwise embed it as a constant. */
3632 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3633 klass, MONO_RGCTX_INFO_VTABLE);
3635 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3639 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3642 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3646 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3647 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3650 * For LLVM, this requires that the code in the generic trampoline obtain the vtable argument according to
3651 * the normal calling convention of the platform.
3653 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3654 ins->sreg1 = vtable_arg->dreg;
3655 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback path: open-code the "already initialized?" check.
 * The initialized flag is a bitfield in MonoVTable; its byte offset and bit
 * mask are discovered once and cached in function-local statics. */
3657 static int byte_offset = -1;
3658 static guint8 bitmask;
3659 int bits_reg, inited_reg;
3660 MonoBasicBlock *inited_bb;
3661 MonoInst *args [16];
3663 if (byte_offset < 0)
3664 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3666 bits_reg = alloc_ireg (cfg);
3667 inited_reg = alloc_ireg (cfg);
/* inited_reg = (vtable bits byte) & bitmask */
3669 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3672 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the bit is already set. */
3674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3675 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3677 args [0] = vtable_arg;
3678 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3680 MONO_START_BB (cfg, inited_bb);
3681 *out_bblock = inited_bb;
/*
 * emit_class_init:
 *
 *   Emit a call to the class-init trampoline for KLASS via an abs call
 * patched with MONO_PATCH_INFO_CLASS_INIT. *OUT_BBLOCK is set to the
 * current bblock (this path does not split the bblock).
 */
3687 emit_class_init (MonoCompile *cfg, MonoClass *klass, MonoBasicBlock **out_bblock)
3689 /* This could be used as a fallback if needed */
3690 //emit_generic_class_init (cfg, klass, out_bblock);
3692 *out_bblock = cfg->cbb;
3694 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for the debugger, but only when
 * sequence points are enabled and METHOD is the method actually being
 * compiled (i.e. not an inlined callee).
 */
3698 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3702 if (cfg->gen_seq_points && cfg->method == method) {
3703 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Flag seq points where the IL evaluation stack is not empty */
3705 ins->flags |= MONO_INST_NONEMPTY_STACK;
3706 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, store the source class (read from the
 * object's vtable) and the target KLASS into the JIT TLS data so a failing
 * cast can produce a detailed message. When NULL_CHECK is set, the stores
 * are skipped for a null OBJ_REG. No-op otherwise.
 */
3711 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3713 if (mini_get_debug_options ()->better_cast_details) {
3714 int vtable_reg = alloc_preg (cfg);
3715 int klass_reg = alloc_preg (cfg);
3716 MonoBasicBlock *is_null_bb = NULL;
3718 int to_klass_reg, context_used;
/* Branch around the detail stores when the object is null. */
3721 NEW_BBLOCK (cfg, is_null_bb);
3723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3727 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Fixed: the format string previously ended in "\n." which printed a stray
 * '.' on its own line. */
3729 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3733 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3739 context_used = mini_class_check_context_used (cfg, klass);
/* class_cast_to = klass (via rgctx when shared, constant otherwise) */
3741 MonoInst *class_ins;
3743 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3744 to_klass_reg = class_ins->dreg;
3746 to_klass_reg = alloc_preg (cfg);
3747 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3752 MONO_START_BB (cfg, is_null_bb);
3754 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-detail information saved by save_cast_details () once the
 * cast has succeeded. Only the 'from' field needs clearing.
 */
3760 reset_cast_details (MonoCompile *cfg)
3762 /* Reset the variables holding the cast details */
3763 if (mini_get_debug_options ()->better_cast_details) {
3764 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3766 MONO_ADD_INS (cfg->cbb, tls_get);
3767 /* It is enough to reset the from field */
3768 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3773 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ is an instance
 * of exactly ARRAY_CLASS. The comparison strategy depends on the compilation
 * mode (shared, rgctx-shared, AOT, or plain JIT).
 */
3776 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3778 int vtable_reg = alloc_preg (cfg);
3781 context_used = mini_class_check_context_used (cfg, array_class);
3783 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Load obj->vtable, faulting on a null object. */
3785 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare the klass pointer, not the vtable
 * (vtables are per-domain in shared code). */
3787 if (cfg->opt & MONO_OPT_SHARED) {
3788 int class_reg = alloc_preg (cfg);
3789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3790 if (cfg->compile_aot) {
3791 int klass_reg = alloc_preg (cfg);
3792 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3793 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3795 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable through the rgctx. */
3797 } else if (context_used) {
3798 MonoInst *vtable_ins;
3800 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3801 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Plain JIT/AOT: compare against the concrete vtable (constant or patched). */
3803 if (cfg->compile_aot) {
3807 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3809 vt_reg = alloc_preg (cfg);
3810 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3811 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3814 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3820 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3822 reset_cast_details (cfg);
3826 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3827 * generic code is generated.
/* Unboxing a Nullable<T> is implemented by calling Nullable<T>.Unbox (). */
3830 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3832 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
/* Shared case: fetch the address of Unbox from the rgctx and call indirectly. */
3835 MonoInst *rgctx, *addr;
3837 /* FIXME: What if the class is shared? We might not
3838 have to get the address of the method from the
3840 addr = emit_get_rgctx_method (cfg, context_used, method,
3841 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3843 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3845 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared case: direct call, possibly passing the vtable as an extra arg
 * when the callee expects one (check_method_sharing decides). */
3847 gboolean pass_vtable, pass_mrgctx;
3848 MonoInst *rgctx_arg = NULL;
3850 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3851 g_assert (!pass_mrgctx);
3854 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3857 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3860 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR unboxing SP [0] to KLASS: check the object really is a boxed
 * instance of the (element) class, then return the address of the value
 * data, i.e. obj + sizeof (MonoObject).
 */
3865 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3869 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3870 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3871 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3872 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3874 obj_reg = sp [0]->dreg;
/* Load vtable (faulting on null) and its rank byte. */
3875 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3876 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3878 /* FIXME: generics */
3879 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array klass. */
3882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3883 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3885 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3886 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared case: compare element classes via an rgctx lookup. */
3889 MonoInst *element_class;
3891 /* This assertion is from the unboxcast insn */
3892 g_assert (klass->rank == 0);
3894 element_class = emit_get_rgctx_klass (cfg, context_used,
3895 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3897 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3898 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared case: direct class check with cast details for debugging. */
3900 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3901 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3902 reset_cast_details (cfg);
/* Result: pointer to the value data, just past the MonoObject header. */
3905 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3906 MONO_ADD_INS (cfg->cbb, add);
3907 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ to the gsharedvt class KLASS. Since at JIT time it is unknown
 * whether the instantiation of KLASS is a reference type, a value type, or a
 * nullable, emit a runtime three-way branch on the class "box type" fetched
 * from the gsharedvt info, and load the result from a common address
 * register at the join point. *OUT_CBB receives the final bblock.
 */
3914 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3916 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3917 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3921 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Cast check first: mono_object_castclass_unbox throws on mismatch. */
3927 args [1] = klass_inst;
3930 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3932 NEW_BBLOCK (cfg, is_ref_bb);
3933 NEW_BBLOCK (cfg, is_nullable_bb);
3934 NEW_BBLOCK (cfg, end_bb);
/* Box type: 1 = reference type, 2 = nullable, otherwise plain vtype. */
3935 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3937 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3940 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3942 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3943 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path (fallthrough): address is obj + sizeof (MonoObject). */
3947 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3948 MONO_ADD_INS (cfg->cbb, addr);
3950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference-type path: spill the reference to a temporary and use its address. */
3953 MONO_START_BB (cfg, is_ref_bb);
3955 /* Save the ref to a temporary */
3956 dreg = alloc_ireg (cfg);
3957 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3958 addr->dreg = addr_reg;
3959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3960 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: indirect-call Nullable<T>.Unbox through a hand-built
 * one-parameter signature, then take the address of the returned value. */
3963 MONO_START_BB (cfg, is_nullable_bb);
3966 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3967 MonoInst *unbox_call;
3968 MonoMethodSignature *unbox_sig;
3970 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3971 unbox_sig->ret = &klass->byval_arg;
3972 unbox_sig->param_count = 1;
3973 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3974 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3976 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3977 addr->dreg = addr_reg;
3980 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Join point: load the unboxed value through the common address register. */
3983 MONO_START_BB (cfg, end_bb);
3986 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3988 *out_cbb = cfg->cbb;
3994 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating an instance of KLASS (optionally for a box operation).
 * Picks between the managed allocator, domain-shared allocation helpers,
 * an mscorlib-specialized AOT helper, and the generic allocation function.
 */
3997 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3999 MonoInst *iargs [2];
/* Generic-sharing path: resolve klass/vtable through the rgctx. */
4005 MonoInst *iargs [2];
4006 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
4008 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4010 if (cfg->opt & MONO_OPT_SHARED)
4011 rgctx_info = MONO_RGCTX_INFO_KLASS;
4013 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4014 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4016 if (cfg->opt & MONO_OPT_SHARED) {
4017 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4019 alloc_ftn = mono_object_new;
4022 alloc_ftn = mono_object_new_specific;
/* Prefer the inline managed allocator when available and not domain-shared. */
4025 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4026 if (known_instance_size) {
4027 int size = mono_class_instance_size (klass);
4028 if (size < sizeof (MonoObject))
4029 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4031 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4033 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4036 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4039 if (cfg->opt & MONO_OPT_SHARED) {
4040 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4041 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4043 alloc_ftn = mono_object_new;
4044 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4045 /* This happens often in argument checking code, eg. throw new FooException... */
4046 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4047 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4048 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4050 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4051 MonoMethod *managed_alloc = NULL;
/* Failing to create the vtable is a TypeLoadException at compile time. */
4055 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4056 cfg->exception_ptr = klass;
4060 #ifndef MONO_CROSS_COMPILE
4061 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4064 if (managed_alloc) {
4065 int size = mono_class_instance_size (klass);
4066 if (size < sizeof (MonoObject))
4067 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4069 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4070 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4071 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4073 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in machine words as arg 0. */
4075 guint32 lw = vtable->klass->instance_size;
4076 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4077 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4078 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4081 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4085 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4089 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS. Handles Nullable<T> via
 * Nullable<T>.Box (), gsharedvt classes via a runtime branch on the class
 * box type, and plain value types via handle_alloc + a value store.
 * *OUT_CBB receives the bblock emission continues in.
 */
4092 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4094 MonoInst *alloc, *ins;
4096 *out_cbb = cfg->cbb;
/* Nullable<T>: boxing is a call to Nullable<T>.Box (). */
4098 if (mono_class_is_nullable (klass)) {
4099 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4102 /* FIXME: What if the class is shared? We might not
4103 have to get the method address from the RGCTX. */
4104 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4105 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4106 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4108 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4110 gboolean pass_vtable, pass_mrgctx;
4111 MonoInst *rgctx_arg = NULL;
4113 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4114 g_assert (!pass_mrgctx);
4117 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4120 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4123 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: instantiation type unknown at JIT time — branch on the runtime
 * box type (1 = ref, 2 = nullable, else vtype) like handle_unbox_gsharedvt. */
4127 if (mini_is_gsharedvt_klass (cfg, klass)) {
4128 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4129 MonoInst *res, *is_ref, *src_var, *addr;
4132 dreg = alloc_ireg (cfg);
4134 NEW_BBLOCK (cfg, is_ref_bb);
4135 NEW_BBLOCK (cfg, is_nullable_bb);
4136 NEW_BBLOCK (cfg, end_bb);
4137 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4139 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4142 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype path: allocate and store the value past the object header. */
4145 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4148 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4149 ins->opcode = OP_STOREV_MEMBASE;
4151 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4152 res->type = STACK_OBJ;
4154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref path: "boxing" a reference is just a load of the reference itself. */
4157 MONO_START_BB (cfg, is_ref_bb);
4159 /* val is a vtype, so has to load the value manually */
4160 src_var = get_vreg_to_inst (cfg, val->dreg);
4162 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4163 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable path: indirect-call Nullable<T>.Box through a hand-built signature. */
4168 MONO_START_BB (cfg, is_nullable_bb);
4171 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4172 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4174 MonoMethodSignature *box_sig;
4177 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4178 * construct that method at JIT time, so have to do things by hand.
4180 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4181 box_sig->ret = &mono_defaults.object_class->byval_arg;
4182 box_sig->param_count = 1;
4183 box_sig->params [0] = &klass->byval_arg;
4184 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4185 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4186 res->type = STACK_OBJ;
4190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4192 MONO_START_BB (cfg, end_bb);
4194 *out_cbb = cfg->cbb;
/* Plain (non-gsharedvt) box: allocate and copy the value. */
4198 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, with context_used, an
 * open generic definition) that has at least one covariant/contravariant
 * type argument which is a reference type — such casts need the full
 * variance-aware cast machinery.
 */
4208 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4211 MonoGenericContainer *container;
4212 MonoGenericInst *ginst;
4214 if (klass->generic_class) {
4215 container = klass->generic_class->container_class->generic_container;
4216 ginst = klass->generic_class->context.class_inst;
4217 } else if (klass->generic_container && context_used) {
4218 container = klass->generic_container;
4219 ginst = container->context.class_inst;
/* Scan the type arguments of variant parameters for reference types. */
4224 for (i = 0; i < container->type_argc; ++i) {
4226 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4228 type = ginst->type_argv [i];
4229 if (mini_type_is_reference (cfg, type))
/* Lazily built whitelist of corlib class names whose icalls are direct-callable. */
4235 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be called directly (without going
 * through a wrapper). Only whitelisted corlib types qualify, since a
 * directly callable icall must not (transitively) raise a managed exception.
 */
4238 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4240 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4241 if (!direct_icalls_enabled (cfg))
4245 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4246 * Whitelist a few icalls for now.
/* Build the whitelist once; the memory barrier publishes the fully
 * initialized table before the global pointer becomes visible. */
4248 if (!direct_icall_type_hash) {
4249 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4251 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4252 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4253 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4254 mono_memory_barrier ();
4255 direct_icall_type_hash = h;
4258 if (cmethod->klass == mono_defaults.math_class)
4260 /* No locking needed */
4261 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be done with a simple
 * vtable/klass comparison (interfaces, arrays, nullables, MBR, sealed,
 * or open generic parameters). NOTE: function-like macro — evaluates its
 * argument multiple times, so pass only side-effect-free expressions. */
4266 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper, bracketed by
 * save/reset of the cast details. ARGS are the wrapper's arguments
 * (object, klass, cache slot); *OUT_BBLOCK receives the final bblock.
 */
4269 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4271 MonoMethod *mono_castclass;
4274 mono_castclass = mono_marshal_get_castclass_with_cache ();
4276 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4277 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4278 reset_cast_details (cfg);
4279 *out_bblock = cfg->cbb;
/* Allocate a per-call-site castclass cache index, combining the method index
 * (high 16 bits) with a per-method running counter (low 16 bits). */
4285 get_castclass_cache_idx (MonoCompile *cfg)
4287 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4288 cfg->castclass_cache_index ++;
4289 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: build the (obj, klass, cache-slot) argument triple
 * for the castclass-with-cache wrapper. Under AOT the cache slot is an
 * aot constant keyed by a per-call-site index; otherwise it is a freshly
 * allocated pointer-sized domain slot.
 */
4293 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4302 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4305 if (cfg->compile_aot) {
4306 idx = get_castclass_cache_idx (cfg);
4307 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4309 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4312 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4314 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4318 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CEE_CASTCLASS opcode: check that SRC is an
 * instance of KLASS, throwing InvalidCastException otherwise. Routes
 * through the cache wrapper, an inlined marshal wrapper, or open-coded
 * vtable/klass checks depending on KLASS and the sharing mode.
 * *INLINE_COSTS is incremented by the cost of any inlined code.
 */
4321 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4323 MonoBasicBlock *is_null_bb;
4324 int obj_reg = src->dreg;
4325 int vtable_reg = alloc_preg (cfg);
4327 MonoInst *klass_inst = NULL, *res;
4328 MonoBasicBlock *bblock;
4332 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic arguments need the variance-aware cached cast. */
4334 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4335 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4336 (*inline_costs) += 2;
/* MBR classes and interfaces: inline the castclass marshal wrapper. */
4339 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4340 MonoMethod *mono_castclass;
4341 MonoInst *iargs [1];
4344 mono_castclass = mono_marshal_get_castclass (klass);
4347 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4348 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4349 iargs, ip, cfg->real_offset, TRUE, &bblock);
4350 reset_cast_details (cfg);
4351 CHECK_CFG_EXCEPTION;
4352 g_assert (costs > 0);
4354 cfg->real_offset += 5;
4356 (*inline_costs) += costs;
/* Shared code: complex casts go through the cache wrapper, with the cache
 * slot and klass fetched from the rgctx. */
4365 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4366 MonoInst *cache_ins;
4368 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4373 /* klass - it's the second element of the cache entry*/
4374 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4377 args [2] = cache_ins;
4379 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4382 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Open-coded check: null objects always pass a castclass. */
4385 NEW_BBLOCK (cfg, is_null_bb);
4387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4388 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4390 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4392 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4394 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4396 int klass_reg = alloc_preg (cfg);
4398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed classes can be checked with a single klass pointer comparison. */
4400 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4401 /* the remoting code is broken, access the class for now */
4402 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4403 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4406 cfg->exception_ptr = klass;
4409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4414 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4416 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4417 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4421 MONO_START_BB (cfg, is_null_bb);
4423 reset_cast_details (cfg);
4434 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CEE_ISINST opcode: the result register holds
 * SRC if it is an instance of KLASS (or null), and 0 otherwise. Complex
 * casts go through the isinst-with-cache wrapper; everything else is
 * open-coded with vtable/klass checks.
 */
4437 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4440 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4441 int obj_reg = src->dreg;
4442 int vtable_reg = alloc_preg (cfg);
4443 int res_reg = alloc_ireg_ref (cfg);
4444 MonoInst *klass_inst = NULL;
/* Complex casts (variance, interfaces, arrays, nullables, …): call the
 * isinst-with-cache wrapper with the cache slot fetched from the rgctx. */
4449 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4450 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4451 MonoInst *cache_ins;
4453 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4458 /* klass - it's the second element of the cache entry*/
4459 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4462 args [2] = cache_ins;
4464 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4467 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Open-coded check: is_null_bb keeps the input value, false_bb zeroes it. */
4470 NEW_BBLOCK (cfg, is_null_bb);
4471 NEW_BBLOCK (cfg, false_bb);
4472 NEW_BBLOCK (cfg, end_bb);
4474 /* Do the assignment at the beginning, so the other assignment can be if converted */
4475 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4476 ins->type = STACK_OBJ;
4479 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4480 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4484 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4485 g_assert (!context_used);
4486 /* the is_null_bb target simply copies the input register to the output */
4487 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4489 int klass_reg = alloc_preg (cfg);
/* Array klass: check the rank byte first, then the element class. */
4492 int rank_reg = alloc_preg (cfg);
4493 int eclass_reg = alloc_preg (cfg);
4495 g_assert (!context_used);
4496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element classes (object, Enum and friends) get bespoke checks. */
4501 if (klass->cast_class == mono_defaults.object_class) {
4502 int parent_reg = alloc_preg (cfg);
4503 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4504 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4505 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4507 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4508 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4509 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4511 } else if (klass->cast_class == mono_defaults.enum_class) {
4512 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4514 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4515 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4517 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4518 /* Check that the object is a vector too */
4519 int bounds_reg = alloc_preg (cfg);
4520 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4522 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4525 /* the is_null_bb target simply copies the input register to the output */
4526 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4528 } else if (mono_class_is_nullable (klass)) {
4529 g_assert (!context_used);
4530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4531 /* the is_null_bb target simply copies the input register to the output */
4532 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: a single klass pointer comparison suffices. */
4534 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4535 g_assert (!context_used);
4536 /* the remoting code is broken, access the class for now */
4537 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4538 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4541 cfg->exception_ptr = klass;
4544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4553 /* the is_null_bb target simply copies the input register to the output */
4554 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb zeroes the result; is_null_bb falls through with the input value. */
4559 MONO_START_BB (cfg, false_bb);
4561 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4564 MONO_START_BB (cfg, is_null_bb);
4566 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CEE_MONO_CISINST opcode: a remoting-aware "isinst" test.
 * Produces an I4 result register: 0 = instance of KLASS, 1 = not an instance,
 * 2 = transparent proxy whose type cannot be determined locally.
 * NOTE(review): this extracted view omits some original lines (return type,
 * #else/#endif pairs, return statement, braces); only comments were added.
 */
4572 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4574 /* This opcode takes as input an object reference and a class, and returns:
4575 0) if the object is an instance of the class,
4576 1) if the object is not instance of the class,
4577 2) if the object is a proxy whose type cannot be determined */
/* With remoting, extra blocks are needed for the "can't determine" (false2)
 * and "not a proxy" (no_proxy) paths. */
4580 #ifndef DISABLE_REMOTING
4581 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4583 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4585 int obj_reg = src->dreg;
4586 int dreg = alloc_ireg (cfg);
4588 #ifndef DISABLE_REMOTING
4589 int klass_reg = alloc_preg (cfg);
4592 NEW_BBLOCK (cfg, true_bb);
4593 NEW_BBLOCK (cfg, false_bb);
4594 NEW_BBLOCK (cfg, end_bb);
4595 #ifndef DISABLE_REMOTING
4596 NEW_BBLOCK (cfg, false2_bb);
4597 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is treated as "not an instance" (branch to false_bb). */
4600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4603 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4604 #ifndef DISABLE_REMOTING
4605 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface case: load the vtable and do an interface-bitmap test. */
4608 tmp_reg = alloc_preg (cfg);
4609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4610 #ifndef DISABLE_REMOTING
4611 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4612 MONO_START_BB (cfg, interface_fail_bb);
4613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not on the interface bitmap: only a transparent proxy can still match. */
4615 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4617 tmp_reg = alloc_preg (cfg);
4618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info => type cannot be determined (result 2). */
4619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4620 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4622 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4625 #ifndef DISABLE_REMOTING
/* Non-interface case: check whether the object is a transparent proxy. */
4626 tmp_reg = alloc_preg (cfg);
4627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4630 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class instead. */
4631 tmp_reg = alloc_preg (cfg);
4632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4635 tmp_reg = alloc_preg (cfg);
4636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4637 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4640 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4643 MONO_START_BB (cfg, no_proxy_bb);
4645 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Remoting disabled: reaching this opcode with proxies is a hard error. */
4647 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance. */
4651 MONO_START_BB (cfg, false_bb);
4653 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4654 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4656 #ifndef DISABLE_REMOTING
/* 2 = proxy whose type cannot be determined. */
4657 MONO_START_BB (cfg, false2_bb);
4659 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* 0 = is an instance of KLASS. */
4663 MONO_START_BB (cfg, true_bb);
4665 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4667 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an I4-typed instruction for the eval stack. */
4670 MONO_INST_NEW (cfg, ins, OP_ICONST);
4672 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CEE_MONO_CCASTCLASS opcode: a remoting-aware castclass.
 * Result register: 0 = instance of KLASS, 1 = proxy whose type cannot be
 * determined; otherwise an InvalidCastException is thrown at runtime.
 * NOTE(review): this extracted view omits some original lines (#else/#endif
 * pairs, return statement, braces); only comments were added.
 */
4678 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4680 /* This opcode takes as input an object reference and a class, and returns:
4681 0) if the object is an instance of the class,
4682 1) if the object is a proxy whose type cannot be determined
4683 an InvalidCastException exception is thrown otherwhise*/
4686 #ifndef DISABLE_REMOTING
4687 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4689 MonoBasicBlock *ok_result_bb;
4691 int obj_reg = src->dreg;
4692 int dreg = alloc_ireg (cfg);
4693 int tmp_reg = alloc_preg (cfg);
4695 #ifndef DISABLE_REMOTING
4696 int klass_reg = alloc_preg (cfg);
4697 NEW_BBLOCK (cfg, end_bb);
4700 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4703 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failed cast produces a descriptive exception. */
4705 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4707 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4708 #ifndef DISABLE_REMOTING
4709 NEW_BBLOCK (cfg, interface_fail_bb);
4711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4712 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4713 MONO_START_BB (cfg, interface_fail_bb);
4714 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not on the interface bitmap: must be a transparent proxy, else throws. */
4716 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4718 tmp_reg = alloc_preg (cfg);
4719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info cannot satisfy the cast. */
4720 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4721 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Result 1: proxy whose type cannot be determined locally. */
4723 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: a plain interface cast that throws on failure. */
4726 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4727 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4731 #ifndef DISABLE_REMOTING
4732 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: check whether the object is a transparent proxy. */
4734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4736 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class. */
4738 tmp_reg = alloc_preg (cfg);
4739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4742 tmp_reg = alloc_preg (cfg);
4743 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4744 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4745 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4747 NEW_BBLOCK (cfg, fail_1_bb);
4749 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Result 1: cast of a proxy could not be decided here. */
4751 MONO_START_BB (cfg, fail_1_bb);
4753 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a normal castclass check (throws on failure). */
4756 MONO_START_BB (cfg, no_proxy_bb);
4758 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4760 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* Result 0: the cast succeeded. */
4764 MONO_START_BB (cfg, ok_result_bb);
4766 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4768 #ifndef DISABLE_REMOTING
4769 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an I4-typed instruction for the eval stack. */
4773 MONO_INST_NEW (cfg, ins, OP_ICONST);
4775 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inlined Enum.HasFlag () check: result = ((*enum_this & enum_flag)
 * == enum_flag), operating on the enum's underlying integer type so the
 * reflection-based implementation is avoided.
 * NOTE(review): this extracted view omits some original lines (switch cases,
 * is_i4 setup, return statement, braces); only comments were added.
 */
4780 static G_GNUC_UNUSED MonoInst*
4781 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4783 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4784 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Pick 32-bit vs 64-bit opcodes based on the underlying enum type. */
4787 switch (enum_type->type) {
4790 #if SIZEOF_REGISTER == 8
/* load *enum_this, AND with the flag, compare against the flag, set 0/1. */
4802 MonoInst *load, *and, *cmp, *ceq;
4803 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4804 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4805 int dest_reg = alloc_ireg (cfg);
4807 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4808 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4809 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4810 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
/* The comparison result is an I4 boolean on the eval stack. */
4812 ceq->type = STACK_I4;
/* Decompose the long opcodes on 32-bit platforms where needed. */
4815 load = mono_decompose_opcode (cfg, load, NULL);
4816 and = mono_decompose_opcode (cfg, and, NULL);
4817 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4818 ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 *
 *   Inline the body of mono_delegate_ctor (): allocate the delegate object,
 * store its target/method fields, and set up the invoke trampoline.
 * VIRTUAL selects the virtual-invoke trampoline.  CONTEXT_USED != 0 means
 * METHOD is looked up through the rgctx at runtime.
 * NOTE(review): this extracted view omits some original lines (braces,
 * #else branches, the final return); only comments were added.
 */
4826 * Returns NULL and set the cfg exception on error.
4828 static G_GNUC_UNUSED MonoInst*
4829 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4833 gpointer trampoline;
4834 MonoInst *obj, *method_ins, *tramp_ins;
4839 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out when the arch has no virtual-invoke impl for this signature. */
4842 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4846 obj = handle_alloc (cfg, klass, FALSE, 0);
4850 /* Inline the contents of mono_delegate_ctor */
4852 /* Set target field */
4853 /* Optimize away setting of NULL target */
4854 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* A reference was stored into a heap object: emit a GC write barrier. */
4856 if (cfg->gen_write_barriers) {
4857 dreg = alloc_preg (cfg);
4858 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4859 emit_write_barrier (cfg, ptr, target);
4863 /* Set method field */
4864 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4868 * To avoid looking up the compiled code belonging to the target method
4869 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4870 * store it, and we fill it after the method has been compiled.
4872 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4873 MonoInst *code_slot_ins;
4876 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or reuse) the per-domain code slot under the domain lock. */
4878 domain = mono_domain_get ();
4879 mono_domain_lock (domain);
4880 if (!domain_jit_info (domain)->method_code_hash)
4881 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4882 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4884 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4885 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4887 mono_domain_unlock (domain);
/* AOT code references the slot via a patch; JIT embeds the pointer. */
4889 if (cfg->compile_aot)
4890 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4892 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4894 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: AOT patch info vs runtime-created. */
4897 if (cfg->compile_aot) {
4898 MonoDelegateClassMethodPair *del_tramp;
4900 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4901 del_tramp->klass = klass;
4902 del_tramp->method = context_used ? NULL : method;
4903 del_tramp->virtual = virtual;
4904 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4907 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4909 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4910 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4913 /* Set invoke_impl field */
4915 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path: read invoke_impl/method_ptr out of the tramp info. */
4917 dreg = alloc_preg (cfg);
4918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4921 dreg = alloc_preg (cfg);
4922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4923 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4926 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a
 * multi-dimensional array of rank RANK, with the dimension arguments in SP.
 * Uses a vararg calling convention, which also disables LLVM for the method.
 * NOTE(review): the closing brace is missing from this extracted view.
 */
4932 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4934 MonoJitICallInfo *info;
4936 /* Need to register the icall so it gets an icall wrapper */
4937 info = mono_get_array_new_va_icall (rank);
4939 cfg->flags |= MONO_CFG_HAS_VARARGS;
4941 /* mono_array_new_va () needs a vararg calling convention */
4942 cfg->disable_llvm = TRUE;
4944 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4945 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * NOTE(review): this extracted view omits some original lines (args [0]
 * setup, else branches, the final return); only comments were added.
 */
4949 * handle_constrained_gsharedvt_call:
4951 * Handle constrained calls where the receiver is a gsharedvt type.
4952 * Return the instruction representing the call. Set the cfg exception on failure.
4955 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4956 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4958 MonoInst *ins = NULL;
4959 MonoBasicBlock *bblock = *ref_bblock;
4960 gboolean emit_widen = *ref_emit_widen;
4963 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4964 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4965 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a restricted set of method/signature shapes is supported; anything
 * else falls through to GSHAREDVT_FAILURE below. */
4967 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4968 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4969 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4970 MonoInst *args [16];
4973 * This case handles calls to
4974 * - object:ToString()/Equals()/GetHashCode(),
4975 * - System.IComparable<T>:CompareTo()
4976 * - System.IEquatable<T>:Equals ()
4977 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, looked up via rgctx if generic context
 * is involved, otherwise embedded as a constant. */
4981 if (mono_method_check_context_used (cmethod))
4982 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4984 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4985 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4987 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4988 if (fsig->hasthis && fsig->param_count) {
4989 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4990 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4991 ins->dreg = alloc_preg (cfg);
4992 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4993 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt args are passed by address; args [3] carries the box-type
 * info so the icall can box when the receiver is a vtype. */
4996 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4999 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5001 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5002 addr_reg = ins->dreg;
5003 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5005 EMIT_NEW_ICONST (cfg, args [3], 0);
5006 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5009 EMIT_NEW_ICONST (cfg, args [3], 0);
5010 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall performs the actual constrained dispatch at runtime. */
5012 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed object; unbox value-typed results. */
5015 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
5016 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
5017 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to read the unboxed value. */
5021 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5022 MONO_ADD_INS (cfg->cbb, add);
5024 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5025 MONO_ADD_INS (cfg->cbb, ins);
5026 /* ins represents the call result */
5029 GSHAREDVT_FAILURE (CEE_CALLVIRT);
/* Propagate possibly-updated out parameters back to the caller. */
5032 *ref_emit_widen = emit_widen;
5033 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize cfg->got_var, plus a dummy use in the exit block so the GOT
 * variable stays live for the whole method.  No-op when the method has no
 * got_var or it was already allocated.
 * NOTE(review): the closing brace is missing from this extracted view.
 */
5042 mono_emit_load_got_addr (MonoCompile *cfg)
5044 MonoInst *getaddr, *dummy_use;
5046 if (!cfg->got_var || cfg->got_var_allocated)
5049 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5050 getaddr->cil_code = cfg->header->code;
5051 getaddr->dreg = cfg->got_var->dreg;
5053 /* Add it to the start of the first bblock */
5054 if (cfg->bb_entry->code) {
5055 getaddr->next = cfg->bb_entry->code;
5056 cfg->bb_entry->code = getaddr;
5059 MONO_ADD_INS (cfg->bb_entry, getaddr);
5061 cfg->got_var_allocated = TRUE;
5064 * Add a dummy use to keep the got_var alive, since real uses might
5065 * only be generated by the back ends.
5066 * Add it to end_bblock, so the variable's lifetime covers the whole
5068 * It would be better to make the usage of the got var explicit in all
5069 * cases when the backend needs it (i.e. calls, throw etc.), so this
5070 * wouldn't be needed.
5072 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5073 MONO_ADD_INS (cfg->bb_exit, dummy_use);
5076 static int inline_limit;
5077 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into CFG.  Checks
 * compiler flags, inline depth, method attributes, a size limit
 * (MONO_INLINELIMIT env var, default INLINE_LENGTH_LIMIT), class cctor
 * requirements and soft-float restrictions.
 * NOTE(review): this extracted view omits some original lines (returns,
 * braces, vtable NULL checks); only comments were added.
 */
5080 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5082 MonoMethodHeaderSummary header;
5084 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5085 MonoMethodSignature *sig = mono_method_signature (method);
/* Global switches that disable inlining outright. */
5089 if (cfg->disable_inline)
5091 if (cfg->generic_sharing_context)
/* Cap recursion through nested inlines. */
5094 if (cfg->inline_depth > 10)
5097 #ifdef MONO_ARCH_HAVE_LMF_OPS
5098 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5099 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5100 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5105 if (!mono_method_get_header_summary (method, &header))
5108 /*runtime, icall and pinvoke are checked by summary call*/
5109 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5110 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5111 (mono_class_is_marshalbyref (method->klass)) ||
5115 /* also consider num_locals? */
5116 /* Do the size check early to avoid creating vtables */
/* Lazy one-time init of the size limit; not thread-safe, presumably only
 * reached under the JIT lock — TODO confirm. */
5117 if (!inline_limit_inited) {
5118 if (g_getenv ("MONO_INLINELIMIT"))
5119 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5121 inline_limit = INLINE_LENGTH_LIMIT;
5122 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
5124 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5128 * if we can initialize the class of the method right away, we do,
5129 * otherwise we don't allow inlining if the class needs initialization,
5130 * since it would mean inserting a call to mono_runtime_class_init()
5131 * inside the inlined code
5133 if (!(cfg->opt & MONO_OPT_SHARED)) {
5134 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5135 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5136 vtable = mono_class_vtable (cfg->domain, method->klass);
5139 if (!cfg->compile_aot)
5140 mono_runtime_class_init (vtable);
5141 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5142 if (cfg->run_cctors && method->klass->has_cctor) {
5143 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5144 if (!method->klass->runtime_info)
5145 /* No vtable created yet */
5147 vtable = mono_class_vtable (cfg->domain, method->klass);
5150 /* This makes so that inline cannot trigger */
5151 /* .cctors: too many apps depend on them */
5152 /* running with a specific order... */
5153 if (! vtable->initialized)
5155 mono_runtime_class_init (vtable);
5157 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5158 if (!method->klass->runtime_info)
5159 /* No vtable created yet */
5161 vtable = mono_class_vtable (cfg->domain, method->klass);
5164 if (!vtable->initialized)
5169 * If we're compiling for shared code
5170 * the cctor will need to be run at aot method load time, for example,
5171 * or at the end of the compilation of the inlining method.
5173 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft float: reject methods taking or returning R4, which would need
 * special handling. */
5177 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5178 if (mono_arch_is_soft_float ()) {
5180 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5182 for (i = 0; i < sig->param_count; ++i)
5183 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Methods explicitly blacklisted for this compilation. */
5188 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD on KLASS requires an
 * explicit class-initialization call to be emitted.
 * NOTE(review): this extracted view omits some original lines (returns,
 * braces); only comments were added.
 */
5195 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* JIT case: an already-initialized vtable means no cctor call is needed. */
5197 if (!cfg->compile_aot) {
5199 if (vtable->initialized)
/* BeforeFieldInit classes only need the check inside their own methods. */
5203 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5204 if (cfg->method == method)
5208 if (!mono_class_needs_cctor_run (klass, method))
/* Instance methods of the same class: the cctor already ran at call time. */
5211 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5212 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional
 * array of element type KLASS, with an optional bounds check (BCHECK).
 * Returns the STACK_MP address instruction.  Uses an x86 LEA fast path for
 * power-of-two element sizes, and an rgctx lookup of the element size for
 * gsharedvt variable-size elements.
 * NOTE(review): this extracted view omits some original lines (#else
 * branches, returns, braces); only comments were added.
 */
5219 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5223 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5226 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5229 mono_class_init (klass);
5230 size = mono_class_array_element_size (klass);
5233 mult_reg = alloc_preg (cfg);
5234 array_reg = arr->dreg;
5235 index_reg = index->dreg;
5237 #if SIZEOF_REGISTER == 8
5238 /* The array reg is 64 bits but the index reg is only 32 */
5239 if (COMPILE_LLVM (cfg)) {
5241 index2_reg = index_reg;
5243 index2_reg = alloc_preg (cfg);
5244 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits. */
5247 if (index->type == STACK_I8) {
5248 index2_reg = alloc_preg (cfg);
5249 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5251 index2_reg = index_reg;
/* Optional array bounds check against MonoArray.max_length. */
5256 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5258 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into a single LEA for 1/2/4/8-byte elements. */
5259 if (size == 1 || size == 2 || size == 4 || size == 8) {
5260 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5262 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5263 ins->klass = mono_class_get_element_class (klass);
5264 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
5270 add_reg = alloc_ireg_mp (cfg);
5273 MonoInst *rgctx_ins;
/* Gsharedvt: element size is only known at runtime, fetch it via rgctx. */
5276 g_assert (cfg->generic_sharing_context);
5277 context_used = mini_class_check_context_used (cfg, klass);
5278 g_assert (context_used);
5279 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5280 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5282 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5284 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5285 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5286 ins->klass = mono_class_get_element_class (klass);
5287 ins->type = STACK_MP;
5288 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of a two-dimensional array element
 * ARR [INDEX1, INDEX2] of element type KLASS, including per-dimension
 * lower-bound adjustment and range checks against MonoArrayBounds.
 * Only compiled when the arch has a real multiply (no MUL/DIV emulation).
 * NOTE(review): this extracted view omits some original lines (index
 * reassignments after sign-extension, return, braces); only comments added.
 */
5293 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5295 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5297 int bounds_reg = alloc_preg (cfg);
5298 int add_reg = alloc_ireg_mp (cfg);
5299 int mult_reg = alloc_preg (cfg);
5300 int mult2_reg = alloc_preg (cfg);
5301 int low1_reg = alloc_preg (cfg);
5302 int low2_reg = alloc_preg (cfg);
5303 int high1_reg = alloc_preg (cfg);
5304 int high2_reg = alloc_preg (cfg);
5305 int realidx1_reg = alloc_preg (cfg);
5306 int realidx2_reg = alloc_preg (cfg);
5307 int sum_reg = alloc_preg (cfg);
5308 int index1, index2, tmpreg;
5312 mono_class_init (klass);
5313 size = mono_class_array_element_size (klass);
5315 index1 = index_ins1->dreg;
5316 index2 = index_ins2->dreg;
5318 #if SIZEOF_REGISTER == 8
5319 /* The array reg is 64 bits but the index reg is only 32 */
5320 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width. */
5323 tmpreg = alloc_preg (cfg);
5324 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5326 tmpreg = alloc_preg (cfg);
5327 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5331 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5335 /* range checking */
5336 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5337 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned compare
 * against length (LE_UN catches both negative and too-large indexes). */
5339 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5340 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5341 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5342 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5343 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5344 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5345 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check one MonoArrayBounds record further on. */
5347 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5348 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5349 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5350 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5351 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5352 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5353 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2) + realidx2) * size + vector offset. */
5355 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5356 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5357 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5358 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5359 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5361 ins->type = STACK_MP;
5363 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for an array Get/Set/Address
 * helper CMETHOD: rank 1 uses the inline fast path, rank 2 the inline
 * 2D path (when intrinsics are enabled), anything else calls the
 * marshal-generated Address () wrapper.
 * NOTE(review): this extracted view omits some original lines (rank==1
 * guard, gsharedvt failure path, return, braces); only comments added.
 */
5370 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5374 MonoMethod *addr_method;
5376 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the last signature parameter is the value, not an index. */
5378 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5381 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5383 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5384 /* emit_ldelema_2 depends on OP_LMUL */
5385 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5386 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5390 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* General case: call the generated Address (rank, element_size) helper. */
5393 element_size = mono_class_array_element_size (eclass);
5394 addr_method = mono_marshal_get_array_address (rank, element_size);
5395 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see
 * mono_set_break_policy () below for how embedders override this). */
5400 static MonoBreakPolicy
5401 always_insert_breakpoint (MonoMethod *method)
5403 return MONO_BREAK_POLICY_ALWAYS;
5406 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5409 * mono_set_break_policy:
5410 * policy_callback: the new callback function
5412 * Allow embedders to decide wherther to actually obey breakpoint instructions
5413 * (both break IL instructions and Debugger.Break () method calls), for example
5414 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5415 * untrusted or semi-trusted code.
5417 * @policy_callback will be called every time a break point instruction needs to
5418 * be inserted with the method argument being the method that calls Debugger.Break()
5419 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5420 * if it wants the breakpoint to not be effective in the given method.
5421 * #MONO_BREAK_POLICY_ALWAYS is the default.
5424 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-insert policy. */
5426 if (policy_callback)
5427 break_policy_func = policy_callback;
5429 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — name misspelled; kept for callers)
 *
 *   Consult the registered break policy callback and return whether a
 * breakpoint should actually be emitted for METHOD.
 * NOTE(review): this extracted view omits the per-case return statements.
 */
5433 should_insert_brekpoint (MonoMethod *method) {
5434 switch (break_policy_func (method)) {
5435 case MONO_BREAK_POLICY_ALWAYS:
5437 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG used to mean "only under mdb", which is no longer supported. */
5439 case MONO_BREAK_POLICY_ON_DBG:
5440 g_warning ("mdb no longer supported");
5443 g_warning ("Incorrect value returned from break policy callback");
5448 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address of
 * args [0][args [1]] and copy the value to/from *args [2], with a GC write
 * barrier when storing a reference into the array.
 * NOTE(review): closing brace / return are missing from this extracted view.
 */
5450 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5452 MonoInst *addr, *store, *load;
5453 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5455 /* the bounds check is already done by the callers */
5456 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: *addr = *args [2] (value → array element). */
5458 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5459 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5460 if (mini_type_is_reference (cfg, fsig->params [2]))
5461 emit_write_barrier (cfg, addr, load);
/* otherwise: *args [2] = *addr (array element → value). */
5463 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5464 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type under the current (possibly
 * generic-shared) compilation context. */
5471 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5473 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: store SP [2] into SP [0] [SP [1]].  Reference
 * stores with SAFETY_CHECKS go through the virtual stelemref helper
 * (which performs the array covariance check); value stores compute the
 * element address inline, with a constant-index fast path.
 * NOTE(review): this extracted view omits some original lines (iargs
 * assignments, casts, return, braces); only comments were added.
 */
5477 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a (possibly non-null) reference needs the covariance check;
 * a known-null store can skip it. */
5479 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5480 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5481 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5482 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5483 MonoInst *iargs [3];
5486 mono_class_setup_vtable (obj_array);
5487 g_assert (helper->slot);
5489 if (sp [0]->type != STACK_OBJ)
5491 if (sp [2]->type != STACK_OBJ)
5498 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Gsharedvt element: address computed inline, store as a value copy. */
5502 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5505 // FIXME-VT: OP_ICONST optimization
5506 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5507 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5508 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset into the store, keep the bounds check. */
5509 } else if (sp [1]->opcode == OP_ICONST) {
5510 int array_reg = sp [0]->dreg;
5511 int index_reg = sp [1]->dreg;
5512 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5515 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5516 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the address, store, and barrier ref stores. */
5518 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5519 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5520 if (generic_class_is_reference_type (cfg, klass))
5521 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeLoad/UnsafeStore intrinsics: an element access
 * with no bounds check.  IS_SET selects store (element type from
 * fsig->params [2]) vs load (element type from fsig->ret).
 * NOTE(review): some lines (else branches, return, braces) are missing
 * from this extracted view.
 */
5528 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5533 eklass = mono_class_from_mono_type (fsig->params [2]);
5535 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path reuses emit_array_store with safety checks disabled. */
5538 return emit_array_store (cfg, eklass, args, FALSE);
/* Load path: address without bounds check, then a typed load. */
5540 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5541 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *   Decide whether Array.UnsafeMov may reinterpret a value of PARAM_KLASS as
 *   RETURN_KLASS. Both must be valuetypes, contain no GC references, agree on
 *   struct-ness (structs and primitives/enums take different JIT paths), not
 *   involve R4/R8 (float registers differ from integer ones), and have equal
 *   value sizes.
 *   FIX(review): restored '&param_klass' on the two MONO_TYPE_ISSTRUCT lines;
 *   the listing had the mojibake '¶m_klass' ('&para' HTML-entity corruption).
 *   NOTE(review): this is an elided listing — the early-return lines between
 *   the numbered statements are missing from this view.
 */
5547 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5551 //Only allow for valuetypes
5552 if (!param_klass->valuetype || !return_klass->valuetype)
/* reinterpreting reference-bearing memory would break the GC */
5556 if (param_klass->has_references || return_klass->has_references)
5559 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5560 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5561 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5564 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5565 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5568 //And have the same size
5569 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *   Intrinsic for Array.UnsafeMov: reinterprets the single argument as the
 *   return type when is_unsafe_mov_compatible () allows it, either directly
 *   for compatible valuetypes or element-wise-compatibly for rank-1 arrays of
 *   such valuetypes.
 *   NOTE(review): elided listing — the emitted-move lines between the checks
 *   are missing from this view.
 */
5575 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5577 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5578 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5580 //Valuetypes that are semantically equivalent
5581 if (is_unsafe_mov_compatible (param_klass, return_klass))
5584 //Arrays of valuetypes that are semantically equivalent
5585 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic hook for constructor calls: tries the SIMD intrinsics first
 *   (only when MONO_OPT_SIMD is enabled and the arch supports it), then falls
 *   back to the native-types intrinsics.
 */
5592 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5594 #ifdef MONO_ARCH_SIMD_INTRINSICS
5595 MonoInst *ins = NULL;
5597 if (cfg->opt & MONO_OPT_SIMD) {
5598 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5604 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 *   (MONO_MEMORY_BARRIER_ACQ/REL/SEQ) to the current basic block.
 *   NOTE(review): elided listing — the trailing 'return ins;' line is missing
 *   from this view.
 */
5608 emit_memory_barrier (MonoCompile *cfg, int kind)
5610 MonoInst *ins = NULL;
5611 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5612 MONO_ADD_INS (cfg->cbb, ins);
5613 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics used only when compiling with the LLVM backend: System.Math
 *   Sin/Cos/Sqrt/Abs(double) map to single FP opcodes, and Min/Max map to
 *   conditional-move opcodes when MONO_OPT_CMOV is on.
 *   NOTE(review): elided listing — the opcode assignments for several branches
 *   (e.g. the lines between the strcmp tests) are missing from this view.
 */
5619 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5621 MonoInst *ins = NULL;
5624 /* The LLVM backend supports these intrinsics */
5625 if (cmethod->klass == mono_defaults.math_class) {
5626 if (strcmp (cmethod->name, "Sin") == 0) {
5628 } else if (strcmp (cmethod->name, "Cos") == 0) {
5630 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for double arguments */
5632 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* unary double intrinsic: one freg result, one sreg */
5636 if (opcode && fsig->param_count == 1) {
5637 MONO_INST_NEW (cfg, ins, opcode);
5638 ins->type = STACK_R8;
5639 ins->dreg = mono_alloc_freg (cfg);
5640 ins->sreg1 = args [0]->dreg;
5641 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only when cmov-style opcodes are enabled */
5645 if (cfg->opt & MONO_OPT_CMOV) {
5646 if (strcmp (cmethod->name, "Min") == 0) {
5647 if (fsig->params [0]->type == MONO_TYPE_I4)
5649 if (fsig->params [0]->type == MONO_TYPE_U4)
5650 opcode = OP_IMIN_UN;
5651 else if (fsig->params [0]->type == MONO_TYPE_I8)
5653 else if (fsig->params [0]->type == MONO_TYPE_U8)
5654 opcode = OP_LMIN_UN;
5655 } else if (strcmp (cmethod->name, "Max") == 0) {
5656 if (fsig->params [0]->type == MONO_TYPE_I4)
5658 if (fsig->params [0]->type == MONO_TYPE_U4)
5659 opcode = OP_IMAX_UN;
5660 else if (fsig->params [0]->type == MONO_TYPE_I8)
5662 else if (fsig->params [0]->type == MONO_TYPE_U8)
5663 opcode = OP_LMAX_UN;
/* binary integer intrinsic: stack type follows the first parameter */
5667 if (opcode && fsig->param_count == 2) {
5668 MONO_INST_NEW (cfg, ins, opcode);
5669 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5670 ins->dreg = mono_alloc_ireg (cfg);
5671 ins->sreg1 = args [0]->dreg;
5672 ins->sreg2 = args [1]->dreg;
5673 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe under generic sharing: the Array.UnsafeStore /
 *   UnsafeLoad / UnsafeMov internal helpers.
 */
5681 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5683 if (cmethod->klass == mono_defaults.array_class) {
5684 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5685 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5686 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5687 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5688 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5689 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *   Main JIT intrinsics dispatcher: if CMETHOD is one of the specially known
 *   BCL methods, emit inline IR for it and return the result instruction;
 *   otherwise fall through to the LLVM-specific, SIMD, native-types and
 *   arch-specific intrinsic hooks at the bottom.
 *   Dispatch is by declaring class: String, Object, Array, RuntimeHelpers,
 *   Thread, Monitor, Interlocked, Volatile, Debugger, Environment, Math, and
 *   the ObjC Selector class from the Xamarin assemblies.
 *   NOTE(review): this is an elided listing — early-return lines, #else/#endif
 *   lines and closing braces between the numbered statements are missing from
 *   this view; statement order of what IS visible is preserved byte-for-byte.
 */
5696 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5698 MonoInst *ins = NULL;
/* cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
5700 static MonoClass *runtime_helpers_class = NULL;
5701 if (! runtime_helpers_class)
5702 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5703 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String ---- */
5705 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds-checked 16-bit load from the chars array */
5706 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5707 int dreg = alloc_ireg (cfg);
5708 int index_reg = alloc_preg (cfg);
5709 int add_reg = alloc_preg (cfg);
5711 #if SIZEOF_REGISTER == 8
5712 /* The array reg is 64 bits but the index reg is only 32 */
5713 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5715 index_reg = args [1]->dreg;
5717 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5719 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + offset into a single LEA */
5720 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5721 add_reg = ins->dreg;
5722 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* generic path: explicit shift + add for the element address */
5725 int mult_reg = alloc_preg (cfg);
5726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5727 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5728 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5729 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5731 type_from_op (cfg, ins, NULL, NULL);
/* String.get_Length: OP_STRLEN, decomposed later for more optimization room */
5733 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5734 int dreg = alloc_ireg (cfg);
5735 /* Decompose later to allow more optimizations */
5736 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5737 ins->type = STACK_I4;
5738 ins->flags |= MONO_INST_FAULT;
5739 cfg->cbb->has_array_access = TRUE;
5740 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* ---- System.Object ---- */
5745 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: load vtable, then the MonoType from it */
5747 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5748 int dreg = alloc_ireg_ref (cfg);
5749 int vt_reg = alloc_preg (cfg);
5750 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5751 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5752 type_from_op (cfg, ins, NULL, NULL);
5755 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* InternalGetHashCode: address-based hash; only valid with a non-moving GC */
5756 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5757 int dreg = alloc_ireg (cfg);
5758 int t1 = alloc_ireg (cfg);
/* (obj << 3) * 2654435761: Knuth multiplicative hash of the address */
5760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5761 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5762 ins->type = STACK_I4;
/* Object..ctor () does nothing: emit a NOP */
5766 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5767 MONO_INST_NEW (cfg, ins, OP_NOP);
5768 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array ---- */
5772 } else if (cmethod->klass == mono_defaults.array_class) {
5773 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5774 return emit_array_generic_access (cfg, fsig, args, FALSE);
5775 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5776 return emit_array_generic_access (cfg, fsig, args, TRUE);
5778 #ifndef MONO_BIG_ARRAYS
5780 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
/* only the constant-0 dimension case is intrinsified */
5783 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5784 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5785 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5786 int dreg = alloc_ireg (cfg);
5787 int bounds_reg = alloc_ireg_mp (cfg);
5788 MonoBasicBlock *end_bb, *szarray_bb;
5789 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5791 NEW_BBLOCK (cfg, end_bb);
5792 NEW_BBLOCK (cfg, szarray_bb);
/* a NULL bounds pointer means the array is a szarray (rank-1, lb 0) */
5794 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5795 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5797 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5798 /* Non-szarray case */
5800 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5801 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5803 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5804 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5805 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5806 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength reads max_length, GetLowerBound(0) is constant 0 */
5809 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5810 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5812 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5813 MONO_START_BB (cfg, end_bb);
5815 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5816 ins->type = STACK_I4;
/* fast-out: the remaining Array intrinsics all start with 'g' */
5822 if (cmethod->name [0] != 'g')
/* Array.get_Rank: load the rank byte out of the vtable */
5825 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5826 int dreg = alloc_ireg (cfg);
5827 int vtable_reg = alloc_preg (cfg);
5828 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5829 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5830 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5831 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5832 type_from_op (cfg, ins, NULL, NULL);
/* Array.get_Length: direct max_length load with null-check fault */
5835 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5836 int dreg = alloc_ireg (cfg);
5838 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5839 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5840 type_from_op (cfg, ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5845 } else if (cmethod->klass == runtime_helpers_class) {
5847 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5848 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5852 } else if (cmethod->klass == mono_defaults.thread_class) {
5853 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5854 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5855 MONO_ADD_INS (cfg->cbb, ins);
5857 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5858 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: plain load followed by an acquire barrier */
5859 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5861 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5863 if (fsig->params [0]->type == MONO_TYPE_I1)
5864 opcode = OP_LOADI1_MEMBASE;
5865 else if (fsig->params [0]->type == MONO_TYPE_U1)
5866 opcode = OP_LOADU1_MEMBASE;
5867 else if (fsig->params [0]->type == MONO_TYPE_I2)
5868 opcode = OP_LOADI2_MEMBASE;
5869 else if (fsig->params [0]->type == MONO_TYPE_U2)
5870 opcode = OP_LOADU2_MEMBASE;
5871 else if (fsig->params [0]->type == MONO_TYPE_I4)
5872 opcode = OP_LOADI4_MEMBASE;
5873 else if (fsig->params [0]->type == MONO_TYPE_U4)
5874 opcode = OP_LOADU4_MEMBASE;
5875 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5876 opcode = OP_LOADI8_MEMBASE;
5877 else if (fsig->params [0]->type == MONO_TYPE_R4)
5878 opcode = OP_LOADR4_MEMBASE;
5879 else if (fsig->params [0]->type == MONO_TYPE_R8)
5880 opcode = OP_LOADR8_MEMBASE;
5881 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5882 opcode = OP_LOAD_MEMBASE;
5885 MONO_INST_NEW (cfg, ins, opcode);
5886 ins->inst_basereg = args [0]->dreg;
5887 ins->inst_offset = 0;
5888 MONO_ADD_INS (cfg->cbb, ins);
/* pick dreg class and stack type from the parameter's type */
5890 switch (fsig->params [0]->type) {
5897 ins->dreg = mono_alloc_ireg (cfg);
5898 ins->type = STACK_I4;
5902 ins->dreg = mono_alloc_lreg (cfg);
5903 ins->type = STACK_I8;
5907 ins->dreg = mono_alloc_ireg (cfg);
5908 #if SIZEOF_REGISTER == 8
5909 ins->type = STACK_I8;
5911 ins->type = STACK_I4;
5916 ins->dreg = mono_alloc_freg (cfg);
5917 ins->type = STACK_R8;
5920 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5921 ins->dreg = mono_alloc_ireg_ref (cfg);
5922 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition on 32-bit targets */
5926 if (opcode == OP_LOADI8_MEMBASE)
5927 ins = mono_decompose_opcode (cfg, ins, NULL);
5929 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: release barrier followed by a plain store */
5933 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5935 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5937 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5938 opcode = OP_STOREI1_MEMBASE_REG;
5939 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5940 opcode = OP_STOREI2_MEMBASE_REG;
5941 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5942 opcode = OP_STOREI4_MEMBASE_REG;
5943 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5944 opcode = OP_STOREI8_MEMBASE_REG;
5945 else if (fsig->params [0]->type == MONO_TYPE_R4)
5946 opcode = OP_STORER4_MEMBASE_REG;
5947 else if (fsig->params [0]->type == MONO_TYPE_R8)
5948 opcode = OP_STORER8_MEMBASE_REG;
5949 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5950 opcode = OP_STORE_MEMBASE_REG;
5953 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5955 MONO_INST_NEW (cfg, ins, opcode);
5956 ins->sreg1 = args [1]->dreg;
5957 ins->inst_destbasereg = args [0]->dreg;
5958 ins->inst_offset = 0;
5959 MONO_ADD_INS (cfg->cbb, ins);
5961 if (opcode == OP_STOREI8_MEMBASE_REG)
5962 ins = mono_decompose_opcode (cfg, ins, NULL);
/* ---- System.Threading.Monitor (fastpath via trampolines) ---- */
5967 } else if (cmethod->klass == mono_defaults.monitor_class) {
5968 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5969 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5972 if (COMPILE_LLVM (cfg)) {
5974 * Pass the argument normally, the LLVM backend will handle the
5975 * calling convention problems.
5977 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* non-LLVM: pass the object in a fixed arch register */
5979 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5980 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5981 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5982 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5985 return (MonoInst*)call;
5986 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Enter (obj, ref bool lockTaken) — the CLR 4 overload */
5987 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5990 if (COMPILE_LLVM (cfg)) {
5992 * Pass the argument normally, the LLVM backend will handle the
5993 * calling convention problems.
5995 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5997 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5998 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5999 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6000 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
6003 return (MonoInst*)call;
6005 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
6008 if (COMPILE_LLVM (cfg)) {
6009 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6011 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6012 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6013 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6014 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6017 return (MonoInst*)call;
/* ---- System.Threading.Interlocked ---- */
6020 } else if (cmethod->klass->image == mono_defaults.corlib &&
6021 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6022 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6025 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(long): atomic load opcode when supported, else barriers */
6026 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6027 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6028 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6029 ins->dreg = mono_alloc_preg (cfg);
6030 ins->sreg1 = args [0]->dreg;
6031 ins->type = STACK_I8;
6032 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6033 MONO_ADD_INS (cfg->cbb, ins);
6037 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6039 /* 64 bit reads are already atomic */
6040 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6041 load_ins->dreg = mono_alloc_preg (cfg);
6042 load_ins->inst_basereg = args [0]->dreg;
6043 load_ins->inst_offset = 0;
6044 load_ins->type = STACK_I8;
6045 MONO_ADD_INS (cfg->cbb, load_ins);
6047 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of constant +1 */
6054 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6055 MonoInst *ins_iconst;
6058 if (fsig->params [0]->type == MONO_TYPE_I4) {
6059 opcode = OP_ATOMIC_ADD_I4;
6060 cfg->has_atomic_add_i4 = TRUE;
6062 #if SIZEOF_REGISTER == 8
6063 else if (fsig->params [0]->type == MONO_TYPE_I8)
6064 opcode = OP_ATOMIC_ADD_I8;
6067 if (!mono_arch_opcode_supported (opcode))
6069 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6070 ins_iconst->inst_c0 = 1;
6071 ins_iconst->dreg = mono_alloc_ireg (cfg);
6072 MONO_ADD_INS (cfg->cbb, ins_iconst);
6074 MONO_INST_NEW (cfg, ins, opcode);
6075 ins->dreg = mono_alloc_ireg (cfg);
6076 ins->inst_basereg = args [0]->dreg;
6077 ins->inst_offset = 0;
6078 ins->sreg2 = ins_iconst->dreg;
6079 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6080 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of constant -1 */
6082 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6083 MonoInst *ins_iconst;
6086 if (fsig->params [0]->type == MONO_TYPE_I4) {
6087 opcode = OP_ATOMIC_ADD_I4;
6088 cfg->has_atomic_add_i4 = TRUE;
6090 #if SIZEOF_REGISTER == 8
6091 else if (fsig->params [0]->type == MONO_TYPE_I8)
6092 opcode = OP_ATOMIC_ADD_I8;
6095 if (!mono_arch_opcode_supported (opcode))
6097 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6098 ins_iconst->inst_c0 = -1;
6099 ins_iconst->dreg = mono_alloc_ireg (cfg);
6100 MONO_ADD_INS (cfg->cbb, ins_iconst);
6102 MONO_INST_NEW (cfg, ins, opcode);
6103 ins->dreg = mono_alloc_ireg (cfg);
6104 ins->inst_basereg = args [0]->dreg;
6105 ins->inst_offset = 0;
6106 ins->sreg2 = ins_iconst->dreg;
6107 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6108 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied value */
6110 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6113 if (fsig->params [0]->type == MONO_TYPE_I4) {
6114 opcode = OP_ATOMIC_ADD_I4;
6115 cfg->has_atomic_add_i4 = TRUE;
6117 #if SIZEOF_REGISTER == 8
6118 else if (fsig->params [0]->type == MONO_TYPE_I8)
6119 opcode = OP_ATOMIC_ADD_I8;
6122 if (!mono_arch_opcode_supported (opcode))
6124 MONO_INST_NEW (cfg, ins, opcode);
6125 ins->dreg = mono_alloc_ireg (cfg);
6126 ins->inst_basereg = args [0]->dreg;
6127 ins->inst_offset = 0;
6128 ins->sreg2 = args [1]->dreg;
6129 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6130 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic xchg; floats are bit-moved through integer regs */
6133 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6134 MonoInst *f2i = NULL, *i2f;
6135 guint32 opcode, f2i_opcode, i2f_opcode;
6136 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6137 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6139 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6140 fsig->params [0]->type == MONO_TYPE_R4) {
6141 opcode = OP_ATOMIC_EXCHANGE_I4;
6142 f2i_opcode = OP_MOVE_F_TO_I4;
6143 i2f_opcode = OP_MOVE_I4_TO_F;
6144 cfg->has_atomic_exchange_i4 = TRUE;
6146 #if SIZEOF_REGISTER == 8
6148 fsig->params [0]->type == MONO_TYPE_I8 ||
6149 fsig->params [0]->type == MONO_TYPE_R8 ||
6150 fsig->params [0]->type == MONO_TYPE_I) {
6151 opcode = OP_ATOMIC_EXCHANGE_I8;
6152 f2i_opcode = OP_MOVE_F_TO_I8;
6153 i2f_opcode = OP_MOVE_I8_TO_F;
6156 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6157 opcode = OP_ATOMIC_EXCHANGE_I4;
6158 cfg->has_atomic_exchange_i4 = TRUE;
6164 if (!mono_arch_opcode_supported (opcode))
6168 /* TODO: Decompose these opcodes instead of bailing here. */
6169 if (COMPILE_SOFT_FLOAT (cfg))
/* float input: bit-copy into an integer register first */
6172 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6173 f2i->dreg = mono_alloc_ireg (cfg);
6174 f2i->sreg1 = args [1]->dreg;
6175 if (f2i_opcode == OP_MOVE_F_TO_I4)
6176 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6177 MONO_ADD_INS (cfg->cbb, f2i);
6180 MONO_INST_NEW (cfg, ins, opcode);
6181 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6182 ins->inst_basereg = args [0]->dreg;
6183 ins->inst_offset = 0;
6184 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6185 MONO_ADD_INS (cfg->cbb, ins);
6187 switch (fsig->params [0]->type) {
6189 ins->type = STACK_I4;
6192 ins->type = STACK_I8;
6195 #if SIZEOF_REGISTER == 8
6196 ins->type = STACK_I8;
6198 ins->type = STACK_I4;
6203 ins->type = STACK_R8;
6206 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6207 ins->type = STACK_OBJ;
/* float result: bit-copy back from the integer register */
6212 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6213 i2f->dreg = mono_alloc_freg (cfg);
6214 i2f->sreg1 = ins->dreg;
6215 i2f->type = STACK_R8;
6216 if (i2f_opcode == OP_MOVE_I4_TO_F)
6217 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6218 MONO_ADD_INS (cfg->cbb, i2f);
/* a reference was stored into *args[0]: notify the GC */
6223 if (cfg->gen_write_barriers && is_ref)
6224 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (3-arg): atomic CAS; same float bit-move dance */
6226 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6227 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6228 guint32 opcode, f2i_opcode, i2f_opcode;
6229 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6230 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6232 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6233 fsig->params [1]->type == MONO_TYPE_R4) {
6234 opcode = OP_ATOMIC_CAS_I4;
6235 f2i_opcode = OP_MOVE_F_TO_I4;
6236 i2f_opcode = OP_MOVE_I4_TO_F;
6237 cfg->has_atomic_cas_i4 = TRUE;
6239 #if SIZEOF_REGISTER == 8
6241 fsig->params [1]->type == MONO_TYPE_I8 ||
6242 fsig->params [1]->type == MONO_TYPE_R8 ||
6243 fsig->params [1]->type == MONO_TYPE_I) {
6244 opcode = OP_ATOMIC_CAS_I8;
6245 f2i_opcode = OP_MOVE_F_TO_I8;
6246 i2f_opcode = OP_MOVE_I8_TO_F;
6249 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6250 opcode = OP_ATOMIC_CAS_I4;
6251 cfg->has_atomic_cas_i4 = TRUE;
6257 if (!mono_arch_opcode_supported (opcode))
6261 /* TODO: Decompose these opcodes instead of bailing here. */
6262 if (COMPILE_SOFT_FLOAT (cfg))
6265 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6266 f2i_new->dreg = mono_alloc_ireg (cfg);
6267 f2i_new->sreg1 = args [1]->dreg;
6268 if (f2i_opcode == OP_MOVE_F_TO_I4)
6269 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6270 MONO_ADD_INS (cfg->cbb, f2i_new);
6272 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6273 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6274 f2i_cmp->sreg1 = args [2]->dreg;
6275 if (f2i_opcode == OP_MOVE_F_TO_I4)
6276 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6277 MONO_ADD_INS (cfg->cbb, f2i_cmp);
/* CAS: sreg1 = location, sreg2 = new value, sreg3 = comparand */
6280 MONO_INST_NEW (cfg, ins, opcode);
6281 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6282 ins->sreg1 = args [0]->dreg;
6283 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6284 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6285 MONO_ADD_INS (cfg->cbb, ins);
6287 switch (fsig->params [1]->type) {
6289 ins->type = STACK_I4;
6292 ins->type = STACK_I8;
6295 #if SIZEOF_REGISTER == 8
6296 ins->type = STACK_I8;
6298 ins->type = STACK_I4;
6303 ins->type = STACK_R8;
6306 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6307 ins->type = STACK_OBJ;
6312 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6313 i2f->dreg = mono_alloc_freg (cfg);
6314 i2f->sreg1 = ins->dreg;
6315 i2f->type = STACK_R8;
6316 if (i2f_opcode == OP_MOVE_I4_TO_F)
6317 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6318 MONO_ADD_INS (cfg->cbb, i2f);
6323 if (cfg->gen_write_barriers && is_ref)
6324 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (4-arg, int): CAS + compare + store the success flag */
6326 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6327 fsig->params [1]->type == MONO_TYPE_I4) {
6328 MonoInst *cmp, *ceq;
6330 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6333 /* int32 r = CAS (location, value, comparand); */
6334 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6335 ins->dreg = alloc_ireg (cfg);
6336 ins->sreg1 = args [0]->dreg;
6337 ins->sreg2 = args [1]->dreg;
6338 ins->sreg3 = args [2]->dreg;
6339 ins->type = STACK_I4;
6340 MONO_ADD_INS (cfg->cbb, ins);
6342 /* bool result = r == comparand; */
6343 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6344 cmp->sreg1 = ins->dreg;
6345 cmp->sreg2 = args [2]->dreg;
6346 cmp->type = STACK_I4;
6347 MONO_ADD_INS (cfg->cbb, cmp);
6349 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6350 ceq->dreg = alloc_ireg (cfg);
6351 ceq->type = STACK_I4;
6352 MONO_ADD_INS (cfg->cbb, ceq);
6354 /* *success = result; */
6355 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6357 cfg->has_atomic_cas_i4 = TRUE;
6359 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6360 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* ---- System.Threading.Volatile ---- */
6364 } else if (cmethod->klass->image == mono_defaults.corlib &&
6365 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6366 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
/* Volatile.Read: single atomic-load opcode with acquire semantics */
6369 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6371 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6372 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6374 if (fsig->params [0]->type == MONO_TYPE_I1)
6375 opcode = OP_ATOMIC_LOAD_I1;
6376 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6377 opcode = OP_ATOMIC_LOAD_U1;
6378 else if (fsig->params [0]->type == MONO_TYPE_I2)
6379 opcode = OP_ATOMIC_LOAD_I2;
6380 else if (fsig->params [0]->type == MONO_TYPE_U2)
6381 opcode = OP_ATOMIC_LOAD_U2;
6382 else if (fsig->params [0]->type == MONO_TYPE_I4)
6383 opcode = OP_ATOMIC_LOAD_I4;
6384 else if (fsig->params [0]->type == MONO_TYPE_U4)
6385 opcode = OP_ATOMIC_LOAD_U4;
6386 else if (fsig->params [0]->type == MONO_TYPE_R4)
6387 opcode = OP_ATOMIC_LOAD_R4;
6388 else if (fsig->params [0]->type == MONO_TYPE_R8)
6389 opcode = OP_ATOMIC_LOAD_R8;
6390 #if SIZEOF_REGISTER == 8
6391 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6392 opcode = OP_ATOMIC_LOAD_I8;
6393 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6394 opcode = OP_ATOMIC_LOAD_U8;
6396 else if (fsig->params [0]->type == MONO_TYPE_I)
6397 opcode = OP_ATOMIC_LOAD_I4;
6398 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6399 opcode = OP_ATOMIC_LOAD_U4;
6403 if (!mono_arch_opcode_supported (opcode))
6406 MONO_INST_NEW (cfg, ins, opcode);
6407 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6408 ins->sreg1 = args [0]->dreg;
6409 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6410 MONO_ADD_INS (cfg->cbb, ins);
6412 switch (fsig->params [0]->type) {
6413 case MONO_TYPE_BOOLEAN:
6420 ins->type = STACK_I4;
6424 ins->type = STACK_I8;
6428 #if SIZEOF_REGISTER == 8
6429 ins->type = STACK_I8;
6431 ins->type = STACK_I4;
6436 ins->type = STACK_R8;
6439 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6440 ins->type = STACK_OBJ;
/* Volatile.Write: single atomic-store opcode with release semantics */
6446 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6448 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6450 if (fsig->params [0]->type == MONO_TYPE_I1)
6451 opcode = OP_ATOMIC_STORE_I1;
6452 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6453 opcode = OP_ATOMIC_STORE_U1;
6454 else if (fsig->params [0]->type == MONO_TYPE_I2)
6455 opcode = OP_ATOMIC_STORE_I2;
6456 else if (fsig->params [0]->type == MONO_TYPE_U2)
6457 opcode = OP_ATOMIC_STORE_U2;
6458 else if (fsig->params [0]->type == MONO_TYPE_I4)
6459 opcode = OP_ATOMIC_STORE_I4;
6460 else if (fsig->params [0]->type == MONO_TYPE_U4)
6461 opcode = OP_ATOMIC_STORE_U4;
6462 else if (fsig->params [0]->type == MONO_TYPE_R4)
6463 opcode = OP_ATOMIC_STORE_R4;
6464 else if (fsig->params [0]->type == MONO_TYPE_R8)
6465 opcode = OP_ATOMIC_STORE_R8;
6466 #if SIZEOF_REGISTER == 8
6467 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6468 opcode = OP_ATOMIC_STORE_I8;
6469 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6470 opcode = OP_ATOMIC_STORE_U8;
6472 else if (fsig->params [0]->type == MONO_TYPE_I)
6473 opcode = OP_ATOMIC_STORE_I4;
6474 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6475 opcode = OP_ATOMIC_STORE_U4;
6479 if (!mono_arch_opcode_supported (opcode))
6482 MONO_INST_NEW (cfg, ins, opcode);
6483 ins->dreg = args [0]->dreg;
6484 ins->sreg1 = args [1]->dreg;
6485 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6486 MONO_ADD_INS (cfg->cbb, ins);
6488 if (cfg->gen_write_barriers && is_ref)
6489 emit_write_barrier (cfg, args [0], args [1]);
/* ---- System.Diagnostics.Debugger ---- */
6495 } else if (cmethod->klass->image == mono_defaults.corlib &&
6496 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6497 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6498 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* [sic] 'brekpoint' is the actual helper name in this codebase */
6499 if (should_insert_brekpoint (cfg->method)) {
6500 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6502 MONO_INST_NEW (cfg, ins, OP_NOP);
6503 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Environment ---- */
6507 } else if (cmethod->klass->image == mono_defaults.corlib &&
6508 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6509 (strcmp (cmethod->klass->name, "Environment") == 0)) {
/* constant-folded per target platform (#if elided in this listing) */
6510 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6512 EMIT_NEW_ICONST (cfg, ins, 1);
6514 EMIT_NEW_ICONST (cfg, ins, 0);
6517 } else if (cmethod->klass == mono_defaults.math_class) {
6519 * There is general branchless code for Min/Max, but it does not work for
6521 * http://everything2.com/?node_id=1051618
/* ---- ObjCRuntime.Selector (MonoMac / monotouch / Xamarin.iOS) ---- */
6523 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6524 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6525 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6526 !strcmp (cmethod->klass->name, "Selector")) ||
6527 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6528 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6529 !strcmp (cmethod->klass->name, "Selector"))
6531 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* Selector.GetHandle("literal"): resolve the selector at JIT time */
6532 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6533 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6536 MonoJumpInfoToken *ji;
6539 cfg->disable_llvm = TRUE;
6541 if (args [0]->opcode == OP_GOT_ENTRY) {
6542 pi = args [0]->inst_p1;
6543 g_assert (pi->opcode == OP_PATCH_INFO);
6544 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6547 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6548 ji = args [0]->inst_p0;
/* the ldstr argument is consumed here; kill the original instruction */
6551 NULLIFY_INS (args [0]);
6554 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6555 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6556 ins->dreg = mono_alloc_ireg (cfg);
6558 ins->inst_p0 = mono_string_to_utf8 (s);
6559 MONO_ADD_INS (cfg->cbb, ins);
/* ---- fallbacks: SIMD, native types, LLVM, arch-specific ---- */
6565 #ifdef MONO_ARCH_SIMD_INTRINSICS
6566 if (cfg->opt & MONO_OPT_SIMD) {
6567 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6573 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6577 if (COMPILE_LLVM (cfg)) {
6578 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6583 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6587 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect a call to an equivalent managed implementation when profitable.
 *   Currently handles only String.InternalAllocateStr, replacing it with the
 *   GC's managed string allocator (vtable + length args) unless allocation
 *   profiling or MONO_OPT_SHARED is active.
 *   NOTE(review): elided listing — the NULL-managed_alloc bail-out and the
 *   final 'return NULL' are missing from this view.
 */
6590 inline static MonoInst*
6591 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6592 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6594 if (method->klass == mono_defaults.string_class) {
6595 /* managed string allocation support */
6596 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6597 MonoInst *iargs [2];
6598 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6599 MonoMethod *managed_alloc = NULL;
6601 g_assert (vtable); /*Should not fail since it System.String*/
6602 #ifndef MONO_CROSS_COMPILE
6603 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* call managed_alloc (vtable, length) in place of the icall */
6607 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6608 iargs [1] = args [0];
6609 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Called during inlining: create an OP_LOCAL variable for every argument
 * (including the implicit 'this') of the method being inlined and store the
 * caller's stack values SP[] into them, so the inlined body can access its
 * arguments through cfg->args [].
 */
6616 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6618 MonoInst *store, *temp;
6621 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* for 'this' there is no entry in sig->params, so derive the type from the stack value */
6622 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6625 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6626 * would be different than the MonoInst's used to represent arguments, and
6627 * the ldelema implementation can't deal with that.
6628 * Solution: When ldelema is used on an inline argument, create a var for
6629 * it, emit ldelema on that var, and emit the saving code below in
6630 * inline_method () if needed.
6632 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6633 cfg->args [i] = temp;
6634 /* This uses cfg->args [i] which is set by the preceding line */
6635 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* keep the original IL location for debug info */
6636 store->cil_code = sp [0]->cil_code;
/*
 * Debug knobs: when non-zero, inlining is additionally restricted by the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit () helpers below).
 */
6641 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6642 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6644 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of CALLED_METHOD when its full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable. The env var is read once and cached in a function
 * static (not thread-safe — acceptable for a debug-only facility).
 */
6646 check_inline_called_method_name_limit (MonoMethod *called_method)
6649 static const char *limit = NULL;
6651 if (limit == NULL) {
6652 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6654 if (limit_string != NULL)
6655 limit = limit_string;
/* empty limit means "no restriction" (handled in the elided else branch) */
6660 if (limit [0] != '\0') {
6661 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix comparison: 0 only when the name begins with the limit string */
6663 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6664 g_free (called_method_name);
6666 //return (strncmp_result <= 0);
6667 return (strncmp_result == 0);
6674 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: only allow inlining into CALLER_METHOD when its full name
 * starts with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Mirrors
 * check_inline_called_method_name_limit () above.
 */
6676 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6679 static const char *limit = NULL;
6681 if (limit == NULL) {
6682 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6683 if (limit_string != NULL) {
6684 limit = limit_string;
6690 if (limit [0] != '\0') {
6691 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6693 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6694 g_free (caller_method_name);
6696 //return (strncmp_result <= 0);
6697 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG to the zero/default value of RTYPE:
 * NULL for references/pointers, 0 for integers, 0.0 for floats, and VZERO
 * for value types. Used e.g. to give the inline return variable a defined
 * value on paths where the inlined body never sets it.
 */
6705 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* float/double constants are emitted by address, so they need static storage */
6707 static double r8_0 = 0.0;
6708 static float r4_0 = 0.0;
6712 rtype = mini_get_underlying_type (cfg, rtype);
6716 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6717 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6718 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6719 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6720 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* when r4fp is set, R4 values are kept in single precision instead of widening to R8 */
6721 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6722 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6723 ins->type = STACK_R4;
6724 ins->inst_p0 = (void*)&r4_0;
6726 MONO_ADD_INS (cfg->cbb, ins);
6727 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6728 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6729 ins->type = STACK_R8;
6730 ins->inst_p0 = (void*)&r8_0;
6732 MONO_ADD_INS (cfg->cbb, ins);
6733 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6734 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6735 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* generic type vars that are known to be value types also get VZERO */
6736 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6737 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* everything else (references, pointers, ...) defaults to NULL */
6739 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations:
 * they keep the IR/SSA form valid (every vreg has a def) without generating
 * any machine code. Falls back to a real init for types with no dummy opcode.
 */
6744 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6748 rtype = mini_get_underlying_type (cfg, rtype);
6752 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6753 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6754 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6755 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6756 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6757 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6758 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6759 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6760 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6761 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6762 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6763 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6764 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6765 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* no dummy opcode for this type category: emit a real initialization */
6767 emit_init_rvar (cfg, dreg, rtype);
6771 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6773 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6775 MonoInst *var = cfg->locals [local];
/* soft-float: init a fresh vreg, then store it to the local so the
 * soft-float decomposition sees a proper store */
6776 if (COMPILE_SOFT_FLOAT (cfg)) {
6778 int reg = alloc_dreg (cfg, var->type);
6779 emit_init_rvar (cfg, reg, type);
6780 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6783 emit_init_rvar (cfg, var->dreg, type);
6785 emit_dummy_init_rvar (cfg, var->dreg, type);
6792 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD into the current method at IP, with the call
 * arguments in SP. Works by saving the parts of CFG that describe the
 * current method (locals, args, cil offsets, bblock mappings, ...),
 * recursively running mono_method_to_ir () on CMETHOD between freshly
 * allocated start/end bblocks, then restoring the saved state. If the
 * body is cheap enough (cost < 60) or INLINE_ALWAYS is set, the new
 * bblocks are linked/merged into the caller's CFG; otherwise they are
 * discarded. Returns the cost (non-zero) on success, 0 on failure.
 * *OUT_CBB receives the bblock to continue emitting into.
 */
6795 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6796 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6798 MonoInst *ins, *rvar = NULL;
6799 MonoMethodHeader *cheader;
6800 MonoBasicBlock *ebblock, *sbblock;
6802 MonoMethod *prev_inlined_method;
6803 MonoInst **prev_locals, **prev_args;
6804 MonoType **prev_arg_types;
6805 guint prev_real_offset;
6806 GHashTable *prev_cbb_hash;
6807 MonoBasicBlock **prev_cil_offset_to_bb;
6808 MonoBasicBlock *prev_cbb;
6809 unsigned char* prev_cil_start;
6810 guint32 prev_cil_offset_to_bb_len;
6811 MonoMethod *prev_current_method;
6812 MonoGenericContext *prev_generic_context;
6813 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6815 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional debug-only name filters, see check_inline_*_name_limit () */
6817 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6818 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6821 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6822 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6827 fsig = mono_method_signature (cmethod);
6829 if (cfg->verbose_level > 2)
6830 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* count each inlineable method only once in the stats */
6832 if (!cmethod->inline_info) {
6833 cfg->stat_inlineable_methods++;
6834 cmethod->inline_info = 1;
6837 /* allocate local variables */
6838 cheader = mono_method_get_header (cmethod);
6840 if (cheader == NULL || mono_loader_get_last_error ()) {
6841 MonoLoaderError *error = mono_loader_get_last_error ();
6844 mono_metadata_free_mh (cheader);
/* forced inlines must surface the loader error; optional ones just bail out */
6845 if (inline_always && error)
6846 mono_cfg_set_exception (cfg, error->exception_type);
6848 mono_loader_clear_error ();
6852 /*Must verify before creating locals as it can cause the JIT to assert.*/
6853 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6854 mono_metadata_free_mh (cheader);
6858 /* allocate space to store the return value */
6859 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6860 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in the inlinee's locals; the caller's are restored below */
6863 prev_locals = cfg->locals;
6864 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6865 for (i = 0; i < cheader->num_locals; ++i)
6866 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6868 /* allocate start and end blocks */
6869 /* This is needed so if the inline is aborted, we can clean up */
6870 NEW_BBLOCK (cfg, sbblock);
6871 sbblock->real_offset = real_offset;
6873 NEW_BBLOCK (cfg, ebblock);
6874 ebblock->block_num = cfg->num_bblocks++;
6875 ebblock->real_offset = real_offset;
/* save the caller-method compilation state before recursing */
6877 prev_args = cfg->args;
6878 prev_arg_types = cfg->arg_types;
6879 prev_inlined_method = cfg->inlined_method;
6880 cfg->inlined_method = cmethod;
6881 cfg->ret_var_set = FALSE;
6882 cfg->inline_depth ++;
6883 prev_real_offset = cfg->real_offset;
6884 prev_cbb_hash = cfg->cbb_hash;
6885 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6886 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6887 prev_cil_start = cfg->cil_start;
6888 prev_cbb = cfg->cbb;
6889 prev_current_method = cfg->current_method;
6890 prev_generic_context = cfg->generic_context;
6891 prev_ret_var_set = cfg->ret_var_set;
6892 prev_disable_inline = cfg->disable_inline;
/* a callvirt on a non-static method needs an explicit null check on 'this' */
6894 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6897 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6899 ret_var_set = cfg->ret_var_set;
/* restore the caller-method compilation state */
6901 cfg->inlined_method = prev_inlined_method;
6902 cfg->real_offset = prev_real_offset;
6903 cfg->cbb_hash = prev_cbb_hash;
6904 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6905 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6906 cfg->cil_start = prev_cil_start;
6907 cfg->locals = prev_locals;
6908 cfg->args = prev_args;
6909 cfg->arg_types = prev_arg_types;
6910 cfg->current_method = prev_current_method;
6911 cfg->generic_context = prev_generic_context;
6912 cfg->ret_var_set = prev_ret_var_set;
6913 cfg->disable_inline = prev_disable_inline;
6914 cfg->inline_depth --;
/* accept the inline when it is cheap enough or mandatory (wrappers etc.) */
6916 if ((costs >= 0 && costs < 60) || inline_always) {
6917 if (cfg->verbose_level > 2)
6918 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6920 cfg->stat_inlined_methods++;
6922 /* always add some code to avoid block split failures */
6923 MONO_INST_NEW (cfg, ins, OP_NOP);
6924 MONO_ADD_INS (prev_cbb, ins);
6926 prev_cbb->next_bb = sbblock;
6927 link_bblock (cfg, prev_cbb, sbblock);
6930 * Get rid of the begin and end bblocks if possible to aid local
6933 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6935 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6936 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6938 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6939 MonoBasicBlock *prev = ebblock->in_bb [0];
6940 mono_merge_basic_blocks (cfg, prev, ebblock);
6942 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6943 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6944 cfg->cbb = prev_cbb;
6948 * It's possible that the rvar is set in some prev bblock, but not in others.
/* init rvar on predecessor paths that end in OP_NOT_REACHED (e.g. throw-only) */
6954 for (i = 0; i < ebblock->in_count; ++i) {
6955 bb = ebblock->in_bb [i];
6957 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6960 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6969 *out_cbb = cfg->cbb;
6973 * If the inlined method contains only a throw, then the ret var is not
6974 * set, so set it to a dummy value.
6977 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* push the return value for the caller's evaluation stack */
6979 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6982 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline rejected: undo and report failure */
6985 if (cfg->verbose_level > 2)
6986 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6987 cfg->exception_type = MONO_EXCEPTION_NONE;
6988 mono_loader_clear_error ();
6990 /* This gets rid of the newly added bblocks */
6991 cfg->cbb = prev_cbb;
6993 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6998 * Some of these comments may well be out-of-date.
6999 * Design decisions: we do a single pass over the IL code (and we do bblock
7000 * splitting/merging in the few cases when it's required: a back jump to an IL
7001 * address that was not already seen as bblock starting point).
7002 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7003 * Complex operations are decomposed in simpler ones right away. We need to let the
7004 * arch-specific code peek and poke inside this process somehow (except when the
7005 * optimizations can take advantage of the full semantic info of coarse opcodes).
7006 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7007 * MonoInst->opcode initially is the IL opcode or some simplification of that
7008 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7009 * opcode with value bigger than OP_LAST.
7010 * At this point the IR can be handed over to an interpreter, a dumb code generator
7011 * or to the optimizing code generator that will translate it to SSA form.
7013 * Profiling directed optimizations.
7014 * We may compile by default with few or no optimizations and instrument the code
7015 * or the user may indicate what methods to optimize the most either in a config file
7016 * or through repeated runs where the compiler applies offline the optimizations to
7017 * each method and then decides if it was worth it.
/*
 * Lightweight IL validation helpers used inside mono_method_to_ir ().
 * They reference locals of that function (sp, stack_start, header, ip, end,
 * num_args, cfg) and jump to its UNVERIFIED / TYPE_LOAD_ERROR handling.
 */
7020 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7021 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7022 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7023 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7024 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7025 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7026 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7027 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7029 /* offset from br.s -> br like opcodes */
7030 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at that offset in the cil_offset_to_bb map.
 */
7033 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7035 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7037 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL stream [START, END): decode each opcode, and for
 * every branch/switch create (via GET_BBLOCK) the bblocks for the branch
 * targets and for the instruction following the branch. Also marks the
 * bblock containing a 'throw' as out-of-line (cold). On a decode error,
 * *POS presumably reports the failing position (error path not in view).
 */
7041 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7043 unsigned char *ip = start;
7044 unsigned char *target;
7047 MonoBasicBlock *bblock;
7048 const MonoOpcode *opcode;
7051 cli_addr = ip - start;
7052 i = mono_opcode_value ((const guint8 **)&ip, end);
7055 opcode = &mono_opcodes [i];
/* advance IP past the operand according to the opcode's argument kind */
7056 switch (opcode->argument) {
7057 case MonoInlineNone:
7060 case MonoInlineString:
7061 case MonoInlineType:
7062 case MonoInlineField:
7063 case MonoInlineMethod:
7066 case MonoShortInlineR:
7073 case MonoShortInlineVar:
7074 case MonoShortInlineI:
/* short branch: 1-byte signed displacement relative to the next instruction */
7077 case MonoShortInlineBrTarget:
7078 target = start + cli_addr + 2 + (signed char)ip [1];
7079 GET_BBLOCK (cfg, bblock, target);
7082 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte signed displacement */
7084 case MonoInlineBrTarget:
7085 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7086 GET_BBLOCK (cfg, bblock, target);
7089 GET_BBLOCK (cfg, bblock, ip);
7091 case MonoInlineSwitch: {
7092 guint32 n = read32 (ip + 1);
/* targets are relative to the end of the whole switch instruction */
7095 cli_addr += 5 + 4 * n;
7096 target = start + cli_addr;
7097 GET_BBLOCK (cfg, bblock, target);
7099 for (j = 0; j < n; ++j) {
7100 target = start + cli_addr + (gint32)read32 (ip);
7101 GET_BBLOCK (cfg, bblock, target);
7111 g_assert_not_reached ();
7114 if (i == CEE_THROW) {
7115 unsigned char *bb_start = ip - 1;
7117 /* Find the start of the bblock containing the throw */
7119 while ((bb_start >= start) && !bblock) {
7120 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* a throwing bblock is cold: let the backend move it out of line */
7124 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrappers
 * the method is stored in the wrapper data (and inflated with CONTEXT if
 * needed); otherwise it is looked up in M's image. "allow_open" because the
 * result may still contain open generic type variables.
 */
7134 static inline MonoMethod *
7135 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7139 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7140 method = mono_method_get_wrapper_data (m, token);
7143 method = mono_class_inflate_generic_method_checked (method, context, &error);
7144 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7147 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling a shared
 * (generic-sharing) method, reject methods on open constructed types
 * (handling in the elided branch, presumably returning NULL).
 */
7153 static inline MonoMethod *
7154 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7156 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7158 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: from wrapper data
 * (inflated with CONTEXT) for wrappers, otherwise from METHOD's image.
 * The class is initialized before returning.
 */
7164 static inline MonoClass*
7165 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7170 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7171 klass = mono_method_get_wrapper_data (method, token);
7173 klass = mono_class_inflate_generic_class (klass, context);
7175 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7176 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7179 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: from wrapper data (inflated with
 * CONTEXT) for wrappers, otherwise parsed from METHOD's image metadata.
 */
7183 static inline MonoMethodSignature*
7184 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7186 MonoMethodSignature *fsig;
7188 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7191 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7193 fsig = mono_inflate_generic_signature (fsig, context, &error);
7195 g_assert (mono_error_ok (&error));
7198 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * throw_exception:
 *
 *   Return (lazily looking up and caching) the managed
 * SecurityManager.ThrowException method used by the CoreCLR security checks
 * below. NOTE(review): the cache is a plain static — assumed to be written
 * under the loader/JIT locking discipline of the callers.
 */
7204 throw_exception (void)
7206 static MonoMethod *method = NULL;
7209 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7210 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException (EX) at the current
 * point, raising the pre-built exception object EX at run time.
 */
7217 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7219 MonoMethod *thrower = throw_exception ();
7222 EMIT_NEW_PCONST (cfg, args [0], ex);
7223 mono_emit_method_call (cfg, thrower, args, NULL);
7227 * Return the original method if a wrapper is specified. We can only access
7228 * the custom attributes from the original method.
7231 get_original_method (MonoMethod *method)
7233 if (method->wrapper_type == MONO_WRAPPER_NONE)
7236 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7237 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7240 /* in other cases we need to find the original method */
7241 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER (unwrapped via get_original_method ()) may
 * not access FIELD, emit code that throws the security exception at run time.
 */
7245 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7246 MonoBasicBlock *bblock, unsigned char *ip)
7248 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7249 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7251 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: if CALLER (unwrapped) may not call CALLEE, emit code
 * that throws the security exception at run time.
 */
7255 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7256 MonoBasicBlock *bblock, unsigned char *ip)
7258 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7259 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7261 emit_throw_exception (cfg, ex);
7265 * Check that the IL instructions at ip are the array initialization
7266 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the pattern emitted by the C# compiler for array literals:
 * dup; ldtoken <field>; call RuntimeHelpers::InitializeArray. On a match,
 * returns the raw initializer bytes (or, for AOT, the RVA as an integer
 * handle — resolved at load time), filling *OUT_SIZE and *OUT_FIELD_TOKEN.
 */
7269 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7272 * newarr[System.Int32]
7274 * ldtoken field valuetype ...
7275 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* byte-match the dup/ldtoken/call opcode sequence (0x4 is the token-table byte) */
7277 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7279 guint32 token = read32 (ip + 7);
7280 guint32 field_token = read32 (ip + 2);
7281 guint32 field_index = field_token & 0xffffff;
7283 const char *data_ptr;
7285 MonoMethod *cmethod;
7286 MonoClass *dummy_class;
7287 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7291 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7295 *out_field_token = field_token;
7297 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the call must really be corlib's RuntimeHelpers.InitializeArray */
7300 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose in-image layout matches memory layout are handled */
7302 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7303 case MONO_TYPE_BOOLEAN:
7307 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7308 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7309 case MONO_TYPE_CHAR:
/* the initializer blob must be at least as large as the array contents */
7326 if (size > mono_type_size (field->type, &dummy_align))
7329 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7330 if (!image_is_dynamic (method->klass->image)) {
7331 field_index = read32 (ip + 2) & 0xffffff;
7332 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7333 data_ptr = mono_image_rva_map (method->klass->image, rva);
7334 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7335 /* for aot code we do the lookup on load */
7336 if (aot && data_ptr)
7337 return GUINT_TO_POINTER (rva);
7339 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (Reflection.Emit) images: read the data directly from the field */
7341 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending instruction at IP. The header is queued
 * on headers_to_free rather than freed here.
 */
7349 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7351 char *method_fname = mono_method_full_name (method, TRUE);
7353 MonoMethodHeader *header = mono_method_get_header (method);
7355 if (header->code_size == 0)
7356 method_code = g_strdup ("method body is empty.");
7358 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7359 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7360 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7361 g_free (method_fname);
7362 g_free (method_code);
7363 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a pre-built exception object. The object is
 * GC-registered via cfg->exception_ptr so it survives until thrown.
 */
7367 set_exception_object (MonoCompile *cfg, MonoException *exception)
7369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7370 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7371 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N. As a peephole, when
 * the value on the stack is the constant just emitted as the last ins of
 * the current bblock, simply retarget its dreg to the local instead of
 * emitting a separate move.
 */
7375 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7378 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7379 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7380 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7381 /* Optimize reg-reg moves away */
7383 * Can't optimize other opcodes, since sp[0] might point to
7384 * the last ins of a decomposed opcode.
7386 sp [0]->dreg = (cfg)->locals [n]->dreg;
7388 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7393 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj <type>": instead of taking the local's
 * address, directly emit the default-value initialization of the local,
 * returning the IP past the consumed sequence (return path not in view;
 * presumably NULL when the pattern does not match).
 */
7396 static inline unsigned char *
7397 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7407 local = read16 (ip + 2);
/* match the two-byte 0xFE-prefixed initobj opcode within the same bblock */
7411 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7412 /* From the INITOBJ case */
7413 token = read32 (ip + 2);
7414 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7415 CHECK_TYPELOAD (klass);
7416 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7417 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Walk the parent chain of CLASS and return whether it derives from
 * (or is) System.Exception.
 */
7425 is_exception_class (MonoClass *class)
7428 if (class == mono_defaults.exception_class)
7430 class = class->parent;
7436 * is_jit_optimizer_disabled:
7438 * Determine whenever M's assembly has a DebuggableAttribute with the
7439 * IsJITOptimizerDisabled flag set.
7442 is_jit_optimizer_disabled (MonoMethod *m)
7444 MonoAssembly *ass = m->klass->image->assembly;
7445 MonoCustomAttrInfo* attrs;
7446 static MonoClass *klass;
7448 gboolean val = FALSE;
/* fast path: the answer is cached per assembly */
7451 if (ass->jit_optimizer_disabled_inited)
7452 return ass->jit_optimizer_disabled;
7455 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* no DebuggableAttribute type available: publish "not disabled" */
7458 ass->jit_optimizer_disabled = FALSE;
/* barrier: the flag value must be visible before the inited flag */
7459 mono_memory_barrier ();
7460 ass->jit_optimizer_disabled_inited = TRUE;
7464 attrs = mono_custom_attrs_from_assembly (ass);
7466 for (i = 0; i < attrs->num_attrs; ++i) {
7467 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7469 MonoMethodSignature *sig;
7471 if (!attr->ctor || attr->ctor->klass != klass)
7473 /* Decode the attribute. See reflection.c */
7474 p = (const char*)attr->data;
/* custom-attribute blobs start with the 0x0001 prolog */
7475 g_assert (read16 (p) == 0x0001);
7478 // FIXME: Support named parameters
7479 sig = mono_method_signature (attr->ctor);
/* only the (bool, bool) ctor overload is decoded here */
7480 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7482 /* Two boolean arguments */
7486 mono_custom_attrs_free (attrs);
7489 ass->jit_optimizer_disabled = val;
7490 mono_memory_barrier ();
7491 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call METHOD -> CMETHOD (signature FSIG, IL opcode
 * CALL_OPCODE) can be compiled as a real tail call. Starts from an
 * architecture-specific or signature-equality baseline, then vetoes the
 * optimization in every situation where the callee could observe the
 * caller's (about to be destroyed) stack frame.
 */
7497 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7499 gboolean supported_tail_call;
7502 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7503 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7505 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7508 for (i = 0; i < fsig->param_count; ++i) {
7509 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7510 /* These can point to the current method's stack */
7511 supported_tail_call = FALSE;
7513 if (fsig->hasthis && cmethod->klass->valuetype)
7514 /* this might point to the current method's stack */
7515 supported_tail_call = FALSE;
7516 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7517 supported_tail_call = FALSE;
7518 if (cfg->method->save_lmf)
7519 supported_tail_call = FALSE;
7520 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7521 supported_tail_call = FALSE;
/* only plain CEE_CALL sites are handled (not callvirt/calli) */
7522 if (call_opcode != CEE_CALL)
7523 supported_tail_call = FALSE;
7525 /* Debugging support */
7527 if (supported_tail_call) {
7528 if (!mono_debug_count ())
7529 supported_tail_call = FALSE;
7533 return supported_tail_call;
7536 /* emits the code needed to access a managed tls var (like ThreadStatic)
7537 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7538 * pointer for the current thread.
7539 * Returns the MonoInst* representing the address of the tls var.
7542 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7545 int static_data_reg, array_reg, dreg;
7546 int offset2_reg, idx_reg;
7547 // inlined access to the tls data (see threads.c)
7548 static_data_reg = alloc_ireg (cfg);
7549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* low 6 bits of the packed offset select the static_data chunk */
7550 idx_reg = alloc_ireg (cfg);
7551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
7552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7553 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7554 array_reg = alloc_ireg (cfg);
7555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* remaining bits (offset >> 6, masked) are the byte offset within the chunk */
7556 offset2_reg = alloc_ireg (cfg);
7557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
7558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
7559 dreg = alloc_ireg (cfg);
7560 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7567 * Handle calls made to ctors from NEWOBJ opcodes.
7569 * REF_BBLOCK will point to the current bblock after the call.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation for a NEWOBJ. Depending on generic
 * sharing, gsharedvt and inlining opportunities this becomes: an intrinsic,
 * an inlined body, an indirect (calli) call through an rgctx-fetched
 * address, or a plain direct call. SP holds the already-allocated object
 * followed by the ctor arguments.
 */
7572 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7573 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7575 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7576 MonoBasicBlock *bblock = *ref_bblock;
/* shared valuetype ctors need an explicit vtable/mrgctx argument */
7578 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7579 mono_method_is_generic_sharable (cmethod, TRUE)) {
7580 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7581 mono_class_vtable (cfg->domain, cmethod->klass);
7582 CHECK_TYPELOAD (cmethod->klass);
7584 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7585 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7588 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7589 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7591 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7593 CHECK_TYPELOAD (cmethod->klass);
7594 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7599 /* Avoid virtual calls to ctors if possible */
7600 if (mono_class_is_marshalbyref (cmethod->klass))
7601 callvirt_this_arg = sp [0];
/* 1) intrinsic implementation of the ctor, if one exists */
7603 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7604 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7605 CHECK_CFG_EXCEPTION;
/* 2) inline the ctor body (never for Exception subclasses) */
7606 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7607 mono_method_check_inlining (cfg, cmethod) &&
7608 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7611 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7612 cfg->real_offset += 5;
/* the call we replaced already cost 5 */
7614 *inline_costs += costs - 5;
7615 *ref_bblock = bblock;
7617 INLINE_FAILURE ("inline failure");
7618 // FIXME-VT: Clean this up
7619 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7620 GSHAREDVT_FAILURE(*ip);
7621 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt: call through the out trampoline */
7623 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7626 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7627 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) non-sharable generic context: indirect call through rgctx-fetched code */
7628 } else if (context_used &&
7629 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7630 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7631 MonoInst *cmethod_addr;
7633 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7635 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7636 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7638 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) plain direct call */
7640 INLINE_FAILURE ("ctor call");
7641 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7642 callvirt_this_arg, NULL, vtable_arg);
7649 * mono_method_to_ir:
7651 * Translate the .net IL into linear IR.
7654 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7655 MonoInst *return_var, MonoInst **inline_args,
7656 guint inline_offset, gboolean is_virtual_call)
7659 MonoInst *ins, **sp, **stack_start;
7660 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7661 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7662 MonoMethod *cmethod, *method_definition;
7663 MonoInst **arg_array;
7664 MonoMethodHeader *header;
7666 guint32 token, ins_flag;
7668 MonoClass *constrained_class = NULL;
7669 unsigned char *ip, *end, *target, *err_pos;
7670 MonoMethodSignature *sig;
7671 MonoGenericContext *generic_context = NULL;
7672 MonoGenericContainer *generic_container = NULL;
7673 MonoType **param_types;
7674 int i, n, start_new_bblock, dreg;
7675 int num_calls = 0, inline_costs = 0;
7676 int breakpoint_id = 0;
7678 GSList *class_inits = NULL;
7679 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7681 gboolean init_locals, seq_points, skip_dead_blocks;
7682 gboolean sym_seq_points = FALSE;
7683 MonoDebugMethodInfo *minfo;
7684 MonoBitSet *seq_point_locs = NULL;
7685 MonoBitSet *seq_point_set_locs = NULL;
7687 cfg->disable_inline = is_jit_optimizer_disabled (method);
7689 /* serialization and xdomain stuff may need access to private fields and methods */
7690 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7691 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7692 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7693 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7694 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7695 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7697 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7698 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7699 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7700 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7701 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7703 image = method->klass->image;
7704 header = mono_method_get_header (method);
7706 MonoLoaderError *error;
7708 if ((error = mono_loader_get_last_error ())) {
7709 mono_cfg_set_exception (cfg, error->exception_type);
7711 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7712 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7714 goto exception_exit;
7716 generic_container = mono_method_get_generic_container (method);
7717 sig = mono_method_signature (method);
7718 num_args = sig->hasthis + sig->param_count;
7719 ip = (unsigned char*)header->code;
7720 cfg->cil_start = ip;
7721 end = ip + header->code_size;
7722 cfg->stat_cil_code_size += header->code_size;
7724 seq_points = cfg->gen_seq_points && cfg->method == method;
7726 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7727 /* We could hit a seq point before attaching to the JIT (#8338) */
7731 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7732 minfo = mono_debug_lookup_method (method);
7734 MonoSymSeqPoint *sps;
7735 int i, n_il_offsets;
7737 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7738 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7739 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7740 sym_seq_points = TRUE;
7741 for (i = 0; i < n_il_offsets; ++i) {
7742 if (sps [i].il_offset < header->code_size)
7743 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7746 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7747 /* Methods without line number info like auto-generated property accessors */
7748 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7749 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7750 sym_seq_points = TRUE;
7755 * Methods without init_locals set could cause asserts in various passes
7756 * (#497220). To work around this, we emit dummy initialization opcodes
7757 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7758 * on some platforms.
7760 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7761 init_locals = header->init_locals;
7765 method_definition = method;
7766 while (method_definition->is_inflated) {
7767 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7768 method_definition = imethod->declaring;
7771 /* SkipVerification is not allowed if core-clr is enabled */
7772 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7774 dont_verify_stloc = TRUE;
7777 if (sig->is_inflated)
7778 generic_context = mono_method_get_context (method);
7779 else if (generic_container)
7780 generic_context = &generic_container->context;
7781 cfg->generic_context = generic_context;
7783 if (!cfg->generic_sharing_context)
7784 g_assert (!sig->has_type_parameters);
7786 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7787 g_assert (method->is_inflated);
7788 g_assert (mono_method_get_context (method)->method_inst);
7790 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7791 g_assert (sig->generic_param_count);
7793 if (cfg->method == method) {
7794 cfg->real_offset = 0;
7796 cfg->real_offset = inline_offset;
7799 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7800 cfg->cil_offset_to_bb_len = header->code_size;
7802 cfg->current_method = method;
7804 if (cfg->verbose_level > 2)
7805 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7807 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7809 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7810 for (n = 0; n < sig->param_count; ++n)
7811 param_types [n + sig->hasthis] = sig->params [n];
7812 cfg->arg_types = param_types;
7814 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7815 if (cfg->method == method) {
7817 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7818 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7821 NEW_BBLOCK (cfg, start_bblock);
7822 cfg->bb_entry = start_bblock;
7823 start_bblock->cil_code = NULL;
7824 start_bblock->cil_length = 0;
7827 NEW_BBLOCK (cfg, end_bblock);
7828 cfg->bb_exit = end_bblock;
7829 end_bblock->cil_code = NULL;
7830 end_bblock->cil_length = 0;
7831 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7832 g_assert (cfg->num_bblocks == 2);
7834 arg_array = cfg->args;
7836 if (header->num_clauses) {
7837 cfg->spvars = g_hash_table_new (NULL, NULL);
7838 cfg->exvars = g_hash_table_new (NULL, NULL);
7840 /* handle exception clauses */
7841 for (i = 0; i < header->num_clauses; ++i) {
7842 MonoBasicBlock *try_bb;
7843 MonoExceptionClause *clause = &header->clauses [i];
7844 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7845 try_bb->real_offset = clause->try_offset;
7846 try_bb->try_start = TRUE;
7847 try_bb->region = ((i + 1) << 8) | clause->flags;
7848 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7849 tblock->real_offset = clause->handler_offset;
7850 tblock->flags |= BB_EXCEPTION_HANDLER;
7853 * Linking the try block with the EH block hinders inlining as we won't be able to
7854 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7856 if (COMPILE_LLVM (cfg))
7857 link_bblock (cfg, try_bb, tblock);
7859 if (*(ip + clause->handler_offset) == CEE_POP)
7860 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7862 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7863 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7864 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7865 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7866 MONO_ADD_INS (tblock, ins);
7868 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7869 /* finally clauses already have a seq point */
7870 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7871 MONO_ADD_INS (tblock, ins);
7874 /* todo: is a fault block unsafe to optimize? */
7875 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7876 tblock->flags |= BB_EXCEPTION_UNSAFE;
7879 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7881 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7883 /* catch and filter blocks get the exception object on the stack */
7884 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7885 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7887 /* mostly like handle_stack_args (), but just sets the input args */
7888 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7889 tblock->in_scount = 1;
7890 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7891 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7895 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7896 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7897 if (!cfg->compile_llvm) {
7898 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7899 ins->dreg = tblock->in_stack [0]->dreg;
7900 MONO_ADD_INS (tblock, ins);
7903 MonoInst *dummy_use;
7906 * Add a dummy use for the exvar so its liveness info will be
7909 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7912 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7913 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7914 tblock->flags |= BB_EXCEPTION_HANDLER;
7915 tblock->real_offset = clause->data.filter_offset;
7916 tblock->in_scount = 1;
7917 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7918 /* The filter block shares the exvar with the handler block */
7919 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7920 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7921 MONO_ADD_INS (tblock, ins);
7925 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7926 clause->data.catch_class &&
7927 cfg->generic_sharing_context &&
7928 mono_class_check_context_used (clause->data.catch_class)) {
7930 * In shared generic code with catch
7931 * clauses containing type variables
7932 * the exception handling code has to
7933 * be able to get to the rgctx.
7934 * Therefore we have to make sure that
7935 * the vtable/mrgctx argument (for
7936 * static or generic methods) or the
7937 * "this" argument (for non-static
7938 * methods) are live.
7940 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7941 mini_method_get_context (method)->method_inst ||
7942 method->klass->valuetype) {
7943 mono_get_vtable_var (cfg);
7945 MonoInst *dummy_use;
7947 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7952 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7953 cfg->cbb = start_bblock;
7954 cfg->args = arg_array;
7955 mono_save_args (cfg, sig, inline_args);
7958 /* FIRST CODE BLOCK */
7959 NEW_BBLOCK (cfg, bblock);
7960 bblock->cil_code = ip;
7964 ADD_BBLOCK (cfg, bblock);
7966 if (cfg->method == method) {
7967 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7968 if (breakpoint_id) {
7969 MONO_INST_NEW (cfg, ins, OP_BREAK);
7970 MONO_ADD_INS (bblock, ins);
7974 /* we use a separate basic block for the initialization code */
7975 NEW_BBLOCK (cfg, init_localsbb);
7976 cfg->bb_init = init_localsbb;
7977 init_localsbb->real_offset = cfg->real_offset;
7978 start_bblock->next_bb = init_localsbb;
7979 init_localsbb->next_bb = bblock;
7980 link_bblock (cfg, start_bblock, init_localsbb);
7981 link_bblock (cfg, init_localsbb, bblock);
7983 cfg->cbb = init_localsbb;
7985 if (cfg->gsharedvt && cfg->method == method) {
7986 MonoGSharedVtMethodInfo *info;
7987 MonoInst *var, *locals_var;
7990 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7991 info->method = cfg->method;
7992 info->count_entries = 16;
7993 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7994 cfg->gsharedvt_info = info;
7996 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7997 /* prevent it from being register allocated */
7998 //var->flags |= MONO_INST_VOLATILE;
7999 cfg->gsharedvt_info_var = var;
8001 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8002 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8004 /* Allocate locals */
8005 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8006 /* prevent it from being register allocated */
8007 //locals_var->flags |= MONO_INST_VOLATILE;
8008 cfg->gsharedvt_locals_var = locals_var;
8010 dreg = alloc_ireg (cfg);
8011 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8013 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8014 ins->dreg = locals_var->dreg;
8016 MONO_ADD_INS (cfg->cbb, ins);
8017 cfg->gsharedvt_locals_var_ins = ins;
8019 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8022 ins->flags |= MONO_INST_INIT;
8026 if (mono_security_core_clr_enabled ()) {
8027 /* check if this is native code, e.g. an icall or a p/invoke */
8028 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8029 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8031 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8032 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8034 /* if this ia a native call then it can only be JITted from platform code */
8035 if ((icall || pinvk) && method->klass && method->klass->image) {
8036 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8037 MonoException *ex = icall ? mono_get_exception_security () :
8038 mono_get_exception_method_access ();
8039 emit_throw_exception (cfg, ex);
8046 CHECK_CFG_EXCEPTION;
8048 if (header->code_size == 0)
8051 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8056 if (cfg->method == method)
8057 mono_debug_init_method (cfg, bblock, breakpoint_id);
8059 for (n = 0; n < header->num_locals; ++n) {
8060 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8065 /* We force the vtable variable here for all shared methods
8066 for the possibility that they might show up in a stack
8067 trace where their exact instantiation is needed. */
8068 if (cfg->generic_sharing_context && method == cfg->method) {
8069 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8070 mini_method_get_context (method)->method_inst ||
8071 method->klass->valuetype) {
8072 mono_get_vtable_var (cfg);
8074 /* FIXME: Is there a better way to do this?
8075 We need the variable live for the duration
8076 of the whole method. */
8077 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8081 /* add a check for this != NULL to inlined methods */
8082 if (is_virtual_call) {
8085 NEW_ARGLOAD (cfg, arg_ins, 0);
8086 MONO_ADD_INS (cfg->cbb, arg_ins);
8087 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8090 skip_dead_blocks = !dont_verify;
8091 if (skip_dead_blocks) {
8092 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8097 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8098 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8101 start_new_bblock = 0;
8104 if (cfg->method == method)
8105 cfg->real_offset = ip - header->code;
8107 cfg->real_offset = inline_offset;
8112 if (start_new_bblock) {
8113 bblock->cil_length = ip - bblock->cil_code;
8114 if (start_new_bblock == 2) {
8115 g_assert (ip == tblock->cil_code);
8117 GET_BBLOCK (cfg, tblock, ip);
8119 bblock->next_bb = tblock;
8122 start_new_bblock = 0;
8123 for (i = 0; i < bblock->in_scount; ++i) {
8124 if (cfg->verbose_level > 3)
8125 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8126 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8130 g_slist_free (class_inits);
8133 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8134 link_bblock (cfg, bblock, tblock);
8135 if (sp != stack_start) {
8136 handle_stack_args (cfg, stack_start, sp - stack_start);
8138 CHECK_UNVERIFIABLE (cfg);
8140 bblock->next_bb = tblock;
8143 for (i = 0; i < bblock->in_scount; ++i) {
8144 if (cfg->verbose_level > 3)
8145 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8146 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8149 g_slist_free (class_inits);
8154 if (skip_dead_blocks) {
8155 int ip_offset = ip - header->code;
8157 if (ip_offset == bb->end)
8161 int op_size = mono_opcode_size (ip, end);
8162 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8164 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8166 if (ip_offset + op_size == bb->end) {
8167 MONO_INST_NEW (cfg, ins, OP_NOP);
8168 MONO_ADD_INS (bblock, ins);
8169 start_new_bblock = 1;
8177 * Sequence points are points where the debugger can place a breakpoint.
8178 * Currently, we generate these automatically at points where the IL
8181 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8183 * Make methods interruptable at the beginning, and at the targets of
8184 * backward branches.
8185 * Also, do this at the start of every bblock in methods with clauses too,
8186 * to be able to handle instructions with inprecise control flow like
8188 * Backward branches are handled at the end of method-to-ir ().
8190 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8192 /* Avoid sequence points on empty IL like .volatile */
8193 // FIXME: Enable this
8194 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8195 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8196 if (sp != stack_start)
8197 ins->flags |= MONO_INST_NONEMPTY_STACK;
8198 MONO_ADD_INS (cfg->cbb, ins);
8201 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8204 bblock->real_offset = cfg->real_offset;
8206 if ((cfg->method == method) && cfg->coverage_info) {
8207 guint32 cil_offset = ip - header->code;
8208 cfg->coverage_info->data [cil_offset].cil_code = ip;
8210 /* TODO: Use an increment here */
8211 #if defined(TARGET_X86)
8212 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8213 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8215 MONO_ADD_INS (cfg->cbb, ins);
8217 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8218 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8222 if (cfg->verbose_level > 3)
8223 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8227 if (seq_points && !sym_seq_points && sp != stack_start) {
8229 * The C# compiler uses these nops to notify the JIT that it should
8230 * insert seq points.
8232 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8233 MONO_ADD_INS (cfg->cbb, ins);
8235 if (cfg->keep_cil_nops)
8236 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8238 MONO_INST_NEW (cfg, ins, OP_NOP);
8240 MONO_ADD_INS (bblock, ins);
8243 if (should_insert_brekpoint (cfg->method)) {
8244 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8246 MONO_INST_NEW (cfg, ins, OP_NOP);
8249 MONO_ADD_INS (bblock, ins);
8255 CHECK_STACK_OVF (1);
8256 n = (*ip)-CEE_LDARG_0;
8258 EMIT_NEW_ARGLOAD (cfg, ins, n);
8266 CHECK_STACK_OVF (1);
8267 n = (*ip)-CEE_LDLOC_0;
8269 EMIT_NEW_LOCLOAD (cfg, ins, n);
8278 n = (*ip)-CEE_STLOC_0;
8281 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8283 emit_stloc_ir (cfg, sp, header, n);
8290 CHECK_STACK_OVF (1);
8293 EMIT_NEW_ARGLOAD (cfg, ins, n);
8299 CHECK_STACK_OVF (1);
8302 NEW_ARGLOADA (cfg, ins, n);
8303 MONO_ADD_INS (cfg->cbb, ins);
8313 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8315 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8320 CHECK_STACK_OVF (1);
8323 EMIT_NEW_LOCLOAD (cfg, ins, n);
8327 case CEE_LDLOCA_S: {
8328 unsigned char *tmp_ip;
8330 CHECK_STACK_OVF (1);
8331 CHECK_LOCAL (ip [1]);
8333 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8339 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8348 CHECK_LOCAL (ip [1]);
8349 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8351 emit_stloc_ir (cfg, sp, header, ip [1]);
8356 CHECK_STACK_OVF (1);
8357 EMIT_NEW_PCONST (cfg, ins, NULL);
8358 ins->type = STACK_OBJ;
8363 CHECK_STACK_OVF (1);
8364 EMIT_NEW_ICONST (cfg, ins, -1);
8377 CHECK_STACK_OVF (1);
8378 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8384 CHECK_STACK_OVF (1);
8386 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8392 CHECK_STACK_OVF (1);
8393 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8399 CHECK_STACK_OVF (1);
8400 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8401 ins->type = STACK_I8;
8402 ins->dreg = alloc_dreg (cfg, STACK_I8);
8404 ins->inst_l = (gint64)read64 (ip);
8405 MONO_ADD_INS (bblock, ins);
8411 gboolean use_aotconst = FALSE;
8413 #ifdef TARGET_POWERPC
8414 /* FIXME: Clean this up */
8415 if (cfg->compile_aot)
8416 use_aotconst = TRUE;
8419 /* FIXME: we should really allocate this only late in the compilation process */
8420 f = mono_domain_alloc (cfg->domain, sizeof (float));
8422 CHECK_STACK_OVF (1);
8428 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8430 dreg = alloc_freg (cfg);
8431 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8432 ins->type = cfg->r4_stack_type;
8434 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8435 ins->type = cfg->r4_stack_type;
8436 ins->dreg = alloc_dreg (cfg, STACK_R8);
8438 MONO_ADD_INS (bblock, ins);
8448 gboolean use_aotconst = FALSE;
8450 #ifdef TARGET_POWERPC
8451 /* FIXME: Clean this up */
8452 if (cfg->compile_aot)
8453 use_aotconst = TRUE;
8456 /* FIXME: we should really allocate this only late in the compilation process */
8457 d = mono_domain_alloc (cfg->domain, sizeof (double));
8459 CHECK_STACK_OVF (1);
8465 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8467 dreg = alloc_freg (cfg);
8468 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8469 ins->type = STACK_R8;
8471 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8472 ins->type = STACK_R8;
8473 ins->dreg = alloc_dreg (cfg, STACK_R8);
8475 MONO_ADD_INS (bblock, ins);
8484 MonoInst *temp, *store;
8486 CHECK_STACK_OVF (1);
8490 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8491 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8493 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8496 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8509 if (sp [0]->type == STACK_R8)
8510 /* we need to pop the value from the x86 FP stack */
8511 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8517 INLINE_FAILURE ("jmp");
8518 GSHAREDVT_FAILURE (*ip);
8521 if (stack_start != sp)
8523 token = read32 (ip + 1);
8524 /* FIXME: check the signature matches */
8525 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8527 if (!cmethod || mono_loader_get_last_error ())
8530 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8531 GENERIC_SHARING_FAILURE (CEE_JMP);
8533 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8535 if (ARCH_HAVE_OP_TAIL_CALL) {
8536 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8539 /* Handle tail calls similarly to calls */
8540 n = fsig->param_count + fsig->hasthis;
8544 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8545 call->method = cmethod;
8546 call->tail_call = TRUE;
8547 call->signature = mono_method_signature (cmethod);
8548 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8549 call->inst.inst_p0 = cmethod;
8550 for (i = 0; i < n; ++i)
8551 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8553 mono_arch_emit_call (cfg, call);
8554 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8555 MONO_ADD_INS (bblock, (MonoInst*)call);
8557 for (i = 0; i < num_args; ++i)
8558 /* Prevent arguments from being optimized away */
8559 arg_array [i]->flags |= MONO_INST_VOLATILE;
8561 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8562 ins = (MonoInst*)call;
8563 ins->inst_p0 = cmethod;
8564 MONO_ADD_INS (bblock, ins);
8568 start_new_bblock = 1;
8573 MonoMethodSignature *fsig;
8576 token = read32 (ip + 1);
8580 //GSHAREDVT_FAILURE (*ip);
8585 fsig = mini_get_signature (method, token, generic_context);
8587 if (method->dynamic && fsig->pinvoke) {
8591 * This is a call through a function pointer using a pinvoke
8592 * signature. Have to create a wrapper and call that instead.
8593 * FIXME: This is very slow, need to create a wrapper at JIT time
8594 * instead based on the signature.
8596 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8597 EMIT_NEW_PCONST (cfg, args [1], fsig);
8599 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8602 n = fsig->param_count + fsig->hasthis;
8606 //g_assert (!virtual || fsig->hasthis);
8610 inline_costs += 10 * num_calls++;
8613 * Making generic calls out of gsharedvt methods.
8614 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8615 * patching gshared method addresses into a gsharedvt method.
8617 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8619 * We pass the address to the gsharedvt trampoline in the rgctx reg
8621 MonoInst *callee = addr;
8623 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8625 GSHAREDVT_FAILURE (*ip);
8627 addr = emit_get_rgctx_sig (cfg, context_used,
8628 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8629 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8633 /* Prevent inlining of methods with indirect calls */
8634 INLINE_FAILURE ("indirect call");
8636 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8641 * Instead of emitting an indirect call, emit a direct call
8642 * with the contents of the aotconst as the patch info.
8644 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8645 info_type = addr->inst_c1;
8646 info_data = addr->inst_p0;
8648 info_type = addr->inst_right->inst_c1;
8649 info_data = addr->inst_right->inst_left;
8652 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8653 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8658 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8662 /* End of call, INS should contain the result of the call, if any */
8664 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8666 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8669 CHECK_CFG_EXCEPTION;
8673 constrained_class = NULL;
8677 case CEE_CALLVIRT: {
8678 MonoInst *addr = NULL;
8679 MonoMethodSignature *fsig = NULL;
8681 int virtual = *ip == CEE_CALLVIRT;
8682 gboolean pass_imt_from_rgctx = FALSE;
8683 MonoInst *imt_arg = NULL;
8684 MonoInst *keep_this_alive = NULL;
8685 gboolean pass_vtable = FALSE;
8686 gboolean pass_mrgctx = FALSE;
8687 MonoInst *vtable_arg = NULL;
8688 gboolean check_this = FALSE;
8689 gboolean supported_tail_call = FALSE;
8690 gboolean tail_call = FALSE;
8691 gboolean need_seq_point = FALSE;
8692 guint32 call_opcode = *ip;
8693 gboolean emit_widen = TRUE;
8694 gboolean push_res = TRUE;
8695 gboolean skip_ret = FALSE;
8696 gboolean delegate_invoke = FALSE;
8697 gboolean direct_icall = FALSE;
8698 gboolean constrained_partial_call = FALSE;
8699 MonoMethod *cil_method;
8702 token = read32 (ip + 1);
8706 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8707 cil_method = cmethod;
8709 if (constrained_class) {
8710 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8711 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8712 g_assert (!cmethod->klass->valuetype);
8713 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8714 constrained_partial_call = TRUE;
8718 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8719 if (cfg->verbose_level > 2)
8720 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8721 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8722 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8723 cfg->generic_sharing_context)) {
8724 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8728 if (cfg->verbose_level > 2)
8729 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8731 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8733 * This is needed since get_method_constrained can't find
8734 * the method in klass representing a type var.
8735 * The type var is guaranteed to be a reference type in this
8738 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8739 g_assert (!cmethod->klass->valuetype);
8741 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8747 if (!cmethod || mono_loader_get_last_error ())
8749 if (!dont_verify && !cfg->skip_visibility) {
8750 MonoMethod *target_method = cil_method;
8751 if (method->is_inflated) {
8752 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8754 if (!mono_method_can_access_method (method_definition, target_method) &&
8755 !mono_method_can_access_method (method, cil_method))
8756 METHOD_ACCESS_FAILURE (method, cil_method);
8759 if (mono_security_core_clr_enabled ())
8760 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8762 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8763 /* MS.NET seems to silently convert this to a callvirt */
8768 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8769 * converts to a callvirt.
8771 * tests/bug-515884.il is an example of this behavior
8773 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8774 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8775 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8779 if (!cmethod->klass->inited)
8780 if (!mono_class_init (cmethod->klass))
8781 TYPE_LOAD_ERROR (cmethod->klass);
8783 fsig = mono_method_signature (cmethod);
8786 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8787 mini_class_is_system_array (cmethod->klass)) {
8788 array_rank = cmethod->klass->rank;
8789 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8790 direct_icall = TRUE;
8791 } else if (fsig->pinvoke) {
8792 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8793 check_for_pending_exc, cfg->compile_aot);
8794 fsig = mono_method_signature (wrapper);
8795 } else if (constrained_class) {
8797 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8801 mono_save_token_info (cfg, image, token, cil_method);
8803 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8804 need_seq_point = TRUE;
8806 /* Don't support calls made using type arguments for now */
8808 if (cfg->gsharedvt) {
8809 if (mini_is_gsharedvt_signature (cfg, fsig))
8810 GSHAREDVT_FAILURE (*ip);
8814 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8815 g_assert_not_reached ();
8817 n = fsig->param_count + fsig->hasthis;
8819 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8822 if (!cfg->generic_sharing_context)
8823 g_assert (!mono_method_check_context_used (cmethod));
8827 //g_assert (!virtual || fsig->hasthis);
8831 if (constrained_class) {
8832 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8833 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8834 /* The 'Own method' case below */
8835 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8836 /* 'The type parameter is instantiated as a reference type' case below. */
8838 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
8839 CHECK_CFG_EXCEPTION;
8846 * We have the `constrained.' prefix opcode.
8848 if (constrained_partial_call) {
8849 gboolean need_box = TRUE;
8852 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8853 * called method is not known at compile time either. The called method could end up being
8854 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8855 * to box the receiver.
8856 * A simple solution would be to box always and make a normal virtual call, but that would
8857 * be bad performance wise.
8859 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8861 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8868 MonoBasicBlock *is_ref_bb, *end_bb;
8869 MonoInst *nonbox_call;
8872 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8874 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8875 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8877 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8879 NEW_BBLOCK (cfg, is_ref_bb);
8880 NEW_BBLOCK (cfg, end_bb);
8882 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8883 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8884 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8887 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8889 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8892 MONO_START_BB (cfg, is_ref_bb);
8893 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8894 ins->klass = constrained_class;
8895 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8896 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8898 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8900 MONO_START_BB (cfg, end_bb);
8903 nonbox_call->dreg = ins->dreg;
8905 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8906 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8907 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8910 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8912 * The type parameter is instantiated as a valuetype,
8913 * but that type doesn't override the method we're
8914 * calling, so we need to box `this'.
8916 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8917 ins->klass = constrained_class;
8918 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8919 CHECK_CFG_EXCEPTION;
8920 } else if (!constrained_class->valuetype) {
8921 int dreg = alloc_ireg_ref (cfg);
8924 * The type parameter is instantiated as a reference
8925 * type. We have a managed pointer on the stack, so
8926 * we need to dereference it here.
8928 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8929 ins->type = STACK_OBJ;
8932 if (cmethod->klass->valuetype) {
8935 /* Interface method */
8938 mono_class_setup_vtable (constrained_class);
8939 CHECK_TYPELOAD (constrained_class);
8940 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8942 TYPE_LOAD_ERROR (constrained_class);
8943 slot = mono_method_get_vtable_slot (cmethod);
8945 TYPE_LOAD_ERROR (cmethod->klass);
8946 cmethod = constrained_class->vtable [ioffset + slot];
8948 if (cmethod->klass == mono_defaults.enum_class) {
8949 /* Enum implements some interfaces, so treat this as the first case */
8950 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8951 ins->klass = constrained_class;
8952 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8953 CHECK_CFG_EXCEPTION;
8958 constrained_class = NULL;
8961 if (check_call_signature (cfg, fsig, sp))
8964 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8965 delegate_invoke = TRUE;
8967 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8969 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8970 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8978 * If the callee is a shared method, then its static cctor
8979 * might not get called after the call was patched.
8981 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8982 emit_generic_class_init (cfg, cmethod->klass, &bblock);
8983 CHECK_TYPELOAD (cmethod->klass);
8986 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8988 if (cfg->generic_sharing_context) {
8989 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8991 context_used = mini_method_check_context_used (cfg, cmethod);
8993 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8994 /* Generic method interface
8995 calls are resolved via a
8996 helper function and don't
8998 if (!cmethod_context || !cmethod_context->method_inst)
8999 pass_imt_from_rgctx = TRUE;
9003 * If a shared method calls another
9004 * shared method then the caller must
9005 * have a generic sharing context
9006 * because the magic trampoline
9007 * requires it. FIXME: We shouldn't
9008 * have to force the vtable/mrgctx
9009 * variable here. Instead there
9010 * should be a flag in the cfg to
9011 * request a generic sharing context.
9014 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9015 mono_get_vtable_var (cfg);
9020 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9022 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9024 CHECK_TYPELOAD (cmethod->klass);
9025 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9030 g_assert (!vtable_arg);
9032 if (!cfg->compile_aot) {
9034 * emit_get_rgctx_method () calls mono_class_vtable () so check
9035 * for type load errors before.
9037 mono_class_setup_vtable (cmethod->klass);
9038 CHECK_TYPELOAD (cmethod->klass);
9041 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9043 /* !marshalbyref is needed to properly handle generic methods + remoting */
9044 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9045 MONO_METHOD_IS_FINAL (cmethod)) &&
9046 !mono_class_is_marshalbyref (cmethod->klass)) {
9053 if (pass_imt_from_rgctx) {
9054 g_assert (!pass_vtable);
9056 imt_arg = emit_get_rgctx_method (cfg, context_used,
9057 cmethod, MONO_RGCTX_INFO_METHOD);
9061 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9063 /* Calling virtual generic methods */
9064 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9065 !(MONO_METHOD_IS_FINAL (cmethod) &&
9066 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9067 fsig->generic_param_count &&
9068 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9069 MonoInst *this_temp, *this_arg_temp, *store;
9070 MonoInst *iargs [4];
9071 gboolean use_imt = FALSE;
9073 g_assert (fsig->is_inflated);
9075 /* Prevent inlining of methods that contain indirect calls */
9076 INLINE_FAILURE ("virtual generic call");
9078 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9079 GSHAREDVT_FAILURE (*ip);
9081 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9082 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9087 g_assert (!imt_arg);
9089 g_assert (cmethod->is_inflated);
9090 imt_arg = emit_get_rgctx_method (cfg, context_used,
9091 cmethod, MONO_RGCTX_INFO_METHOD);
9092 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9094 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9095 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9096 MONO_ADD_INS (bblock, store);
9098 /* FIXME: This should be a managed pointer */
9099 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9101 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9102 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9103 cmethod, MONO_RGCTX_INFO_METHOD);
9104 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9105 addr = mono_emit_jit_icall (cfg,
9106 mono_helper_compile_generic_method, iargs);
9108 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9110 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9117 * Implement a workaround for the inherent races involved in locking:
9123 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9124 * try block, the Exit () won't be executed, see:
9125 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9126 * To work around this, we extend such try blocks to include the last x bytes
9127 * of the Monitor.Enter () call.
9129 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9130 MonoBasicBlock *tbb;
9132 GET_BBLOCK (cfg, tbb, ip + 5);
9134 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9135 * from Monitor.Enter like ArgumentNullException.
9137 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9138 /* Mark this bblock as needing to be extended */
9139 tbb->extend_try_block = TRUE;
9143 /* Conversion to a JIT intrinsic */
9144 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9146 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9147 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9154 if ((cfg->opt & MONO_OPT_INLINE) &&
9155 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9156 mono_method_check_inlining (cfg, cmethod)) {
9158 gboolean always = FALSE;
9160 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9161 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9162 /* Prevent inlining of methods that call wrappers */
9163 INLINE_FAILURE ("wrapper call");
9164 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9168 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9170 cfg->real_offset += 5;
9172 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9173 /* *sp is already set by inline_method */
9178 inline_costs += costs;
9184 /* Tail recursion elimination */
9185 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9186 gboolean has_vtargs = FALSE;
9189 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9190 INLINE_FAILURE ("tail call");
9192 /* keep it simple */
9193 for (i = fsig->param_count - 1; i >= 0; i--) {
9194 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9199 for (i = 0; i < n; ++i)
9200 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9201 MONO_INST_NEW (cfg, ins, OP_BR);
9202 MONO_ADD_INS (bblock, ins);
9203 tblock = start_bblock->out_bb [0];
9204 link_bblock (cfg, bblock, tblock);
9205 ins->inst_target_bb = tblock;
9206 start_new_bblock = 1;
9208 /* skip the CEE_RET, too */
9209 if (ip_in_bb (cfg, bblock, ip + 5))
9216 inline_costs += 10 * num_calls++;
9219 * Making generic calls out of gsharedvt methods.
9220 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9221 * patching gshared method addresses into a gsharedvt method.
9223 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9224 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9225 MonoRgctxInfoType info_type;
9228 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9229 //GSHAREDVT_FAILURE (*ip);
9230 // disable for possible remoting calls
9231 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9232 GSHAREDVT_FAILURE (*ip);
9233 if (fsig->generic_param_count) {
9234 /* virtual generic call */
9235 g_assert (!imt_arg);
9236 /* Same as the virtual generic case above */
9237 imt_arg = emit_get_rgctx_method (cfg, context_used,
9238 cmethod, MONO_RGCTX_INFO_METHOD);
9239 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9241 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9242 /* This can happen when we call a fully instantiated iface method */
9243 imt_arg = emit_get_rgctx_method (cfg, context_used,
9244 cmethod, MONO_RGCTX_INFO_METHOD);
9249 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9250 keep_this_alive = sp [0];
9252 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9253 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9255 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9256 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9258 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9262 /* Generic sharing */
9265 * Use this if the callee is gsharedvt sharable too, since
9266 * at runtime we might find an instantiation so the call cannot
9267 * be patched (the 'no_patch' code path in mini-trampolines.c).
9269 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9270 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9271 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9272 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9273 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9274 INLINE_FAILURE ("gshared");
9276 g_assert (cfg->generic_sharing_context && cmethod);
9280 * We are compiling a call to a
9281 * generic method from shared code,
9282 * which means that we have to look up
9283 * the method in the rgctx and do an
9287 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9289 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9290 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9294 /* Direct calls to icalls */
9296 MonoMethod *wrapper;
9299 /* Inline the wrapper */
9300 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9302 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9303 g_assert (costs > 0);
9304 cfg->real_offset += 5;
9306 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9307 /* *sp is already set by inline_method */
9312 inline_costs += costs;
9321 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9322 MonoInst *val = sp [fsig->param_count];
9324 if (val->type == STACK_OBJ) {
9325 MonoInst *iargs [2];
9330 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9333 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9335 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9336 emit_write_barrier (cfg, addr, val);
9337 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9338 GSHAREDVT_FAILURE (*ip);
9339 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9340 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9342 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9343 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9344 if (!cmethod->klass->element_class->valuetype && !readonly)
9345 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9346 CHECK_TYPELOAD (cmethod->klass);
9349 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9352 g_assert_not_reached ();
9359 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9363 /* Tail prefix / tail call optimization */
9365 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9366 /* FIXME: runtime generic context pointer for jumps? */
9367 /* FIXME: handle this for generic sharing eventually */
9368 if ((ins_flag & MONO_INST_TAILCALL) &&
9369 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9370 supported_tail_call = TRUE;
9372 if (supported_tail_call) {
9375 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9376 INLINE_FAILURE ("tail call");
9378 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9380 if (ARCH_HAVE_OP_TAIL_CALL) {
9381 /* Handle tail calls similarly to normal calls */
9384 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9386 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9387 call->tail_call = TRUE;
9388 call->method = cmethod;
9389 call->signature = mono_method_signature (cmethod);
9392 * We implement tail calls by storing the actual arguments into the
9393 * argument variables, then emitting a CEE_JMP.
9395 for (i = 0; i < n; ++i) {
9396 /* Prevent argument from being register allocated */
9397 arg_array [i]->flags |= MONO_INST_VOLATILE;
9398 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9400 ins = (MonoInst*)call;
9401 ins->inst_p0 = cmethod;
9402 ins->inst_p1 = arg_array [0];
9403 MONO_ADD_INS (bblock, ins);
9404 link_bblock (cfg, bblock, end_bblock);
9405 start_new_bblock = 1;
9407 // FIXME: Eliminate unreachable epilogs
9410 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9411 * only reachable from this call.
9413 GET_BBLOCK (cfg, tblock, ip + 5);
9414 if (tblock == bblock || tblock->in_count == 0)
9423 * Synchronized wrappers.
9424 * It's hard to determine where to replace a method with its synchronized
9425 * wrapper without causing an infinite recursion. The current solution is
9426 * to add the synchronized wrapper in the trampolines, and to
9427 * change the called method to a dummy wrapper, and resolve that wrapper
9428 * to the real method in mono_jit_compile_method ().
9430 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9431 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9432 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9433 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9437 INLINE_FAILURE ("call");
9438 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9439 imt_arg, vtable_arg);
9442 link_bblock (cfg, bblock, end_bblock);
9443 start_new_bblock = 1;
9445 // FIXME: Eliminate unreachable epilogs
9448 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9449 * only reachable from this call.
9451 GET_BBLOCK (cfg, tblock, ip + 5);
9452 if (tblock == bblock || tblock->in_count == 0)
9459 /* End of call, INS should contain the result of the call, if any */
9461 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9464 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9469 if (keep_this_alive) {
9470 MonoInst *dummy_use;
9472 /* See mono_emit_method_call_full () */
9473 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9476 CHECK_CFG_EXCEPTION;
9480 g_assert (*ip == CEE_RET);
9484 constrained_class = NULL;
9486 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9490 if (cfg->method != method) {
9491 /* return from inlined method */
9493 * If in_count == 0, that means the ret is unreachable due to
9494 * being preceded by a throw. In that case, inline_method () will
9495 * handle setting the return value
9496 * (test case: test_0_inline_throw ()).
9498 if (return_var && cfg->cbb->in_count) {
9499 MonoType *ret_type = mono_method_signature (method)->ret;
9505 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9508 //g_assert (returnvar != -1);
9509 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9510 cfg->ret_var_set = TRUE;
9513 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9515 if (cfg->lmf_var && cfg->cbb->in_count)
9519 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9521 if (seq_points && !sym_seq_points) {
9523 * Place a seq point here too even though the IL stack is not
9524 * empty, so a step over on
9527 * will work correctly.
9529 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9530 MONO_ADD_INS (cfg->cbb, ins);
9533 g_assert (!return_var);
9537 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9540 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9543 if (!cfg->vret_addr) {
9546 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9548 EMIT_NEW_RETLOADA (cfg, ret_addr);
9550 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9551 ins->klass = mono_class_from_mono_type (ret_type);
9554 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9555 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9556 MonoInst *iargs [1];
9560 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9561 mono_arch_emit_setret (cfg, method, conv);
9563 mono_arch_emit_setret (cfg, method, *sp);
9566 mono_arch_emit_setret (cfg, method, *sp);
9571 if (sp != stack_start)
9573 MONO_INST_NEW (cfg, ins, OP_BR);
9575 ins->inst_target_bb = end_bblock;
9576 MONO_ADD_INS (bblock, ins);
9577 link_bblock (cfg, bblock, end_bblock);
9578 start_new_bblock = 1;
9582 MONO_INST_NEW (cfg, ins, OP_BR);
9584 target = ip + 1 + (signed char)(*ip);
9586 GET_BBLOCK (cfg, tblock, target);
9587 link_bblock (cfg, bblock, tblock);
9588 ins->inst_target_bb = tblock;
9589 if (sp != stack_start) {
9590 handle_stack_args (cfg, stack_start, sp - stack_start);
9592 CHECK_UNVERIFIABLE (cfg);
9594 MONO_ADD_INS (bblock, ins);
9595 start_new_bblock = 1;
9596 inline_costs += BRANCH_COST;
9610 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9612 target = ip + 1 + *(signed char*)ip;
9618 inline_costs += BRANCH_COST;
9622 MONO_INST_NEW (cfg, ins, OP_BR);
9625 target = ip + 4 + (gint32)read32(ip);
9627 GET_BBLOCK (cfg, tblock, target);
9628 link_bblock (cfg, bblock, tblock);
9629 ins->inst_target_bb = tblock;
9630 if (sp != stack_start) {
9631 handle_stack_args (cfg, stack_start, sp - stack_start);
9633 CHECK_UNVERIFIABLE (cfg);
9636 MONO_ADD_INS (bblock, ins);
9638 start_new_bblock = 1;
9639 inline_costs += BRANCH_COST;
9646 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9647 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9648 guint32 opsize = is_short ? 1 : 4;
9650 CHECK_OPSIZE (opsize);
9652 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9655 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9660 GET_BBLOCK (cfg, tblock, target);
9661 link_bblock (cfg, bblock, tblock);
9662 GET_BBLOCK (cfg, tblock, ip);
9663 link_bblock (cfg, bblock, tblock);
9665 if (sp != stack_start) {
9666 handle_stack_args (cfg, stack_start, sp - stack_start);
9667 CHECK_UNVERIFIABLE (cfg);
9670 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9671 cmp->sreg1 = sp [0]->dreg;
9672 type_from_op (cfg, cmp, sp [0], NULL);
9675 #if SIZEOF_REGISTER == 4
9676 if (cmp->opcode == OP_LCOMPARE_IMM) {
9677 /* Convert it to OP_LCOMPARE */
9678 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9679 ins->type = STACK_I8;
9680 ins->dreg = alloc_dreg (cfg, STACK_I8);
9682 MONO_ADD_INS (bblock, ins);
9683 cmp->opcode = OP_LCOMPARE;
9684 cmp->sreg2 = ins->dreg;
9687 MONO_ADD_INS (bblock, cmp);
9689 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9690 type_from_op (cfg, ins, sp [0], NULL);
9691 MONO_ADD_INS (bblock, ins);
9692 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9693 GET_BBLOCK (cfg, tblock, target);
9694 ins->inst_true_bb = tblock;
9695 GET_BBLOCK (cfg, tblock, ip);
9696 ins->inst_false_bb = tblock;
9697 start_new_bblock = 2;
9700 inline_costs += BRANCH_COST;
9715 MONO_INST_NEW (cfg, ins, *ip);
9717 target = ip + 4 + (gint32)read32(ip);
9723 inline_costs += BRANCH_COST;
9727 MonoBasicBlock **targets;
9728 MonoBasicBlock *default_bblock;
9729 MonoJumpInfoBBTable *table;
9730 int offset_reg = alloc_preg (cfg);
9731 int target_reg = alloc_preg (cfg);
9732 int table_reg = alloc_preg (cfg);
9733 int sum_reg = alloc_preg (cfg);
9734 gboolean use_op_switch;
9738 n = read32 (ip + 1);
9741 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9745 CHECK_OPSIZE (n * sizeof (guint32));
9746 target = ip + n * sizeof (guint32);
9748 GET_BBLOCK (cfg, default_bblock, target);
9749 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9751 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9752 for (i = 0; i < n; ++i) {
9753 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9754 targets [i] = tblock;
9755 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9759 if (sp != stack_start) {
9761 * Link the current bb with the targets as well, so handle_stack_args
9762 * will set their in_stack correctly.
9764 link_bblock (cfg, bblock, default_bblock);
9765 for (i = 0; i < n; ++i)
9766 link_bblock (cfg, bblock, targets [i]);
9768 handle_stack_args (cfg, stack_start, sp - stack_start);
9770 CHECK_UNVERIFIABLE (cfg);
9773 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9777 for (i = 0; i < n; ++i)
9778 link_bblock (cfg, bblock, targets [i]);
9780 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9781 table->table = targets;
9782 table->table_size = n;
9784 use_op_switch = FALSE;
9786 /* ARM implements SWITCH statements differently */
9787 /* FIXME: Make it use the generic implementation */
9788 if (!cfg->compile_aot)
9789 use_op_switch = TRUE;
9792 if (COMPILE_LLVM (cfg))
9793 use_op_switch = TRUE;
9795 cfg->cbb->has_jump_table = 1;
9797 if (use_op_switch) {
9798 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9799 ins->sreg1 = src1->dreg;
9800 ins->inst_p0 = table;
9801 ins->inst_many_bb = targets;
9802 ins->klass = GUINT_TO_POINTER (n);
9803 MONO_ADD_INS (cfg->cbb, ins);
9805 if (sizeof (gpointer) == 8)
9806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9810 #if SIZEOF_REGISTER == 8
9811 /* The upper word might not be zero, and we add it to a 64 bit address later */
9812 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9815 if (cfg->compile_aot) {
9816 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9818 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9819 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9820 ins->inst_p0 = table;
9821 ins->dreg = table_reg;
9822 MONO_ADD_INS (cfg->cbb, ins);
9825 /* FIXME: Use load_memindex */
9826 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9827 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9828 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9830 start_new_bblock = 1;
9831 inline_costs += (BRANCH_COST * 2);
9851 dreg = alloc_freg (cfg);
9854 dreg = alloc_lreg (cfg);
9857 dreg = alloc_ireg_ref (cfg);
9860 dreg = alloc_preg (cfg);
9863 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9864 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9865 if (*ip == CEE_LDIND_R4)
9866 ins->type = cfg->r4_stack_type;
9867 ins->flags |= ins_flag;
9868 MONO_ADD_INS (bblock, ins);
9870 if (ins_flag & MONO_INST_VOLATILE) {
9871 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9872 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9888 if (ins_flag & MONO_INST_VOLATILE) {
9889 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9890 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9893 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9894 ins->flags |= ins_flag;
9897 MONO_ADD_INS (bblock, ins);
9899 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9900 emit_write_barrier (cfg, sp [0], sp [1]);
9909 MONO_INST_NEW (cfg, ins, (*ip));
9911 ins->sreg1 = sp [0]->dreg;
9912 ins->sreg2 = sp [1]->dreg;
9913 type_from_op (cfg, ins, sp [0], sp [1]);
9915 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9917 /* Use the immediate opcodes if possible */
9918 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9919 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9920 if (imm_opcode != -1) {
9921 ins->opcode = imm_opcode;
9922 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9925 NULLIFY_INS (sp [1]);
9929 MONO_ADD_INS ((cfg)->cbb, (ins));
9931 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
9948 MONO_INST_NEW (cfg, ins, (*ip));
9950 ins->sreg1 = sp [0]->dreg;
9951 ins->sreg2 = sp [1]->dreg;
9952 type_from_op (cfg, ins, sp [0], sp [1]);
9954 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9955 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9957 /* FIXME: Pass opcode to is_inst_imm */
9959 /* Use the immediate opcodes if possible */
9960 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9963 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9964 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9965 /* Keep emulated opcodes which are optimized away later */
9966 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9967 imm_opcode = mono_op_to_op_imm (ins->opcode);
9970 if (imm_opcode != -1) {
9971 ins->opcode = imm_opcode;
9972 if (sp [1]->opcode == OP_I8CONST) {
9973 #if SIZEOF_REGISTER == 8
9974 ins->inst_imm = sp [1]->inst_l;
9976 ins->inst_ls_word = sp [1]->inst_ls_word;
9977 ins->inst_ms_word = sp [1]->inst_ms_word;
9981 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9984 /* Might be followed by an instruction added by add_widen_op */
9985 if (sp [1]->next == NULL)
9986 NULLIFY_INS (sp [1]);
9989 MONO_ADD_INS ((cfg)->cbb, (ins));
9991 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10004 case CEE_CONV_OVF_I8:
10005 case CEE_CONV_OVF_U8:
10006 case CEE_CONV_R_UN:
10009 /* Special case this earlier so we have long constants in the IR */
10010 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10011 int data = sp [-1]->inst_c0;
10012 sp [-1]->opcode = OP_I8CONST;
10013 sp [-1]->type = STACK_I8;
10014 #if SIZEOF_REGISTER == 8
10015 if ((*ip) == CEE_CONV_U8)
10016 sp [-1]->inst_c0 = (guint32)data;
10018 sp [-1]->inst_c0 = data;
10020 sp [-1]->inst_ls_word = data;
10021 if ((*ip) == CEE_CONV_U8)
10022 sp [-1]->inst_ms_word = 0;
10024 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10026 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10033 case CEE_CONV_OVF_I4:
10034 case CEE_CONV_OVF_I1:
10035 case CEE_CONV_OVF_I2:
10036 case CEE_CONV_OVF_I:
10037 case CEE_CONV_OVF_U:
10040 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10041 ADD_UNOP (CEE_CONV_OVF_I8);
10048 case CEE_CONV_OVF_U1:
10049 case CEE_CONV_OVF_U2:
10050 case CEE_CONV_OVF_U4:
10053 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10054 ADD_UNOP (CEE_CONV_OVF_U8);
10061 case CEE_CONV_OVF_I1_UN:
10062 case CEE_CONV_OVF_I2_UN:
10063 case CEE_CONV_OVF_I4_UN:
10064 case CEE_CONV_OVF_I8_UN:
10065 case CEE_CONV_OVF_U1_UN:
10066 case CEE_CONV_OVF_U2_UN:
10067 case CEE_CONV_OVF_U4_UN:
10068 case CEE_CONV_OVF_U8_UN:
10069 case CEE_CONV_OVF_I_UN:
10070 case CEE_CONV_OVF_U_UN:
10077 CHECK_CFG_EXCEPTION;
10081 case CEE_ADD_OVF_UN:
10083 case CEE_MUL_OVF_UN:
10085 case CEE_SUB_OVF_UN:
10091 GSHAREDVT_FAILURE (*ip);
10094 token = read32 (ip + 1);
10095 klass = mini_get_class (method, token, generic_context);
10096 CHECK_TYPELOAD (klass);
10098 if (generic_class_is_reference_type (cfg, klass)) {
10099 MonoInst *store, *load;
10100 int dreg = alloc_ireg_ref (cfg);
10102 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10103 load->flags |= ins_flag;
10104 MONO_ADD_INS (cfg->cbb, load);
10106 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10107 store->flags |= ins_flag;
10108 MONO_ADD_INS (cfg->cbb, store);
10110 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10111 emit_write_barrier (cfg, sp [0], sp [1]);
10113 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10119 int loc_index = -1;
10125 token = read32 (ip + 1);
10126 klass = mini_get_class (method, token, generic_context);
10127 CHECK_TYPELOAD (klass);
10129 /* Optimize the common ldobj+stloc combination */
10132 loc_index = ip [6];
10139 loc_index = ip [5] - CEE_STLOC_0;
10146 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10147 CHECK_LOCAL (loc_index);
10149 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10150 ins->dreg = cfg->locals [loc_index]->dreg;
10151 ins->flags |= ins_flag;
10154 if (ins_flag & MONO_INST_VOLATILE) {
10155 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10156 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10162 /* Optimize the ldobj+stobj combination */
10163 /* The reference case ends up being a load+store anyway */
10164 /* Skip this if the operation is volatile. */
10165 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10170 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10177 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10178 ins->flags |= ins_flag;
10181 if (ins_flag & MONO_INST_VOLATILE) {
10182 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10183 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10192 CHECK_STACK_OVF (1);
10194 n = read32 (ip + 1);
10196 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10197 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10198 ins->type = STACK_OBJ;
10201 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10202 MonoInst *iargs [1];
10203 char *str = mono_method_get_wrapper_data (method, n);
10205 if (cfg->compile_aot)
10206 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10208 EMIT_NEW_PCONST (cfg, iargs [0], str);
10209 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10211 if (cfg->opt & MONO_OPT_SHARED) {
10212 MonoInst *iargs [3];
10214 if (cfg->compile_aot) {
10215 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10217 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10218 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10219 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10220 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10221 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10223 if (bblock->out_of_line) {
10224 MonoInst *iargs [2];
10226 if (image == mono_defaults.corlib) {
10228 * Avoid relocations in AOT and save some space by using a
10229 * version of helper_ldstr specialized to mscorlib.
10231 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10232 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10234 /* Avoid creating the string object */
10235 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10236 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10237 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10241 if (cfg->compile_aot) {
10242 NEW_LDSTRCONST (cfg, ins, image, n);
10244 MONO_ADD_INS (bblock, ins);
10247 NEW_PCONST (cfg, ins, NULL);
10248 ins->type = STACK_OBJ;
10249 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10251 OUT_OF_MEMORY_FAILURE;
10254 MONO_ADD_INS (bblock, ins);
10263 MonoInst *iargs [2];
10264 MonoMethodSignature *fsig;
10267 MonoInst *vtable_arg = NULL;
10270 token = read32 (ip + 1);
10271 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10272 if (!cmethod || mono_loader_get_last_error ())
10274 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10277 mono_save_token_info (cfg, image, token, cmethod);
10279 if (!mono_class_init (cmethod->klass))
10280 TYPE_LOAD_ERROR (cmethod->klass);
10282 context_used = mini_method_check_context_used (cfg, cmethod);
10284 if (mono_security_core_clr_enabled ())
10285 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10287 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10288 emit_generic_class_init (cfg, cmethod->klass, &bblock);
10289 CHECK_TYPELOAD (cmethod->klass);
10293 if (cfg->gsharedvt) {
10294 if (mini_is_gsharedvt_variable_signature (sig))
10295 GSHAREDVT_FAILURE (*ip);
10299 n = fsig->param_count;
10303 * Generate smaller code for the common newobj <exception> instruction in
10304 * argument checking code.
10306 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10307 is_exception_class (cmethod->klass) && n <= 2 &&
10308 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10309 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10310 MonoInst *iargs [3];
10314 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10317 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10320 iargs [1] = sp [0];
10321 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10324 iargs [1] = sp [0];
10325 iargs [2] = sp [1];
10326 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10329 g_assert_not_reached ();
10337 /* move the args to allow room for 'this' in the first position */
10343 /* check_call_signature () requires sp[0] to be set */
10344 this_ins.type = STACK_OBJ;
10345 sp [0] = &this_ins;
10346 if (check_call_signature (cfg, fsig, sp))
10351 if (mini_class_is_system_array (cmethod->klass)) {
10352 *sp = emit_get_rgctx_method (cfg, context_used,
10353 cmethod, MONO_RGCTX_INFO_METHOD);
10355 /* Avoid varargs in the common case */
10356 if (fsig->param_count == 1)
10357 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10358 else if (fsig->param_count == 2)
10359 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10360 else if (fsig->param_count == 3)
10361 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10362 else if (fsig->param_count == 4)
10363 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10365 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10366 } else if (cmethod->string_ctor) {
10367 g_assert (!context_used);
10368 g_assert (!vtable_arg);
10369 /* we simply pass a null pointer */
10370 EMIT_NEW_PCONST (cfg, *sp, NULL);
10371 /* now call the string ctor */
10372 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10374 if (cmethod->klass->valuetype) {
10375 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10376 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10377 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10382 * The code generated by mini_emit_virtual_call () expects
10383 * iargs [0] to be a boxed instance, but luckily the vcall
10384 * will be transformed into a normal call there.
10386 } else if (context_used) {
10387 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10390 MonoVTable *vtable = NULL;
10392 if (!cfg->compile_aot)
10393 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10394 CHECK_TYPELOAD (cmethod->klass);
10397 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10398 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10399 * As a workaround, we call class cctors before allocating objects.
10401 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10402 emit_class_init (cfg, cmethod->klass, &bblock);
10403 if (cfg->verbose_level > 2)
10404 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10405 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10408 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10411 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10414 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10416 /* Now call the actual ctor */
10417 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10418 CHECK_CFG_EXCEPTION;
10421 if (alloc == NULL) {
10423 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10424 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10432 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10433 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10436 case CEE_CASTCLASS:
10440 token = read32 (ip + 1);
10441 klass = mini_get_class (method, token, generic_context);
10442 CHECK_TYPELOAD (klass);
10443 if (sp [0]->type != STACK_OBJ)
10446 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10447 CHECK_CFG_EXCEPTION;
10456 token = read32 (ip + 1);
10457 klass = mini_get_class (method, token, generic_context);
10458 CHECK_TYPELOAD (klass);
10459 if (sp [0]->type != STACK_OBJ)
10462 context_used = mini_class_check_context_used (cfg, klass);
10464 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10465 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10466 MonoInst *args [3];
10473 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10476 if (cfg->compile_aot) {
10477 idx = get_castclass_cache_idx (cfg);
10478 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10480 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10483 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10486 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10487 MonoMethod *mono_isinst;
10488 MonoInst *iargs [1];
10491 mono_isinst = mono_marshal_get_isinst (klass);
10492 iargs [0] = sp [0];
10494 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10495 iargs, ip, cfg->real_offset, TRUE, &bblock);
10496 CHECK_CFG_EXCEPTION;
10497 g_assert (costs > 0);
10500 cfg->real_offset += 5;
10504 inline_costs += costs;
10507 ins = handle_isinst (cfg, klass, *sp, context_used);
10508 CHECK_CFG_EXCEPTION;
10515 case CEE_UNBOX_ANY: {
10516 MonoInst *res, *addr;
10521 token = read32 (ip + 1);
10522 klass = mini_get_class (method, token, generic_context);
10523 CHECK_TYPELOAD (klass);
10525 mono_save_token_info (cfg, image, token, klass);
10527 context_used = mini_class_check_context_used (cfg, klass);
10529 if (mini_is_gsharedvt_klass (cfg, klass)) {
10530 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10532 } else if (generic_class_is_reference_type (cfg, klass)) {
10533 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10534 CHECK_CFG_EXCEPTION;
10535 } else if (mono_class_is_nullable (klass)) {
10536 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10538 addr = handle_unbox (cfg, klass, sp, context_used);
10540 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10551 MonoClass *enum_class;
10552 MonoMethod *has_flag;
10558 token = read32 (ip + 1);
10559 klass = mini_get_class (method, token, generic_context);
10560 CHECK_TYPELOAD (klass);
10562 mono_save_token_info (cfg, image, token, klass);
10564 context_used = mini_class_check_context_used (cfg, klass);
10566 if (generic_class_is_reference_type (cfg, klass)) {
10572 if (klass == mono_defaults.void_class)
10574 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10576 /* frequent check in generic code: box (struct), brtrue */
10581 * <push int/long ptr>
10584 * constrained. MyFlags
10585 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10587 * If we find this sequence and the operand types on box and constrained
10588 * are equal, we can emit a specialized instruction sequence instead of
10589 * the very slow HasFlag () call.
10591 if ((cfg->opt & MONO_OPT_INTRINS) &&
10592 /* Cheap checks first. */
10593 ip + 5 + 6 + 5 < end &&
10594 ip [5] == CEE_PREFIX1 &&
10595 ip [6] == CEE_CONSTRAINED_ &&
10596 ip [11] == CEE_CALLVIRT &&
10597 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10598 mono_class_is_enum (klass) &&
10599 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10600 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10601 has_flag->klass == mono_defaults.enum_class &&
10602 !strcmp (has_flag->name, "HasFlag") &&
10603 has_flag->signature->hasthis &&
10604 has_flag->signature->param_count == 1) {
10605 CHECK_TYPELOAD (enum_class);
10607 if (enum_class == klass) {
10608 MonoInst *enum_this, *enum_flag;
10613 enum_this = sp [0];
10614 enum_flag = sp [1];
10616 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10621 // FIXME: LLVM can't handle the inconsistent bb linking
10622 if (!mono_class_is_nullable (klass) &&
10623 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10624 (ip [5] == CEE_BRTRUE ||
10625 ip [5] == CEE_BRTRUE_S ||
10626 ip [5] == CEE_BRFALSE ||
10627 ip [5] == CEE_BRFALSE_S)) {
10628 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10630 MonoBasicBlock *true_bb, *false_bb;
10634 if (cfg->verbose_level > 3) {
10635 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10636 printf ("<box+brtrue opt>\n");
10641 case CEE_BRFALSE_S:
10644 target = ip + 1 + (signed char)(*ip);
10651 target = ip + 4 + (gint)(read32 (ip));
10655 g_assert_not_reached ();
10659 * We need to link both bblocks, since it is needed for handling stack
10660 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10661 * Branching to only one of them would lead to inconsistencies, so
10662 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10664 GET_BBLOCK (cfg, true_bb, target);
10665 GET_BBLOCK (cfg, false_bb, ip);
10667 mono_link_bblock (cfg, cfg->cbb, true_bb);
10668 mono_link_bblock (cfg, cfg->cbb, false_bb);
10670 if (sp != stack_start) {
10671 handle_stack_args (cfg, stack_start, sp - stack_start);
10673 CHECK_UNVERIFIABLE (cfg);
10676 if (COMPILE_LLVM (cfg)) {
10677 dreg = alloc_ireg (cfg);
10678 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10681 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10683 /* The JIT can't eliminate the iconst+compare */
10684 MONO_INST_NEW (cfg, ins, OP_BR);
10685 ins->inst_target_bb = is_true ? true_bb : false_bb;
10686 MONO_ADD_INS (cfg->cbb, ins);
10689 start_new_bblock = 1;
10693 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10695 CHECK_CFG_EXCEPTION;
10704 token = read32 (ip + 1);
10705 klass = mini_get_class (method, token, generic_context);
10706 CHECK_TYPELOAD (klass);
10708 mono_save_token_info (cfg, image, token, klass);
10710 context_used = mini_class_check_context_used (cfg, klass);
10712 if (mono_class_is_nullable (klass)) {
10715 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10716 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10720 ins = handle_unbox (cfg, klass, sp, context_used);
10733 MonoClassField *field;
10734 #ifndef DISABLE_REMOTING
10738 gboolean is_instance;
10740 gpointer addr = NULL;
10741 gboolean is_special_static;
10743 MonoInst *store_val = NULL;
10744 MonoInst *thread_ins;
10747 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10749 if (op == CEE_STFLD) {
10752 store_val = sp [1];
10757 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10759 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10762 if (op == CEE_STSFLD) {
10765 store_val = sp [0];
10770 token = read32 (ip + 1);
10771 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10772 field = mono_method_get_wrapper_data (method, token);
10773 klass = field->parent;
10776 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10779 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10780 FIELD_ACCESS_FAILURE (method, field);
10781 mono_class_init (klass);
10783 /* if the class is Critical then transparent code cannot access it's fields */
10784 if (!is_instance && mono_security_core_clr_enabled ())
10785 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10787 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10788 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10789 if (mono_security_core_clr_enabled ())
10790 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10794 * LDFLD etc. is usable on static fields as well, so convert those cases to
10797 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10809 g_assert_not_reached ();
10811 is_instance = FALSE;
10814 context_used = mini_class_check_context_used (cfg, klass);
10816 /* INSTANCE CASE */
10818 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10819 if (op == CEE_STFLD) {
10820 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10822 #ifndef DISABLE_REMOTING
10823 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10824 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10825 MonoInst *iargs [5];
10827 GSHAREDVT_FAILURE (op);
10829 iargs [0] = sp [0];
10830 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10831 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10832 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10834 iargs [4] = sp [1];
10836 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10837 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10838 iargs, ip, cfg->real_offset, TRUE, &bblock);
10839 CHECK_CFG_EXCEPTION;
10840 g_assert (costs > 0);
10842 cfg->real_offset += 5;
10844 inline_costs += costs;
10846 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10853 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10855 if (mini_is_gsharedvt_klass (cfg, klass)) {
10856 MonoInst *offset_ins;
10858 context_used = mini_class_check_context_used (cfg, klass);
10860 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10861 dreg = alloc_ireg_mp (cfg);
10862 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10863 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10864 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10866 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10868 if (sp [0]->opcode != OP_LDADDR)
10869 store->flags |= MONO_INST_FAULT;
10871 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10872 /* insert call to write barrier */
10876 dreg = alloc_ireg_mp (cfg);
10877 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10878 emit_write_barrier (cfg, ptr, sp [1]);
10881 store->flags |= ins_flag;
10888 #ifndef DISABLE_REMOTING
10889 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10890 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10891 MonoInst *iargs [4];
10893 GSHAREDVT_FAILURE (op);
10895 iargs [0] = sp [0];
10896 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10897 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10898 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10899 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10900 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10901 iargs, ip, cfg->real_offset, TRUE, &bblock);
10902 CHECK_CFG_EXCEPTION;
10903 g_assert (costs > 0);
10905 cfg->real_offset += 5;
10909 inline_costs += costs;
10911 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10917 if (sp [0]->type == STACK_VTYPE) {
10920 /* Have to compute the address of the variable */
10922 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10924 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10926 g_assert (var->klass == klass);
10928 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10932 if (op == CEE_LDFLDA) {
10933 if (sp [0]->type == STACK_OBJ) {
10934 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10935 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10938 dreg = alloc_ireg_mp (cfg);
10940 if (mini_is_gsharedvt_klass (cfg, klass)) {
10941 MonoInst *offset_ins;
10943 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10944 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10946 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10948 ins->klass = mono_class_from_mono_type (field->type);
10949 ins->type = STACK_MP;
10954 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10956 if (mini_is_gsharedvt_klass (cfg, klass)) {
10957 MonoInst *offset_ins;
10959 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10960 dreg = alloc_ireg_mp (cfg);
10961 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10962 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10964 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10966 load->flags |= ins_flag;
10967 if (sp [0]->opcode != OP_LDADDR)
10968 load->flags |= MONO_INST_FAULT;
10980 context_used = mini_class_check_context_used (cfg, klass);
10982 ftype = mono_field_get_type (field);
10984 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10987 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10988 * to be called here.
10990 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10991 mono_class_vtable (cfg->domain, klass);
10992 CHECK_TYPELOAD (klass);
10994 mono_domain_lock (cfg->domain);
10995 if (cfg->domain->special_static_fields)
10996 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10997 mono_domain_unlock (cfg->domain);
10999 is_special_static = mono_class_field_is_special_static (field);
11001 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11002 thread_ins = mono_get_thread_intrinsic (cfg);
11006 /* Generate IR to compute the field address */
11007 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11009 * Fast access to TLS data
11010 * Inline version of get_thread_static_data () in
11014 int idx, static_data_reg, array_reg, dreg;
11016 GSHAREDVT_FAILURE (op);
11018 MONO_ADD_INS (cfg->cbb, thread_ins);
11019 static_data_reg = alloc_ireg (cfg);
11020 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11022 if (cfg->compile_aot) {
11023 int offset_reg, offset2_reg, idx_reg;
11025 /* For TLS variables, this will return the TLS offset */
11026 EMIT_NEW_SFLDACONST (cfg, ins, field);
11027 offset_reg = ins->dreg;
11028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11029 idx_reg = alloc_ireg (cfg);
11030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11032 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11033 array_reg = alloc_ireg (cfg);
11034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11035 offset2_reg = alloc_ireg (cfg);
11036 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11038 dreg = alloc_ireg (cfg);
11039 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11041 offset = (gsize)addr & 0x7fffffff;
11042 idx = offset & 0x3f;
11044 array_reg = alloc_ireg (cfg);
11045 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11046 dreg = alloc_ireg (cfg);
11047 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11049 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11050 (cfg->compile_aot && is_special_static) ||
11051 (context_used && is_special_static)) {
11052 MonoInst *iargs [2];
11054 g_assert (field->parent);
11055 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11056 if (context_used) {
11057 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11058 field, MONO_RGCTX_INFO_CLASS_FIELD);
11060 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11062 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11063 } else if (context_used) {
11064 MonoInst *static_data;
11067 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11068 method->klass->name_space, method->klass->name, method->name,
11069 depth, field->offset);
11072 if (mono_class_needs_cctor_run (klass, method))
11073 emit_generic_class_init (cfg, klass, &bblock);
11076 * The pointer we're computing here is
11078 * super_info.static_data + field->offset
11080 static_data = emit_get_rgctx_klass (cfg, context_used,
11081 klass, MONO_RGCTX_INFO_STATIC_DATA);
11083 if (mini_is_gsharedvt_klass (cfg, klass)) {
11084 MonoInst *offset_ins;
11086 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11087 dreg = alloc_ireg_mp (cfg);
11088 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11089 } else if (field->offset == 0) {
11092 int addr_reg = mono_alloc_preg (cfg);
11093 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11095 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11096 MonoInst *iargs [2];
11098 g_assert (field->parent);
11099 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11100 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11101 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11103 MonoVTable *vtable = NULL;
11105 if (!cfg->compile_aot)
11106 vtable = mono_class_vtable (cfg->domain, klass);
11107 CHECK_TYPELOAD (klass);
11110 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11111 if (!(g_slist_find (class_inits, klass))) {
11112 emit_class_init (cfg, klass, &bblock);
11113 if (cfg->verbose_level > 2)
11114 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11115 class_inits = g_slist_prepend (class_inits, klass);
11118 if (cfg->run_cctors) {
11120 /* This makes so that inline cannot trigger */
11121 /* .cctors: too many apps depend on them */
11122 /* running with a specific order... */
11124 if (! vtable->initialized)
11125 INLINE_FAILURE ("class init");
11126 ex = mono_runtime_class_init_full (vtable, FALSE);
11128 set_exception_object (cfg, ex);
11129 goto exception_exit;
11133 if (cfg->compile_aot)
11134 EMIT_NEW_SFLDACONST (cfg, ins, field);
11137 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11139 EMIT_NEW_PCONST (cfg, ins, addr);
11142 MonoInst *iargs [1];
11143 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11144 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11148 /* Generate IR to do the actual load/store operation */
11150 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11151 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11152 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11155 if (op == CEE_LDSFLDA) {
11156 ins->klass = mono_class_from_mono_type (ftype);
11157 ins->type = STACK_PTR;
11159 } else if (op == CEE_STSFLD) {
11162 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11163 store->flags |= ins_flag;
11165 gboolean is_const = FALSE;
11166 MonoVTable *vtable = NULL;
11167 gpointer addr = NULL;
11169 if (!context_used) {
11170 vtable = mono_class_vtable (cfg->domain, klass);
11171 CHECK_TYPELOAD (klass);
11173 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11174 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11175 int ro_type = ftype->type;
11177 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11178 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11179 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11182 GSHAREDVT_FAILURE (op);
11184 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11187 case MONO_TYPE_BOOLEAN:
11189 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11193 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11196 case MONO_TYPE_CHAR:
11198 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11202 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11207 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11211 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11216 case MONO_TYPE_PTR:
11217 case MONO_TYPE_FNPTR:
11218 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11219 type_to_eval_stack_type ((cfg), field->type, *sp);
11222 case MONO_TYPE_STRING:
11223 case MONO_TYPE_OBJECT:
11224 case MONO_TYPE_CLASS:
11225 case MONO_TYPE_SZARRAY:
11226 case MONO_TYPE_ARRAY:
11227 if (!mono_gc_is_moving ()) {
11228 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11229 type_to_eval_stack_type ((cfg), field->type, *sp);
11237 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11242 case MONO_TYPE_VALUETYPE:
11252 CHECK_STACK_OVF (1);
11254 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11255 load->flags |= ins_flag;
11261 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11262 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11263 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11274 token = read32 (ip + 1);
11275 klass = mini_get_class (method, token, generic_context);
11276 CHECK_TYPELOAD (klass);
11277 if (ins_flag & MONO_INST_VOLATILE) {
11278 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11279 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11281 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11282 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11283 ins->flags |= ins_flag;
11284 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11285 generic_class_is_reference_type (cfg, klass)) {
11286 /* insert call to write barrier */
11287 emit_write_barrier (cfg, sp [0], sp [1]);
11299 const char *data_ptr;
11301 guint32 field_token;
11307 token = read32 (ip + 1);
11309 klass = mini_get_class (method, token, generic_context);
11310 CHECK_TYPELOAD (klass);
11312 context_used = mini_class_check_context_used (cfg, klass);
11314 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11315 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11316 ins->sreg1 = sp [0]->dreg;
11317 ins->type = STACK_I4;
11318 ins->dreg = alloc_ireg (cfg);
11319 MONO_ADD_INS (cfg->cbb, ins);
11320 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11323 if (context_used) {
11324 MonoInst *args [3];
11325 MonoClass *array_class = mono_array_class_get (klass, 1);
11326 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11328 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11331 args [0] = emit_get_rgctx_klass (cfg, context_used,
11332 array_class, MONO_RGCTX_INFO_VTABLE);
11337 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11339 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11341 if (cfg->opt & MONO_OPT_SHARED) {
11342 /* Decompose now to avoid problems with references to the domainvar */
11343 MonoInst *iargs [3];
11345 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11346 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11347 iargs [2] = sp [0];
11349 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11351 /* Decompose later since it is needed by abcrem */
11352 MonoClass *array_type = mono_array_class_get (klass, 1);
11353 mono_class_vtable (cfg->domain, array_type);
11354 CHECK_TYPELOAD (array_type);
11356 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11357 ins->dreg = alloc_ireg_ref (cfg);
11358 ins->sreg1 = sp [0]->dreg;
11359 ins->inst_newa_class = klass;
11360 ins->type = STACK_OBJ;
11361 ins->klass = array_type;
11362 MONO_ADD_INS (cfg->cbb, ins);
11363 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11364 cfg->cbb->has_array_access = TRUE;
11366 /* Needed so mono_emit_load_get_addr () gets called */
11367 mono_get_got_var (cfg);
11377 * we inline/optimize the initialization sequence if possible.
11378 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11379 * for small sizes open code the memcpy
11380 * ensure the rva field is big enough
11382 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11383 MonoMethod *memcpy_method = get_memcpy_method ();
11384 MonoInst *iargs [3];
11385 int add_reg = alloc_ireg_mp (cfg);
11387 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11388 if (cfg->compile_aot) {
11389 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11391 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11393 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11394 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11403 if (sp [0]->type != STACK_OBJ)
11406 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11407 ins->dreg = alloc_preg (cfg);
11408 ins->sreg1 = sp [0]->dreg;
11409 ins->type = STACK_I4;
11410 /* This flag will be inherited by the decomposition */
11411 ins->flags |= MONO_INST_FAULT;
11412 MONO_ADD_INS (cfg->cbb, ins);
11413 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11414 cfg->cbb->has_array_access = TRUE;
11422 if (sp [0]->type != STACK_OBJ)
11425 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11427 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11428 CHECK_TYPELOAD (klass);
11429 /* we need to make sure that this array is exactly the type it needs
11430 * to be for correctness. the wrappers are lax with their usage
11431 * so we need to ignore them here
11433 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11434 MonoClass *array_class = mono_array_class_get (klass, 1);
11435 mini_emit_check_array_type (cfg, sp [0], array_class);
11436 CHECK_TYPELOAD (array_class);
11440 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11445 case CEE_LDELEM_I1:
11446 case CEE_LDELEM_U1:
11447 case CEE_LDELEM_I2:
11448 case CEE_LDELEM_U2:
11449 case CEE_LDELEM_I4:
11450 case CEE_LDELEM_U4:
11451 case CEE_LDELEM_I8:
11453 case CEE_LDELEM_R4:
11454 case CEE_LDELEM_R8:
11455 case CEE_LDELEM_REF: {
11461 if (*ip == CEE_LDELEM) {
11463 token = read32 (ip + 1);
11464 klass = mini_get_class (method, token, generic_context);
11465 CHECK_TYPELOAD (klass);
11466 mono_class_init (klass);
11469 klass = array_access_to_klass (*ip);
11471 if (sp [0]->type != STACK_OBJ)
11474 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11476 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11477 // FIXME-VT: OP_ICONST optimization
11478 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11479 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11480 ins->opcode = OP_LOADV_MEMBASE;
11481 } else if (sp [1]->opcode == OP_ICONST) {
11482 int array_reg = sp [0]->dreg;
11483 int index_reg = sp [1]->dreg;
11484 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11486 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11487 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11489 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11490 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11493 if (*ip == CEE_LDELEM)
11500 case CEE_STELEM_I1:
11501 case CEE_STELEM_I2:
11502 case CEE_STELEM_I4:
11503 case CEE_STELEM_I8:
11504 case CEE_STELEM_R4:
11505 case CEE_STELEM_R8:
11506 case CEE_STELEM_REF:
11511 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11513 if (*ip == CEE_STELEM) {
11515 token = read32 (ip + 1);
11516 klass = mini_get_class (method, token, generic_context);
11517 CHECK_TYPELOAD (klass);
11518 mono_class_init (klass);
11521 klass = array_access_to_klass (*ip);
11523 if (sp [0]->type != STACK_OBJ)
11526 emit_array_store (cfg, klass, sp, TRUE);
11528 if (*ip == CEE_STELEM)
11535 case CEE_CKFINITE: {
11539 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11540 ins->sreg1 = sp [0]->dreg;
11541 ins->dreg = alloc_freg (cfg);
11542 ins->type = STACK_R8;
11543 MONO_ADD_INS (bblock, ins);
11545 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11550 case CEE_REFANYVAL: {
11551 MonoInst *src_var, *src;
11553 int klass_reg = alloc_preg (cfg);
11554 int dreg = alloc_preg (cfg);
11556 GSHAREDVT_FAILURE (*ip);
11559 MONO_INST_NEW (cfg, ins, *ip);
11562 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11563 CHECK_TYPELOAD (klass);
11565 context_used = mini_class_check_context_used (cfg, klass);
11568 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11570 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11571 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11574 if (context_used) {
11575 MonoInst *klass_ins;
11577 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11578 klass, MONO_RGCTX_INFO_KLASS);
11581 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11582 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11584 mini_emit_class_check (cfg, klass_reg, klass);
11586 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11587 ins->type = STACK_MP;
11592 case CEE_MKREFANY: {
11593 MonoInst *loc, *addr;
11595 GSHAREDVT_FAILURE (*ip);
11598 MONO_INST_NEW (cfg, ins, *ip);
11601 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11602 CHECK_TYPELOAD (klass);
11604 context_used = mini_class_check_context_used (cfg, klass);
11606 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11607 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11609 if (context_used) {
11610 MonoInst *const_ins;
11611 int type_reg = alloc_preg (cfg);
11613 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11614 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11616 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11617 } else if (cfg->compile_aot) {
11618 int const_reg = alloc_preg (cfg);
11619 int type_reg = alloc_preg (cfg);
11621 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11622 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11624 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11626 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11627 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11631 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11632 ins->type = STACK_VTYPE;
11633 ins->klass = mono_defaults.typed_reference_class;
11638 case CEE_LDTOKEN: {
11640 MonoClass *handle_class;
11642 CHECK_STACK_OVF (1);
11645 n = read32 (ip + 1);
11647 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11648 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11649 handle = mono_method_get_wrapper_data (method, n);
11650 handle_class = mono_method_get_wrapper_data (method, n + 1);
11651 if (handle_class == mono_defaults.typehandle_class)
11652 handle = &((MonoClass*)handle)->byval_arg;
11655 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11660 mono_class_init (handle_class);
11661 if (cfg->generic_sharing_context) {
11662 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11663 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11664 /* This case handles ldtoken
11665 of an open type, like for
11668 } else if (handle_class == mono_defaults.typehandle_class) {
11669 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11670 } else if (handle_class == mono_defaults.fieldhandle_class)
11671 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11672 else if (handle_class == mono_defaults.methodhandle_class)
11673 context_used = mini_method_check_context_used (cfg, handle);
11675 g_assert_not_reached ();
11678 if ((cfg->opt & MONO_OPT_SHARED) &&
11679 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11680 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11681 MonoInst *addr, *vtvar, *iargs [3];
11682 int method_context_used;
11684 method_context_used = mini_method_check_context_used (cfg, method);
11686 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11688 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11689 EMIT_NEW_ICONST (cfg, iargs [1], n);
11690 if (method_context_used) {
11691 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11692 method, MONO_RGCTX_INFO_METHOD);
11693 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11695 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11696 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11698 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11702 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11704 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11705 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11706 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11707 (cmethod->klass == mono_defaults.systemtype_class) &&
11708 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11709 MonoClass *tclass = mono_class_from_mono_type (handle);
11711 mono_class_init (tclass);
11712 if (context_used) {
11713 ins = emit_get_rgctx_klass (cfg, context_used,
11714 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11715 } else if (cfg->compile_aot) {
11716 if (method->wrapper_type) {
11717 mono_error_init (&error); //got to do it since there are multiple conditionals below
11718 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11719 /* Special case for static synchronized wrappers */
11720 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11722 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11723 /* FIXME: n is not a normal token */
11725 EMIT_NEW_PCONST (cfg, ins, NULL);
11728 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11731 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11733 ins->type = STACK_OBJ;
11734 ins->klass = cmethod->klass;
11737 MonoInst *addr, *vtvar;
11739 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11741 if (context_used) {
11742 if (handle_class == mono_defaults.typehandle_class) {
11743 ins = emit_get_rgctx_klass (cfg, context_used,
11744 mono_class_from_mono_type (handle),
11745 MONO_RGCTX_INFO_TYPE);
11746 } else if (handle_class == mono_defaults.methodhandle_class) {
11747 ins = emit_get_rgctx_method (cfg, context_used,
11748 handle, MONO_RGCTX_INFO_METHOD);
11749 } else if (handle_class == mono_defaults.fieldhandle_class) {
11750 ins = emit_get_rgctx_field (cfg, context_used,
11751 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11753 g_assert_not_reached ();
11755 } else if (cfg->compile_aot) {
11756 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11758 EMIT_NEW_PCONST (cfg, ins, handle);
11760 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11761 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11762 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11772 MONO_INST_NEW (cfg, ins, OP_THROW);
11774 ins->sreg1 = sp [0]->dreg;
11776 bblock->out_of_line = TRUE;
11777 MONO_ADD_INS (bblock, ins);
11778 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11779 MONO_ADD_INS (bblock, ins);
11782 link_bblock (cfg, bblock, end_bblock);
11783 start_new_bblock = 1;
11785 case CEE_ENDFINALLY:
11786 /* mono_save_seq_point_info () depends on this */
11787 if (sp != stack_start)
11788 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11789 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11790 MONO_ADD_INS (bblock, ins);
11792 start_new_bblock = 1;
11795 * Control will leave the method so empty the stack, otherwise
11796 * the next basic block will start with a nonempty stack.
11798 while (sp != stack_start) {
11803 case CEE_LEAVE_S: {
11806 if (*ip == CEE_LEAVE) {
11808 target = ip + 5 + (gint32)read32(ip + 1);
11811 target = ip + 2 + (signed char)(ip [1]);
11814 /* empty the stack */
11815 while (sp != stack_start) {
11820 * If this leave statement is in a catch block, check for a
11821 * pending exception, and rethrow it if necessary.
11822 * We avoid doing this in runtime invoke wrappers, since those are called
11823 * by native code which expects the wrapper to catch all exceptions.
11825 for (i = 0; i < header->num_clauses; ++i) {
11826 MonoExceptionClause *clause = &header->clauses [i];
11829 * Use <= in the final comparison to handle clauses with multiple
11830 * leave statements, like in bug #78024.
11831 * The ordering of the exception clauses guarantees that we find the
11832 * innermost clause.
11834 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11836 MonoBasicBlock *dont_throw;
11841 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11844 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11846 NEW_BBLOCK (cfg, dont_throw);
11849 * Currently, we always rethrow the abort exception, despite the
11850 * fact that this is not correct. See thread6.cs for an example.
11851 * But propagating the abort exception is more important than
11852 * getting the semantics right.
11854 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11855 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11856 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11858 MONO_START_BB (cfg, dont_throw);
11863 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11865 MonoExceptionClause *clause;
11867 for (tmp = handlers; tmp; tmp = tmp->next) {
11868 clause = tmp->data;
11869 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11871 link_bblock (cfg, bblock, tblock);
11872 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11873 ins->inst_target_bb = tblock;
11874 ins->inst_eh_block = clause;
11875 MONO_ADD_INS (bblock, ins);
11876 bblock->has_call_handler = 1;
11877 if (COMPILE_LLVM (cfg)) {
11878 MonoBasicBlock *target_bb;
11881 * Link the finally bblock with the target, since it will
11882 * conceptually branch there.
11883 * FIXME: Have to link the bblock containing the endfinally.
11885 GET_BBLOCK (cfg, target_bb, target);
11886 link_bblock (cfg, tblock, target_bb);
11889 g_list_free (handlers);
11892 MONO_INST_NEW (cfg, ins, OP_BR);
11893 MONO_ADD_INS (bblock, ins);
11894 GET_BBLOCK (cfg, tblock, target);
11895 link_bblock (cfg, bblock, tblock);
11896 ins->inst_target_bb = tblock;
11897 start_new_bblock = 1;
11899 if (*ip == CEE_LEAVE)
11908 * Mono specific opcodes
11910 case MONO_CUSTOM_PREFIX: {
11912 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11916 case CEE_MONO_ICALL: {
11918 MonoJitICallInfo *info;
11920 token = read32 (ip + 2);
11921 func = mono_method_get_wrapper_data (method, token);
11922 info = mono_find_jit_icall_by_addr (func);
11924 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11927 CHECK_STACK (info->sig->param_count);
11928 sp -= info->sig->param_count;
11930 ins = mono_emit_jit_icall (cfg, info->func, sp);
11931 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11935 inline_costs += 10 * num_calls++;
11939 case CEE_MONO_LDPTR_CARD_TABLE: {
11941 gpointer card_mask;
11942 CHECK_STACK_OVF (1);
11944 if (cfg->compile_aot)
11945 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11947 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
11951 inline_costs += 10 * num_calls++;
11954 case CEE_MONO_LDPTR_NURSERY_START: {
11957 CHECK_STACK_OVF (1);
11959 if (cfg->compile_aot)
11960 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11962 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
11966 inline_costs += 10 * num_calls++;
11969 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11970 CHECK_STACK_OVF (1);
11972 if (cfg->compile_aot)
11973 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11975 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
11979 inline_costs += 10 * num_calls++;
11982 case CEE_MONO_LDPTR: {
11985 CHECK_STACK_OVF (1);
11987 token = read32 (ip + 2);
11989 ptr = mono_method_get_wrapper_data (method, token);
11990 EMIT_NEW_PCONST (cfg, ins, ptr);
11993 inline_costs += 10 * num_calls++;
11994 /* Can't embed random pointers into AOT code */
11998 case CEE_MONO_JIT_ICALL_ADDR: {
11999 MonoJitICallInfo *callinfo;
12002 CHECK_STACK_OVF (1);
12004 token = read32 (ip + 2);
12006 ptr = mono_method_get_wrapper_data (method, token);
12007 callinfo = mono_find_jit_icall_by_addr (ptr);
12008 g_assert (callinfo);
12009 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12012 inline_costs += 10 * num_calls++;
12015 case CEE_MONO_ICALL_ADDR: {
12016 MonoMethod *cmethod;
12019 CHECK_STACK_OVF (1);
12021 token = read32 (ip + 2);
12023 cmethod = mono_method_get_wrapper_data (method, token);
12025 if (cfg->compile_aot) {
12026 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12028 ptr = mono_lookup_internal_call (cmethod);
12030 EMIT_NEW_PCONST (cfg, ins, ptr);
12036 case CEE_MONO_VTADDR: {
12037 MonoInst *src_var, *src;
12043 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12044 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12049 case CEE_MONO_NEWOBJ: {
12050 MonoInst *iargs [2];
12052 CHECK_STACK_OVF (1);
12054 token = read32 (ip + 2);
12055 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12056 mono_class_init (klass);
12057 NEW_DOMAINCONST (cfg, iargs [0]);
12058 MONO_ADD_INS (cfg->cbb, iargs [0]);
12059 NEW_CLASSCONST (cfg, iargs [1], klass);
12060 MONO_ADD_INS (cfg->cbb, iargs [1]);
12061 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12063 inline_costs += 10 * num_calls++;
12066 case CEE_MONO_OBJADDR:
12069 MONO_INST_NEW (cfg, ins, OP_MOVE);
12070 ins->dreg = alloc_ireg_mp (cfg);
12071 ins->sreg1 = sp [0]->dreg;
12072 ins->type = STACK_MP;
12073 MONO_ADD_INS (cfg->cbb, ins);
12077 case CEE_MONO_LDNATIVEOBJ:
12079 * Similar to LDOBJ, but instead load the unmanaged
12080 * representation of the vtype to the stack.
12085 token = read32 (ip + 2);
12086 klass = mono_method_get_wrapper_data (method, token);
12087 g_assert (klass->valuetype);
12088 mono_class_init (klass);
12091 MonoInst *src, *dest, *temp;
12094 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12095 temp->backend.is_pinvoke = 1;
12096 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12097 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12099 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12100 dest->type = STACK_VTYPE;
12101 dest->klass = klass;
12107 case CEE_MONO_RETOBJ: {
12109 * Same as RET, but return the native representation of a vtype
12112 g_assert (cfg->ret);
12113 g_assert (mono_method_signature (method)->pinvoke);
12118 token = read32 (ip + 2);
12119 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12121 if (!cfg->vret_addr) {
12122 g_assert (cfg->ret_var_is_local);
12124 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12126 EMIT_NEW_RETLOADA (cfg, ins);
12128 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12130 if (sp != stack_start)
12133 MONO_INST_NEW (cfg, ins, OP_BR);
12134 ins->inst_target_bb = end_bblock;
12135 MONO_ADD_INS (bblock, ins);
12136 link_bblock (cfg, bblock, end_bblock);
12137 start_new_bblock = 1;
12141 case CEE_MONO_CISINST:
12142 case CEE_MONO_CCASTCLASS: {
12147 token = read32 (ip + 2);
12148 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12149 if (ip [1] == CEE_MONO_CISINST)
12150 ins = handle_cisinst (cfg, klass, sp [0]);
12152 ins = handle_ccastclass (cfg, klass, sp [0]);
12158 case CEE_MONO_SAVE_LMF:
12159 case CEE_MONO_RESTORE_LMF:
12160 #ifdef MONO_ARCH_HAVE_LMF_OPS
12161 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12162 MONO_ADD_INS (bblock, ins);
12163 cfg->need_lmf_area = TRUE;
12167 case CEE_MONO_CLASSCONST:
12168 CHECK_STACK_OVF (1);
12170 token = read32 (ip + 2);
12171 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12174 inline_costs += 10 * num_calls++;
12176 case CEE_MONO_NOT_TAKEN:
12177 bblock->out_of_line = TRUE;
12180 case CEE_MONO_TLS: {
12183 CHECK_STACK_OVF (1);
12185 key = (gint32)read32 (ip + 2);
12186 g_assert (key < TLS_KEY_NUM);
12188 ins = mono_create_tls_get (cfg, key);
12190 if (cfg->compile_aot) {
12192 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12193 ins->dreg = alloc_preg (cfg);
12194 ins->type = STACK_PTR;
12196 g_assert_not_reached ();
12199 ins->type = STACK_PTR;
12200 MONO_ADD_INS (bblock, ins);
12205 case CEE_MONO_DYN_CALL: {
12206 MonoCallInst *call;
12208 /* It would be easier to call a trampoline, but that would put an
12209 * extra frame on the stack, confusing exception handling. So
12210 * implement it inline using an opcode for now.
12213 if (!cfg->dyn_call_var) {
12214 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12215 /* prevent it from being register allocated */
12216 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12219 /* Has to use a call inst since the local register allocator expects it */
12220 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12221 ins = (MonoInst*)call;
12223 ins->sreg1 = sp [0]->dreg;
12224 ins->sreg2 = sp [1]->dreg;
12225 MONO_ADD_INS (bblock, ins);
12227 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12230 inline_costs += 10 * num_calls++;
12234 case CEE_MONO_MEMORY_BARRIER: {
12236 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12240 case CEE_MONO_JIT_ATTACH: {
12241 MonoInst *args [16], *domain_ins;
12242 MonoInst *ad_ins, *jit_tls_ins;
12243 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12245 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12247 EMIT_NEW_PCONST (cfg, ins, NULL);
12248 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12250 ad_ins = mono_get_domain_intrinsic (cfg);
12251 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12253 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12254 NEW_BBLOCK (cfg, next_bb);
12255 NEW_BBLOCK (cfg, call_bb);
12257 if (cfg->compile_aot) {
12258 /* AOT code is only used in the root domain */
12259 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12261 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12263 MONO_ADD_INS (cfg->cbb, ad_ins);
12264 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12267 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12268 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12269 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12271 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12272 MONO_START_BB (cfg, call_bb);
12275 if (cfg->compile_aot) {
12276 /* AOT code is only used in the root domain */
12277 EMIT_NEW_PCONST (cfg, args [0], NULL);
12279 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12281 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12282 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12285 MONO_START_BB (cfg, next_bb);
12291 case CEE_MONO_JIT_DETACH: {
12292 MonoInst *args [16];
12294 /* Restore the original domain */
12295 dreg = alloc_ireg (cfg);
12296 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12297 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12302 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12308 case CEE_PREFIX1: {
12311 case CEE_ARGLIST: {
12312 /* somewhat similar to LDTOKEN */
12313 MonoInst *addr, *vtvar;
12314 CHECK_STACK_OVF (1);
12315 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12317 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12318 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12320 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12321 ins->type = STACK_VTYPE;
12322 ins->klass = mono_defaults.argumenthandle_class;
12332 MonoInst *cmp, *arg1, *arg2;
12340 * The following transforms:
12341 * CEE_CEQ into OP_CEQ
12342 * CEE_CGT into OP_CGT
12343 * CEE_CGT_UN into OP_CGT_UN
12344 * CEE_CLT into OP_CLT
12345 * CEE_CLT_UN into OP_CLT_UN
12347 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12349 MONO_INST_NEW (cfg, ins, cmp->opcode);
12350 cmp->sreg1 = arg1->dreg;
12351 cmp->sreg2 = arg2->dreg;
12352 type_from_op (cfg, cmp, arg1, arg2);
12354 add_widen_op (cfg, cmp, &arg1, &arg2);
12355 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12356 cmp->opcode = OP_LCOMPARE;
12357 else if (arg1->type == STACK_R4)
12358 cmp->opcode = OP_RCOMPARE;
12359 else if (arg1->type == STACK_R8)
12360 cmp->opcode = OP_FCOMPARE;
12362 cmp->opcode = OP_ICOMPARE;
12363 MONO_ADD_INS (bblock, cmp);
12364 ins->type = STACK_I4;
12365 ins->dreg = alloc_dreg (cfg, ins->type);
12366 type_from_op (cfg, ins, arg1, arg2);
12368 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12370 * The backends expect the fceq opcodes to do the
12373 ins->sreg1 = cmp->sreg1;
12374 ins->sreg2 = cmp->sreg2;
12377 MONO_ADD_INS (bblock, ins);
12383 MonoInst *argconst;
12384 MonoMethod *cil_method;
12386 CHECK_STACK_OVF (1);
12388 n = read32 (ip + 2);
12389 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12390 if (!cmethod || mono_loader_get_last_error ())
12392 mono_class_init (cmethod->klass);
12394 mono_save_token_info (cfg, image, n, cmethod);
12396 context_used = mini_method_check_context_used (cfg, cmethod);
12398 cil_method = cmethod;
12399 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12400 METHOD_ACCESS_FAILURE (method, cil_method);
12402 if (mono_security_core_clr_enabled ())
12403 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12406 * Optimize the common case of ldftn+delegate creation
12408 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12409 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12410 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12411 MonoInst *target_ins, *handle_ins;
12412 MonoMethod *invoke;
12413 int invoke_context_used;
12415 invoke = mono_get_delegate_invoke (ctor_method->klass);
12416 if (!invoke || !mono_method_signature (invoke))
12419 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12421 target_ins = sp [-1];
12423 if (mono_security_core_clr_enabled ())
12424 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12426 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12427 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12428 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12430 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12434 /* FIXME: SGEN support */
12435 if (invoke_context_used == 0) {
12437 if (cfg->verbose_level > 3)
12438 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12439 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12442 CHECK_CFG_EXCEPTION;
12452 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12453 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12457 inline_costs += 10 * num_calls++;
12460 case CEE_LDVIRTFTN: {
12461 MonoInst *args [2];
12465 n = read32 (ip + 2);
12466 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12467 if (!cmethod || mono_loader_get_last_error ())
12469 mono_class_init (cmethod->klass);
12471 context_used = mini_method_check_context_used (cfg, cmethod);
12473 if (mono_security_core_clr_enabled ())
12474 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12477 * Optimize the common case of ldvirtftn+delegate creation
12479 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12480 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12481 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12482 MonoInst *target_ins, *handle_ins;
12483 MonoMethod *invoke;
12484 int invoke_context_used;
12485 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12487 invoke = mono_get_delegate_invoke (ctor_method->klass);
12488 if (!invoke || !mono_method_signature (invoke))
12491 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12493 target_ins = sp [-1];
12495 if (mono_security_core_clr_enabled ())
12496 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12498 /* FIXME: SGEN support */
12499 if (invoke_context_used == 0) {
12501 if (cfg->verbose_level > 3)
12502 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12503 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12506 CHECK_CFG_EXCEPTION;
12519 args [1] = emit_get_rgctx_method (cfg, context_used,
12520 cmethod, MONO_RGCTX_INFO_METHOD);
12523 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12525 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12528 inline_costs += 10 * num_calls++;
12532 CHECK_STACK_OVF (1);
12534 n = read16 (ip + 2);
12536 EMIT_NEW_ARGLOAD (cfg, ins, n);
12541 CHECK_STACK_OVF (1);
12543 n = read16 (ip + 2);
12545 NEW_ARGLOADA (cfg, ins, n);
12546 MONO_ADD_INS (cfg->cbb, ins);
12554 n = read16 (ip + 2);
12556 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12558 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12562 CHECK_STACK_OVF (1);
12564 n = read16 (ip + 2);
12566 EMIT_NEW_LOCLOAD (cfg, ins, n);
12571 unsigned char *tmp_ip;
12572 CHECK_STACK_OVF (1);
12574 n = read16 (ip + 2);
12577 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12583 EMIT_NEW_LOCLOADA (cfg, ins, n);
12592 n = read16 (ip + 2);
12594 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12596 emit_stloc_ir (cfg, sp, header, n);
12603 if (sp != stack_start)
12605 if (cfg->method != method)
12607 * Inlining this into a loop in a parent could lead to
12608 * stack overflows which is different behavior than the
12609 * non-inlined case, thus disable inlining in this case.
12611 INLINE_FAILURE("localloc");
12613 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12614 ins->dreg = alloc_preg (cfg);
12615 ins->sreg1 = sp [0]->dreg;
12616 ins->type = STACK_PTR;
12617 MONO_ADD_INS (cfg->cbb, ins);
12619 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12621 ins->flags |= MONO_INST_INIT;
12626 case CEE_ENDFILTER: {
12627 MonoExceptionClause *clause, *nearest;
12632 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12634 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12635 ins->sreg1 = (*sp)->dreg;
12636 MONO_ADD_INS (bblock, ins);
12637 start_new_bblock = 1;
12641 for (cc = 0; cc < header->num_clauses; ++cc) {
12642 clause = &header->clauses [cc];
12643 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12644 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12645 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12648 g_assert (nearest);
12649 if ((ip - header->code) != nearest->handler_offset)
12654 case CEE_UNALIGNED_:
12655 ins_flag |= MONO_INST_UNALIGNED;
12656 /* FIXME: record alignment? we can assume 1 for now */
12660 case CEE_VOLATILE_:
12661 ins_flag |= MONO_INST_VOLATILE;
12665 ins_flag |= MONO_INST_TAILCALL;
12666 cfg->flags |= MONO_CFG_HAS_TAIL;
12667 /* Can't inline tail calls at this time */
12668 inline_costs += 100000;
12675 token = read32 (ip + 2);
12676 klass = mini_get_class (method, token, generic_context);
12677 CHECK_TYPELOAD (klass);
12678 if (generic_class_is_reference_type (cfg, klass))
12679 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12681 mini_emit_initobj (cfg, *sp, NULL, klass);
12685 case CEE_CONSTRAINED_:
12687 token = read32 (ip + 2);
12688 constrained_class = mini_get_class (method, token, generic_context);
12689 CHECK_TYPELOAD (constrained_class);
12693 case CEE_INITBLK: {
12694 MonoInst *iargs [3];
12698 /* Skip optimized paths for volatile operations. */
12699 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12700 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12701 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12702 /* emit_memset only works when val == 0 */
12703 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12706 iargs [0] = sp [0];
12707 iargs [1] = sp [1];
12708 iargs [2] = sp [2];
12709 if (ip [1] == CEE_CPBLK) {
12711 * FIXME: It's unclear whether we should be emitting both the acquire
12712 * and release barriers for cpblk. It is technically both a load and
12713 * store operation, so it seems like that's the sensible thing to do.
12715 * FIXME: We emit full barriers on both sides of the operation for
12716 * simplicity. We should have a separate atomic memcpy method instead.
12718 MonoMethod *memcpy_method = get_memcpy_method ();
12720 if (ins_flag & MONO_INST_VOLATILE)
12721 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12723 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12724 call->flags |= ins_flag;
12726 if (ins_flag & MONO_INST_VOLATILE)
12727 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12729 MonoMethod *memset_method = get_memset_method ();
12730 if (ins_flag & MONO_INST_VOLATILE) {
12731 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12732 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12734 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12735 call->flags |= ins_flag;
12746 ins_flag |= MONO_INST_NOTYPECHECK;
12748 ins_flag |= MONO_INST_NORANGECHECK;
12749 /* we ignore the no-nullcheck for now since we
12750 * really do it explicitly only when doing callvirt->call
12754 case CEE_RETHROW: {
12756 int handler_offset = -1;
12758 for (i = 0; i < header->num_clauses; ++i) {
12759 MonoExceptionClause *clause = &header->clauses [i];
12760 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12761 handler_offset = clause->handler_offset;
12766 bblock->flags |= BB_EXCEPTION_UNSAFE;
12768 if (handler_offset == -1)
12771 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12772 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12773 ins->sreg1 = load->dreg;
12774 MONO_ADD_INS (bblock, ins);
12776 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12777 MONO_ADD_INS (bblock, ins);
12780 link_bblock (cfg, bblock, end_bblock);
12781 start_new_bblock = 1;
12789 CHECK_STACK_OVF (1);
12791 token = read32 (ip + 2);
12792 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12793 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12796 val = mono_type_size (type, &ialign);
12798 MonoClass *klass = mini_get_class (method, token, generic_context);
12799 CHECK_TYPELOAD (klass);
12801 val = mono_type_size (&klass->byval_arg, &ialign);
12803 if (mini_is_gsharedvt_klass (cfg, klass))
12804 GSHAREDVT_FAILURE (*ip);
12806 EMIT_NEW_ICONST (cfg, ins, val);
12811 case CEE_REFANYTYPE: {
12812 MonoInst *src_var, *src;
12814 GSHAREDVT_FAILURE (*ip);
12820 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12822 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12823 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12824 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12829 case CEE_READONLY_:
12842 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12852 g_warning ("opcode 0x%02x not handled", *ip);
12856 if (start_new_bblock != 1)
12859 bblock->cil_length = ip - bblock->cil_code;
12860 if (bblock->next_bb) {
12861 /* This could already be set because of inlining, #693905 */
12862 MonoBasicBlock *bb = bblock;
12864 while (bb->next_bb)
12866 bb->next_bb = end_bblock;
12868 bblock->next_bb = end_bblock;
12871 if (cfg->method == method && cfg->domainvar) {
12873 MonoInst *get_domain;
12875 cfg->cbb = init_localsbb;
12877 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12878 MONO_ADD_INS (cfg->cbb, get_domain);
12880 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12882 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12883 MONO_ADD_INS (cfg->cbb, store);
12886 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12887 if (cfg->compile_aot)
12888 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12889 mono_get_got_var (cfg);
12892 if (cfg->method == method && cfg->got_var)
12893 mono_emit_load_got_addr (cfg);
12895 if (init_localsbb) {
12896 cfg->cbb = init_localsbb;
12898 for (i = 0; i < header->num_locals; ++i) {
12899 emit_init_local (cfg, i, header->locals [i], init_locals);
12903 if (cfg->init_ref_vars && cfg->method == method) {
12904 /* Emit initialization for ref vars */
12905 // FIXME: Avoid duplication initialization for IL locals.
12906 for (i = 0; i < cfg->num_varinfo; ++i) {
12907 MonoInst *ins = cfg->varinfo [i];
12909 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12910 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12914 if (cfg->lmf_var && cfg->method == method) {
12915 cfg->cbb = init_localsbb;
12916 emit_push_lmf (cfg);
12919 cfg->cbb = init_localsbb;
12920 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12923 MonoBasicBlock *bb;
12926 * Make seq points at backward branch targets interruptable.
12928 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12929 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12930 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12933 /* Add a sequence point for method entry/exit events */
12934 if (seq_points && cfg->gen_sdb_seq_points) {
12935 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12936 MONO_ADD_INS (init_localsbb, ins);
12937 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12938 MONO_ADD_INS (cfg->bb_exit, ins);
12942 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12943 * the code they refer to was dead (#11880).
12945 if (sym_seq_points) {
12946 for (i = 0; i < header->code_size; ++i) {
12947 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12950 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12951 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12958 if (cfg->method == method) {
12959 MonoBasicBlock *bb;
12960 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12961 bb->region = mono_find_block_region (cfg, bb->real_offset);
12963 mono_create_spvar_for_region (cfg, bb->region);
12964 if (cfg->verbose_level > 2)
12965 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12969 if (inline_costs < 0) {
12972 /* Method is too large */
12973 mname = mono_method_full_name (method, TRUE);
12974 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12975 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12979 if ((cfg->verbose_level > 2) && (cfg->method == method))
12980 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12985 g_assert (!mono_error_ok (&cfg->error));
12989 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12993 set_exception_type_from_invalid_il (cfg, method, ip);
12997 g_slist_free (class_inits);
12998 mono_basic_block_free (original_bb);
12999 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13000 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13001 if (cfg->exception_type)
13004 return inline_costs;
/*
 * Map a OP_STORE*_MEMBASE_REG store opcode to its OP_STORE*_MEMBASE_IMM
 * counterpart (same width, register source replaced by an immediate).
 * Asserts on any opcode without an immediate form.
 */
13008 store_membase_reg_to_store_membase_imm (int opcode)
13011 case OP_STORE_MEMBASE_REG:
13012 return OP_STORE_MEMBASE_IMM;
13013 case OP_STOREI1_MEMBASE_REG:
13014 return OP_STOREI1_MEMBASE_IMM;
13015 case OP_STOREI2_MEMBASE_REG:
13016 return OP_STOREI2_MEMBASE_IMM;
13017 case OP_STOREI4_MEMBASE_REG:
13018 return OP_STOREI4_MEMBASE_IMM;
13019 case OP_STOREI8_MEMBASE_REG:
13020 return OP_STOREI8_MEMBASE_IMM;
/* unreachable: caller must only pass *_MEMBASE_REG store opcodes */
13022 g_assert_not_reached ();
/*
 * Convert a two-source ALU/compare/store/call opcode into the variant that
 * takes an immediate as its second operand (e.g. OP_IADD -> OP_IADD_IMM).
 * Target-specific (x86/amd64) fused opcodes are handled under #ifdefs below.
 * Falls through for opcodes with no immediate form (elided in this view).
 */
13029 mono_op_to_op_imm (int opcode)
13033 return OP_IADD_IMM;
13035 return OP_ISUB_IMM;
13037 return OP_IDIV_IMM;
13039 return OP_IDIV_UN_IMM;
13041 return OP_IREM_IMM;
13043 return OP_IREM_UN_IMM;
13045 return OP_IMUL_IMM;
13047 return OP_IAND_IMM;
13051 return OP_IXOR_IMM;
13053 return OP_ISHL_IMM;
13055 return OP_ISHR_IMM;
13057 return OP_ISHR_UN_IMM;
13060 return OP_LADD_IMM;
13062 return OP_LSUB_IMM;
13064 return OP_LAND_IMM;
13068 return OP_LXOR_IMM;
13070 return OP_LSHL_IMM;
13072 return OP_LSHR_IMM;
13074 return OP_LSHR_UN_IMM;
/* 64-bit long remainder-by-immediate only exists on 64-bit registers */
13075 #if SIZEOF_REGISTER == 8
13077 return OP_LREM_IMM;
13081 return OP_COMPARE_IMM;
13083 return OP_ICOMPARE_IMM;
13085 return OP_LCOMPARE_IMM;
13087 case OP_STORE_MEMBASE_REG:
13088 return OP_STORE_MEMBASE_IMM;
13089 case OP_STOREI1_MEMBASE_REG:
13090 return OP_STOREI1_MEMBASE_IMM;
13091 case OP_STOREI2_MEMBASE_REG:
13092 return OP_STOREI2_MEMBASE_IMM;
13093 case OP_STOREI4_MEMBASE_REG:
13094 return OP_STOREI4_MEMBASE_IMM;
13096 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13098 return OP_X86_PUSH_IMM;
13099 case OP_X86_COMPARE_MEMBASE_REG:
13100 return OP_X86_COMPARE_MEMBASE_IMM;
13102 #if defined(TARGET_AMD64)
13103 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13104 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call through a constant address becomes a direct call */
13106 case OP_VOIDCALL_REG:
13107 return OP_VOIDCALL;
13115 return OP_LOCALLOC_IMM;
/*
 * Map a CEE_LDIND_* CIL opcode to the corresponding OP_LOAD*_MEMBASE
 * IR load opcode (matching size and signedness).
 * Asserts on opcodes that are not indirect loads.
 */
13122 ldind_to_load_membase (int opcode)
13126 return OP_LOADI1_MEMBASE;
13128 return OP_LOADU1_MEMBASE;
13130 return OP_LOADI2_MEMBASE;
13132 return OP_LOADU2_MEMBASE;
13134 return OP_LOADI4_MEMBASE;
13136 return OP_LOADU4_MEMBASE;
13138 return OP_LOAD_MEMBASE;
/* object references load as a full native-width pointer */
13139 case CEE_LDIND_REF:
13140 return OP_LOAD_MEMBASE;
13142 return OP_LOADI8_MEMBASE;
13144 return OP_LOADR4_MEMBASE;
13146 return OP_LOADR8_MEMBASE;
13148 g_assert_not_reached ();
/*
 * Map a CEE_STIND_* CIL opcode to the corresponding OP_STORE*_MEMBASE_REG
 * IR store opcode. Asserts on opcodes that are not indirect stores.
 */
13155 stind_to_store_membase (int opcode)
13159 return OP_STOREI1_MEMBASE_REG;
13161 return OP_STOREI2_MEMBASE_REG;
13163 return OP_STOREI4_MEMBASE_REG;
/* object references store as a full native-width pointer */
13165 case CEE_STIND_REF:
13166 return OP_STORE_MEMBASE_REG;
13168 return OP_STOREI8_MEMBASE_REG;
13170 return OP_STORER4_MEMBASE_REG;
13172 return OP_STORER8_MEMBASE_REG;
13174 g_assert_not_reached ();
/*
 * Convert an OP_LOAD*_MEMBASE (base register + offset) load into the
 * corresponding OP_LOAD*_MEM absolute-address load, for targets that
 * support it (x86/amd64 only). Returns a fall-through value for other
 * targets/opcodes (elided in this view).
 */
13181 mono_load_membase_to_load_mem (int opcode)
13183 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13184 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13186 case OP_LOAD_MEMBASE:
13187 return OP_LOAD_MEM;
13188 case OP_LOADU1_MEMBASE:
13189 return OP_LOADU1_MEM;
13190 case OP_LOADU2_MEMBASE:
13191 return OP_LOADU2_MEM;
13192 case OP_LOADI4_MEMBASE:
13193 return OP_LOADI4_MEM;
13194 case OP_LOADU4_MEMBASE:
13195 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist with 64-bit registers */
13196 #if SIZEOF_REGISTER == 8
13197 case OP_LOADI8_MEMBASE:
13198 return OP_LOADI8_MEM;
/*
 * Peephole helper: fold a store (store_opcode) of the result of an ALU op
 * (opcode) into a single x86/amd64 read-modify-write instruction that
 * operates directly on the memory destination (e.g. add reg -> [base+off]).
 * Only full-width stores are eligible; returns a fall-through value
 * (elided in this view) when no fused form exists.
 */
13207 op_to_op_dest_membase (int store_opcode, int opcode)
13209 #if defined(TARGET_X86)
/* on x86 only 32-bit/native-width stores can be fused */
13210 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13215 return OP_X86_ADD_MEMBASE_REG;
13217 return OP_X86_SUB_MEMBASE_REG;
13219 return OP_X86_AND_MEMBASE_REG;
13221 return OP_X86_OR_MEMBASE_REG;
13223 return OP_X86_XOR_MEMBASE_REG;
13226 return OP_X86_ADD_MEMBASE_IMM;
13229 return OP_X86_SUB_MEMBASE_IMM;
13232 return OP_X86_AND_MEMBASE_IMM;
13235 return OP_X86_OR_MEMBASE_IMM;
13238 return OP_X86_XOR_MEMBASE_IMM;
13244 #if defined(TARGET_AMD64)
/* amd64 additionally fuses 64-bit stores (OP_AMD64_* variants below) */
13245 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13250 return OP_X86_ADD_MEMBASE_REG;
13252 return OP_X86_SUB_MEMBASE_REG;
13254 return OP_X86_AND_MEMBASE_REG;
13256 return OP_X86_OR_MEMBASE_REG;
13258 return OP_X86_XOR_MEMBASE_REG;
13260 return OP_X86_ADD_MEMBASE_IMM;
13262 return OP_X86_SUB_MEMBASE_IMM;
13264 return OP_X86_AND_MEMBASE_IMM;
13266 return OP_X86_OR_MEMBASE_IMM;
13268 return OP_X86_XOR_MEMBASE_IMM;
13270 return OP_AMD64_ADD_MEMBASE_REG;
13272 return OP_AMD64_SUB_MEMBASE_REG;
13274 return OP_AMD64_AND_MEMBASE_REG;
13276 return OP_AMD64_OR_MEMBASE_REG;
13278 return OP_AMD64_XOR_MEMBASE_REG;
13281 return OP_AMD64_ADD_MEMBASE_IMM;
13284 return OP_AMD64_SUB_MEMBASE_IMM;
13287 return OP_AMD64_AND_MEMBASE_IMM;
13290 return OP_AMD64_OR_MEMBASE_IMM;
13293 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * Peephole helper: fold a 1-byte store of a compare result (ceq/cne style
 * opcode) into a single x86/amd64 setcc-to-memory instruction.
 * Returns a fall-through value (elided in this view) when not applicable.
 */
13303 op_to_op_store_membase (int store_opcode, int opcode)
13305 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13308 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13309 return OP_X86_SETEQ_MEMBASE;
13311 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13312 return OP_X86_SETNE_MEMBASE;
/*
 * Peephole helper: fold a load (load_opcode) feeding sreg1 of `opcode`
 * into a single x86/amd64 instruction that reads its first operand from
 * memory (push/compare with a membase operand). Returns a fall-through
 * value (elided in this view) when no fused form exists.
 */
13320 op_to_op_src1_membase (int load_opcode, int opcode)
13323 /* FIXME: This has sign extension issues */
13325 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13326 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only native/32-bit loads are eligible on x86 */
13329 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13334 return OP_X86_PUSH_MEMBASE;
13335 case OP_COMPARE_IMM:
13336 case OP_ICOMPARE_IMM:
13337 return OP_X86_COMPARE_MEMBASE_IMM;
13340 return OP_X86_COMPARE_MEMBASE_REG;
13344 #ifdef TARGET_AMD64
13345 /* FIXME: This has sign extension issues */
13347 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13348 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes, so 8-byte loads differ */
13353 #ifdef __mono_ilp32__
13354 if (load_opcode == OP_LOADI8_MEMBASE)
13356 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13358 return OP_X86_PUSH_MEMBASE;
13360 /* FIXME: This only works for 32 bit immediates
13361 case OP_COMPARE_IMM:
13362 case OP_LCOMPARE_IMM:
13363 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13364 return OP_AMD64_COMPARE_MEMBASE_IMM;
13366 case OP_ICOMPARE_IMM:
13367 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13368 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13372 #ifdef __mono_ilp32__
13373 if (load_opcode == OP_LOAD_MEMBASE)
13374 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13375 if (load_opcode == OP_LOADI8_MEMBASE)
13377 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13379 return OP_AMD64_COMPARE_MEMBASE_REG;
13382 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13383 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * Peephole helper: fold a load (load_opcode) feeding sreg2 of `opcode`
 * into a single x86/amd64 reg-op-memory instruction (e.g. add [base+off]
 * into a register). Returns a fall-through value (elided in this view)
 * when no fused form exists.
 */
13392 op_to_op_src2_membase (int load_opcode, int opcode)
13395 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13401 return OP_X86_COMPARE_REG_MEMBASE;
13403 return OP_X86_ADD_REG_MEMBASE;
13405 return OP_X86_SUB_REG_MEMBASE;
13407 return OP_X86_AND_REG_MEMBASE;
13409 return OP_X86_OR_REG_MEMBASE;
13411 return OP_X86_XOR_REG_MEMBASE;
13415 #ifdef TARGET_AMD64
/* under ILP32 (x32), OP_LOAD_MEMBASE is a 4-byte load and joins the
 * 32-bit (OP_X86_*) group; on LP64 it joins the 64-bit group below */
13416 #ifdef __mono_ilp32__
13417 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13419 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13423 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13425 return OP_X86_ADD_REG_MEMBASE;
13427 return OP_X86_SUB_REG_MEMBASE;
13429 return OP_X86_AND_REG_MEMBASE;
13431 return OP_X86_OR_REG_MEMBASE;
13433 return OP_X86_XOR_REG_MEMBASE;
13435 #ifdef __mono_ilp32__
13436 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13438 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13443 return OP_AMD64_COMPARE_REG_MEMBASE;
13445 return OP_AMD64_ADD_REG_MEMBASE;
13447 return OP_AMD64_SUB_REG_MEMBASE;
13449 return OP_AMD64_AND_REG_MEMBASE;
13451 return OP_AMD64_OR_REG_MEMBASE;
13453 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * Like mono_op_to_op_imm (), but first rejects opcodes whose immediate
 * form would be software-emulated on this target (long shifts on 32-bit
 * registers, mul/div/rem under MONO_ARCH_EMULATE_*); the rejected cases
 * are elided in this view. Everything else defers to mono_op_to_op_imm ().
 */
13462 mono_op_to_op_imm_noemul (int opcode)
13465 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13471 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13478 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13483 return mono_op_to_op_imm (opcode);
13488 * mono_handle_global_vregs:
13490 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13494 mono_handle_global_vregs (MonoCompile *cfg)
13496 gint32 *vreg_to_bb;
13497 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] encodes where a vreg has been seen:
 *   0             = not seen yet (0 is a valid block num, hence the +1 below)
 *   block_num + 1 = seen only in that bblock
 *   -1            = seen in more than one bblock (made global)
 * NOTE(review): sizeof (gint32*) over-allocates (pointer size per entry);
 * sizeof (gint32) would suffice. Harmless but wasteful.
 */
13500 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13502 #ifdef MONO_ARCH_SIMD_INTRINSICS
13503 if (cfg->uses_simd_intrinsics)
13504 mono_simd_simplify_indirection (cfg);
13507 /* Find local vregs used in more than one bb */
13508 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13509 MonoInst *ins = bb->code;
13510 int block_num = bb->block_num;
13512 if (cfg->verbose_level > 2)
13513 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13516 for (; ins; ins = ins->next) {
13517 const char *spec = INS_INFO (ins->opcode);
13518 int regtype = 0, regindex;
13521 if (G_UNLIKELY (cfg->verbose_level > 2))
13522 mono_print_ins (ins);
/* CIL opcodes must already have been lowered to machine IR by now */
13524 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg (index 0) then sreg1..sreg3, per the INS_INFO spec string */
13526 for (regindex = 0; regindex < 4; regindex ++) {
13529 if (regindex == 0) {
13530 regtype = spec [MONO_INST_DEST];
13531 if (regtype == ' ')
13534 } else if (regindex == 1) {
13535 regtype = spec [MONO_INST_SRC1];
13536 if (regtype == ' ')
13539 } else if (regindex == 2) {
13540 regtype = spec [MONO_INST_SRC2];
13541 if (regtype == ' ')
13544 } else if (regindex == 3) {
13545 regtype = spec [MONO_INST_SRC3];
13546 if (regtype == ' ')
13551 #if SIZEOF_REGISTER == 4
13552 /* In the LLVM case, the long opcodes are not decomposed */
13553 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13555 * Since some instructions reference the original long vreg,
13556 * and some reference the two component vregs, it is quite hard
13557 * to determine when it needs to be global. So be conservative.
13559 if (!get_vreg_to_inst (cfg, vreg)) {
13560 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13562 if (cfg->verbose_level > 2)
13563 printf ("LONG VREG R%d made global.\n", vreg);
13567 * Make the component vregs volatile since the optimizations can
13568 * get confused otherwise.
13570 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13571 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13575 g_assert (vreg != -1);
13577 prev_bb = vreg_to_bb [vreg];
13578 if (prev_bb == 0) {
13579 /* 0 is a valid block num */
13580 vreg_to_bb [vreg] = block_num + 1;
13581 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are implicitly global; skip them */
13582 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13585 if (!get_vreg_to_inst (cfg, vreg)) {
13586 if (G_UNLIKELY (cfg->verbose_level > 2))
13587 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a MonoInst variable of the appropriate type for the vreg */
13591 if (vreg_is_ref (cfg, vreg))
13592 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13594 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13597 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13600 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13603 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13606 g_assert_not_reached ();
13610 /* Flag as having been used in more than one bb */
13611 vreg_to_bb [vreg] = -1;
13617 /* If a variable is used in only one bblock, convert it into a local vreg */
13618 for (i = 0; i < cfg->num_varinfo; i++) {
13619 MonoInst *var = cfg->varinfo [i];
13620 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13622 switch (var->type) {
13628 #if SIZEOF_REGISTER == 8
13631 #if !defined(TARGET_X86)
13632 /* Enabling this screws up the fp stack on x86 */
13635 if (mono_arch_is_soft_float ())
13638 /* Arguments are implicitly global */
13639 /* Putting R4 vars into registers doesn't work currently */
13640 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13641 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13643 * Make that the variable's liveness interval doesn't contain a call, since
13644 * that would cause the lvreg to be spilled, making the whole optimization
13647 /* This is too slow for JIT compilation */
13649 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13651 int def_index, call_index, ins_index;
13652 gboolean spilled = FALSE;
13657 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13658 const char *spec = INS_INFO (ins->opcode);
13660 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13661 def_index = ins_index;
/*
 * NOTE(review): both sides of this || test SRC1/sreg1; the second
 * side was presumably meant to test SRC2/sreg2 — verify against
 * upstream before changing.
 */
13663 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13664 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13665 if (call_index > def_index) {
13671 if (MONO_IS_CALL (ins))
13672 call_index = ins_index;
13682 if (G_UNLIKELY (cfg->verbose_level > 2))
13683 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* mark dead so the compaction pass below drops the variable */
13684 var->flags |= MONO_INST_IS_DEAD;
13685 cfg->vreg_to_inst [var->dreg] = NULL;
13692 * Compress the varinfo and vars tables so the liveness computation is faster and
13693 * takes up less space.
13696 for (i = 0; i < cfg->num_varinfo; ++i) {
13697 MonoInst *var = cfg->varinfo [i];
13698 if (pos < i && cfg->locals_start == i)
13699 cfg->locals_start = pos;
13700 if (!(var->flags & MONO_INST_IS_DEAD)) {
13702 cfg->varinfo [pos] = cfg->varinfo [i];
13703 cfg->varinfo [pos]->inst_c0 = pos;
13704 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13705 cfg->vars [pos].idx = pos;
13706 #if SIZEOF_REGISTER == 4
13707 if (cfg->varinfo [pos]->type == STACK_I8) {
13708 /* Modify the two component vars too */
/* component vregs live at dreg + 1 (low word) and dreg + 2 (high word) */
13711 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13712 var1->inst_c0 = pos;
13713 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13714 var1->inst_c0 = pos;
13721 cfg->num_varinfo = pos;
13722 if (cfg->locals_start > cfg->num_varinfo)
13723 cfg->locals_start = cfg->num_varinfo;
13727 * mono_spill_global_vars:
13729 * Generate spill code for variables which are not allocated to registers,
13730 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13731 * code is generated which could be optimized by the local optimization passes.
13734 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13736 MonoBasicBlock *bb;
13738 int orig_next_vreg;
13739 guint32 *vreg_to_lvreg;
13741 guint32 i, lvregs_len;
13742 gboolean dest_has_lvreg = FALSE;
13743 guint32 stacktypes [128];
13744 MonoInst **live_range_start, **live_range_end;
13745 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13746 int *gsharedvt_vreg_to_idx = NULL;
13748 *need_local_opts = FALSE;
13750 memset (spec2, 0, sizeof (spec2));
13752 /* FIXME: Move this function to mini.c */
13753 stacktypes ['i'] = STACK_PTR;
13754 stacktypes ['l'] = STACK_I8;
13755 stacktypes ['f'] = STACK_R8;
13756 #ifdef MONO_ARCH_SIMD_INTRINSICS
13757 stacktypes ['x'] = STACK_VTYPE;
13760 #if SIZEOF_REGISTER == 4
13761 /* Create MonoInsts for longs */
13762 for (i = 0; i < cfg->num_varinfo; i++) {
13763 MonoInst *ins = cfg->varinfo [i];
13765 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13766 switch (ins->type) {
13771 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13774 g_assert (ins->opcode == OP_REGOFFSET);
13776 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13778 tree->opcode = OP_REGOFFSET;
13779 tree->inst_basereg = ins->inst_basereg;
13780 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13782 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13784 tree->opcode = OP_REGOFFSET;
13785 tree->inst_basereg = ins->inst_basereg;
13786 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13796 if (cfg->compute_gc_maps) {
13797 /* registers need liveness info even for !non refs */
13798 for (i = 0; i < cfg->num_varinfo; i++) {
13799 MonoInst *ins = cfg->varinfo [i];
13801 if (ins->opcode == OP_REGVAR)
13802 ins->flags |= MONO_INST_GC_TRACK;
13806 if (cfg->gsharedvt) {
13807 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13809 for (i = 0; i < cfg->num_varinfo; ++i) {
13810 MonoInst *ins = cfg->varinfo [i];
13813 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13814 if (i >= cfg->locals_start) {
13816 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13817 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13818 ins->opcode = OP_GSHAREDVT_LOCAL;
13819 ins->inst_imm = idx;
13822 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13823 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13829 /* FIXME: widening and truncation */
13832 * As an optimization, when a variable allocated to the stack is first loaded into
13833 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13834 * the variable again.
13836 orig_next_vreg = cfg->next_vreg;
13837 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13838 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13842 * These arrays contain the first and last instructions accessing a given
13844 * Since we emit bblocks in the same order we process them here, and we
13845 * don't split live ranges, these will precisely describe the live range of
13846 * the variable, i.e. the instruction range where a valid value can be found
13847 * in the variables location.
13848 * The live range is computed using the liveness info computed by the liveness pass.
13849 * We can't use vmv->range, since that is an abstract live range, and we need
13850 * one which is instruction precise.
13851 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13853 /* FIXME: Only do this if debugging info is requested */
13854 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13855 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13856 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13857 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13859 /* Add spill loads/stores */
13860 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13863 if (cfg->verbose_level > 2)
13864 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13866 /* Clear vreg_to_lvreg array */
13867 for (i = 0; i < lvregs_len; i++)
13868 vreg_to_lvreg [lvregs [i]] = 0;
13872 MONO_BB_FOR_EACH_INS (bb, ins) {
13873 const char *spec = INS_INFO (ins->opcode);
13874 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13875 gboolean store, no_lvreg;
13876 int sregs [MONO_MAX_SRC_REGS];
13878 if (G_UNLIKELY (cfg->verbose_level > 2))
13879 mono_print_ins (ins);
13881 if (ins->opcode == OP_NOP)
13885 * We handle LDADDR here as well, since it can only be decomposed
13886 * when variable addresses are known.
13888 if (ins->opcode == OP_LDADDR) {
13889 MonoInst *var = ins->inst_p0;
13891 if (var->opcode == OP_VTARG_ADDR) {
13892 /* Happens on SPARC/S390 where vtypes are passed by reference */
13893 MonoInst *vtaddr = var->inst_left;
13894 if (vtaddr->opcode == OP_REGVAR) {
13895 ins->opcode = OP_MOVE;
13896 ins->sreg1 = vtaddr->dreg;
13898 else if (var->inst_left->opcode == OP_REGOFFSET) {
13899 ins->opcode = OP_LOAD_MEMBASE;
13900 ins->inst_basereg = vtaddr->inst_basereg;
13901 ins->inst_offset = vtaddr->inst_offset;
13904 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13905 /* gsharedvt arg passed by ref */
13906 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13908 ins->opcode = OP_LOAD_MEMBASE;
13909 ins->inst_basereg = var->inst_basereg;
13910 ins->inst_offset = var->inst_offset;
13911 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13912 MonoInst *load, *load2, *load3;
13913 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13914 int reg1, reg2, reg3;
13915 MonoInst *info_var = cfg->gsharedvt_info_var;
13916 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13920 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13923 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13925 g_assert (info_var);
13926 g_assert (locals_var);
13928 /* Mark the instruction used to compute the locals var as used */
13929 cfg->gsharedvt_locals_var_ins = NULL;
13931 /* Load the offset */
13932 if (info_var->opcode == OP_REGOFFSET) {
13933 reg1 = alloc_ireg (cfg);
13934 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13935 } else if (info_var->opcode == OP_REGVAR) {
13937 reg1 = info_var->dreg;
13939 g_assert_not_reached ();
13941 reg2 = alloc_ireg (cfg);
13942 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13943 /* Load the locals area address */
13944 reg3 = alloc_ireg (cfg);
13945 if (locals_var->opcode == OP_REGOFFSET) {
13946 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13947 } else if (locals_var->opcode == OP_REGVAR) {
13948 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13950 g_assert_not_reached ();
13952 /* Compute the address */
13953 ins->opcode = OP_PADD;
13957 mono_bblock_insert_before_ins (bb, ins, load3);
13958 mono_bblock_insert_before_ins (bb, load3, load2);
13960 mono_bblock_insert_before_ins (bb, load2, load);
13962 g_assert (var->opcode == OP_REGOFFSET);
13964 ins->opcode = OP_ADD_IMM;
13965 ins->sreg1 = var->inst_basereg;
13966 ins->inst_imm = var->inst_offset;
13969 *need_local_opts = TRUE;
13970 spec = INS_INFO (ins->opcode);
13973 if (ins->opcode < MONO_CEE_LAST) {
13974 mono_print_ins (ins);
13975 g_assert_not_reached ();
13979 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13983 if (MONO_IS_STORE_MEMBASE (ins)) {
13984 tmp_reg = ins->dreg;
13985 ins->dreg = ins->sreg2;
13986 ins->sreg2 = tmp_reg;
13989 spec2 [MONO_INST_DEST] = ' ';
13990 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13991 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13992 spec2 [MONO_INST_SRC3] = ' ';
13994 } else if (MONO_IS_STORE_MEMINDEX (ins))
13995 g_assert_not_reached ();
14000 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14001 printf ("\t %.3s %d", spec, ins->dreg);
14002 num_sregs = mono_inst_get_src_registers (ins, sregs);
14003 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14004 printf (" %d", sregs [srcindex]);
14011 regtype = spec [MONO_INST_DEST];
14012 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14015 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14016 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14017 MonoInst *store_ins;
14019 MonoInst *def_ins = ins;
14020 int dreg = ins->dreg; /* The original vreg */
14022 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14024 if (var->opcode == OP_REGVAR) {
14025 ins->dreg = var->dreg;
14026 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14028 * Instead of emitting a load+store, use a _membase opcode.
14030 g_assert (var->opcode == OP_REGOFFSET);
14031 if (ins->opcode == OP_MOVE) {
14035 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14036 ins->inst_basereg = var->inst_basereg;
14037 ins->inst_offset = var->inst_offset;
14040 spec = INS_INFO (ins->opcode);
14044 g_assert (var->opcode == OP_REGOFFSET);
14046 prev_dreg = ins->dreg;
14048 /* Invalidate any previous lvreg for this vreg */
14049 vreg_to_lvreg [ins->dreg] = 0;
14053 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14055 store_opcode = OP_STOREI8_MEMBASE_REG;
14058 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14060 #if SIZEOF_REGISTER != 8
14061 if (regtype == 'l') {
14062 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14063 mono_bblock_insert_after_ins (bb, ins, store_ins);
14064 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14065 mono_bblock_insert_after_ins (bb, ins, store_ins);
14066 def_ins = store_ins;
14071 g_assert (store_opcode != OP_STOREV_MEMBASE);
14073 /* Try to fuse the store into the instruction itself */
14074 /* FIXME: Add more instructions */
14075 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14076 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14077 ins->inst_imm = ins->inst_c0;
14078 ins->inst_destbasereg = var->inst_basereg;
14079 ins->inst_offset = var->inst_offset;
14080 spec = INS_INFO (ins->opcode);
14081 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14082 ins->opcode = store_opcode;
14083 ins->inst_destbasereg = var->inst_basereg;
14084 ins->inst_offset = var->inst_offset;
14088 tmp_reg = ins->dreg;
14089 ins->dreg = ins->sreg2;
14090 ins->sreg2 = tmp_reg;
14093 spec2 [MONO_INST_DEST] = ' ';
14094 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14095 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14096 spec2 [MONO_INST_SRC3] = ' ';
14098 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14099 // FIXME: The backends expect the base reg to be in inst_basereg
14100 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14102 ins->inst_basereg = var->inst_basereg;
14103 ins->inst_offset = var->inst_offset;
14104 spec = INS_INFO (ins->opcode);
14106 /* printf ("INS: "); mono_print_ins (ins); */
14107 /* Create a store instruction */
14108 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14110 /* Insert it after the instruction */
14111 mono_bblock_insert_after_ins (bb, ins, store_ins);
14113 def_ins = store_ins;
14116 * We can't assign ins->dreg to var->dreg here, since the
14117 * sregs could use it. So set a flag, and do it after
14120 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14121 dest_has_lvreg = TRUE;
14126 if (def_ins && !live_range_start [dreg]) {
14127 live_range_start [dreg] = def_ins;
14128 live_range_start_bb [dreg] = bb;
14131 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14134 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14135 tmp->inst_c1 = dreg;
14136 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14143 num_sregs = mono_inst_get_src_registers (ins, sregs);
14144 for (srcindex = 0; srcindex < 3; ++srcindex) {
14145 regtype = spec [MONO_INST_SRC1 + srcindex];
14146 sreg = sregs [srcindex];
14148 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14149 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14150 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14151 MonoInst *use_ins = ins;
14152 MonoInst *load_ins;
14153 guint32 load_opcode;
14155 if (var->opcode == OP_REGVAR) {
14156 sregs [srcindex] = var->dreg;
14157 //mono_inst_set_src_registers (ins, sregs);
14158 live_range_end [sreg] = use_ins;
14159 live_range_end_bb [sreg] = bb;
14161 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14164 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14165 /* var->dreg is a hreg */
14166 tmp->inst_c1 = sreg;
14167 mono_bblock_insert_after_ins (bb, ins, tmp);
14173 g_assert (var->opcode == OP_REGOFFSET);
14175 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14177 g_assert (load_opcode != OP_LOADV_MEMBASE);
14179 if (vreg_to_lvreg [sreg]) {
14180 g_assert (vreg_to_lvreg [sreg] != -1);
14182 /* The variable is already loaded to an lvreg */
14183 if (G_UNLIKELY (cfg->verbose_level > 2))
14184 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14185 sregs [srcindex] = vreg_to_lvreg [sreg];
14186 //mono_inst_set_src_registers (ins, sregs);
14190 /* Try to fuse the load into the instruction */
14191 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14192 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14193 sregs [0] = var->inst_basereg;
14194 //mono_inst_set_src_registers (ins, sregs);
14195 ins->inst_offset = var->inst_offset;
14196 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14197 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14198 sregs [1] = var->inst_basereg;
14199 //mono_inst_set_src_registers (ins, sregs);
14200 ins->inst_offset = var->inst_offset;
14202 if (MONO_IS_REAL_MOVE (ins)) {
14203 ins->opcode = OP_NOP;
14206 //printf ("%d ", srcindex); mono_print_ins (ins);
14208 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14210 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14211 if (var->dreg == prev_dreg) {
14213 * sreg refers to the value loaded by the load
14214 * emitted below, but we need to use ins->dreg
14215 * since it refers to the store emitted earlier.
14219 g_assert (sreg != -1);
14220 vreg_to_lvreg [var->dreg] = sreg;
14221 g_assert (lvregs_len < 1024);
14222 lvregs [lvregs_len ++] = var->dreg;
14226 sregs [srcindex] = sreg;
14227 //mono_inst_set_src_registers (ins, sregs);
14229 #if SIZEOF_REGISTER != 8
14230 if (regtype == 'l') {
14231 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14232 mono_bblock_insert_before_ins (bb, ins, load_ins);
14233 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14234 mono_bblock_insert_before_ins (bb, ins, load_ins);
14235 use_ins = load_ins;
14240 #if SIZEOF_REGISTER == 4
14241 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14243 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14244 mono_bblock_insert_before_ins (bb, ins, load_ins);
14245 use_ins = load_ins;
14249 if (var->dreg < orig_next_vreg) {
14250 live_range_end [var->dreg] = use_ins;
14251 live_range_end_bb [var->dreg] = bb;
14254 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14257 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14258 tmp->inst_c1 = var->dreg;
14259 mono_bblock_insert_after_ins (bb, ins, tmp);
14263 mono_inst_set_src_registers (ins, sregs);
14265 if (dest_has_lvreg) {
14266 g_assert (ins->dreg != -1);
14267 vreg_to_lvreg [prev_dreg] = ins->dreg;
14268 g_assert (lvregs_len < 1024);
14269 lvregs [lvregs_len ++] = prev_dreg;
14270 dest_has_lvreg = FALSE;
14274 tmp_reg = ins->dreg;
14275 ins->dreg = ins->sreg2;
14276 ins->sreg2 = tmp_reg;
14279 if (MONO_IS_CALL (ins)) {
14280 /* Clear vreg_to_lvreg array */
14281 for (i = 0; i < lvregs_len; i++)
14282 vreg_to_lvreg [lvregs [i]] = 0;
14284 } else if (ins->opcode == OP_NOP) {
14286 MONO_INST_NULLIFY_SREGS (ins);
14289 if (cfg->verbose_level > 2)
14290 mono_print_ins_index (1, ins);
14293 /* Extend the live range based on the liveness info */
14294 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14295 for (i = 0; i < cfg->num_varinfo; i ++) {
14296 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14298 if (vreg_is_volatile (cfg, vi->vreg))
14299 /* The liveness info is incomplete */
14302 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14303 /* Live from at least the first ins of this bb */
14304 live_range_start [vi->vreg] = bb->code;
14305 live_range_start_bb [vi->vreg] = bb;
14308 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14309 /* Live at least until the last ins of this bb */
14310 live_range_end [vi->vreg] = bb->last_ins;
14311 live_range_end_bb [vi->vreg] = bb;
14317 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14319 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14320 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14322 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14323 for (i = 0; i < cfg->num_varinfo; ++i) {
14324 int vreg = MONO_VARINFO (cfg, i)->vreg;
14327 if (live_range_start [vreg]) {
14328 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14330 ins->inst_c1 = vreg;
14331 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14333 if (live_range_end [vreg]) {
14334 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14336 ins->inst_c1 = vreg;
14337 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14338 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14340 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14346 if (cfg->gsharedvt_locals_var_ins) {
14347 /* Nullify if unused */
14348 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14349 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14352 g_free (live_range_start);
14353 g_free (live_range_end);
14354 g_free (live_range_start_bb);
14355 g_free (live_range_end_bb);
14360 * - use 'iadd' instead of 'int_add'
14361 * - handling ovf opcodes: decompose in method_to_ir.
14362 * - unify iregs/fregs
14363 * -> partly done, the missing parts are:
14364 * - a more complete unification would involve unifying the hregs as well, so
14365 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14366 * would no longer map to the machine hregs, so the code generators would need to
14367 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14368 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14369 * fp/non-fp branches speeds it up by about 15%.
14370 * - use sext/zext opcodes instead of shifts
14372 * - get rid of TEMPLOADs if possible and use vregs instead
14373 * - clean up usage of OP_P/OP_ opcodes
14374 * - cleanup usage of DUMMY_USE
14375 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14377 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14378 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14379 * - make sure handle_stack_args () is called before the branch is emitted
14380 * - when the new IR is done, get rid of all unused stuff
14381 * - COMPARE/BEQ as separate instructions or unify them ?
14382 * - keeping them separate allows specialized compare instructions like
14383 * compare_imm, compare_membase
14384 * - most back ends unify fp compare+branch, fp compare+ceq
14385 * - integrate mono_save_args into inline_method
 14386 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14387 * - handle long shift opts on 32 bit platforms somehow: they require
14388 * 3 sregs (2 for arg1 and 1 for arg2)
14389 * - make byref a 'normal' type.
14390 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14391 * variable if needed.
14392 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14393 * like inline_method.
14394 * - remove inlining restrictions
14395 * - fix LNEG and enable cfold of INEG
14396 * - generalize x86 optimizations like ldelema as a peephole optimization
14397 * - add store_mem_imm for amd64
14398 * - optimize the loading of the interruption flag in the managed->native wrappers
14399 * - avoid special handling of OP_NOP in passes
14400 * - move code inserting instructions into one function/macro.
14401 * - try a coalescing phase after liveness analysis
14402 * - add float -> vreg conversion + local optimizations on !x86
14403 * - figure out how to handle decomposed branches during optimizations, ie.
14404 * compare+branch, op_jump_table+op_br etc.
14405 * - promote RuntimeXHandles to vregs
14406 * - vtype cleanups:
14407 * - add a NEW_VARLOADA_VREG macro
14408 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14409 * accessing vtype fields.
14410 * - get rid of I8CONST on 64 bit platforms
14411 * - dealing with the increase in code size due to branches created during opcode
14413 * - use extended basic blocks
14414 * - all parts of the JIT
14415 * - handle_global_vregs () && local regalloc
14416 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14417 * - sources of increase in code size:
14420 * - isinst and castclass
14421 * - lvregs not allocated to global registers even if used multiple times
14422 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14424 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14425 * - add all micro optimizations from the old JIT
14426 * - put tree optimizations into the deadce pass
14427 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14428 * specific function.
14429 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14430 * fcompare + branchCC.
14431 * - create a helper function for allocating a stack slot, taking into account
14432 * MONO_CFG_HAS_SPILLUP.
14434 * - merge the ia64 switch changes.
14435 * - optimize mono_regstate2_alloc_int/float.
14436 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14437 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14438 * parts of the tree could be separated by other instructions, killing the tree
14439 * arguments, or stores killing loads etc. Also, should we fold loads into other
14440 * instructions if the result of the load is used multiple times ?
14441 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14442 * - LAST MERGE: 108395.
14443 * - when returning vtypes in registers, generate IR and append it to the end of the
14444 * last bb instead of doing it in the epilog.
14445 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14453 - When to decompose opcodes:
14454 - earlier: this makes some optimizations hard to implement, since the low level IR
 14455 no longer contains the necessary information. But it is easier to do.
14456 - later: harder to implement, enables more optimizations.
14457 - Branches inside bblocks:
14458 - created when decomposing complex opcodes.
14459 - branches to another bblock: harmless, but not tracked by the branch
14460 optimizations, so need to branch to a label at the start of the bblock.
14461 - branches to inside the same bblock: very problematic, trips up the local
 14462 reg allocator. Can be fixed by splitting the current bblock, but that is a
14463 complex operation, since some local vregs can become global vregs etc.
14464 - Local/global vregs:
14465 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14466 local register allocator.
14467 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14468 structure, created by mono_create_var (). Assigned to hregs or the stack by
14469 the global register allocator.
14470 - When to do optimizations like alu->alu_imm:
14471 - earlier -> saves work later on since the IR will be smaller/simpler
14472 - later -> can work on more instructions
14473 - Handling of valuetypes:
14474 - When a vtype is pushed on the stack, a new temporary is created, an
14475 instruction computing its address (LDADDR) is emitted and pushed on
14476 the stack. Need to optimize cases when the vtype is used immediately as in
14477 argument passing, stloc etc.
14478 - Instead of the to_end stuff in the old JIT, simply call the function handling
14479 the values on the stack before emitting the last instruction of the bb.
14482 #endif /* DISABLE_JIT */