2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/monitor.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
69 #include "jit-icalls.h"
71 #include "debugger-agent.h"
72 #include "seq-points.h"
/*
 * JIT tuning constants and error-exit helper macros.
 * NOTE(review): this extract is missing lines (the embedded original line
 * numbers jump), so the "} while (0)" closers of the macros below are
 * absent here. Do not hand-edit without the complete file.
 */
74 #define BRANCH_COST 10
75 #define INLINE_LENGTH_LIMIT 20
/*
 * The macros below expand inside a compilation function that declares
 * 'cfg' and provides the 'exception_exit' / 'mono_error_exit' labels
 * they jump to; they record a failure on the MonoCompile and bail out.
 */
77 /* These have 'cfg' as an implicit argument */
78 #define INLINE_FAILURE(msg) do { \
79 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
80 inline_failure (cfg, msg); \
81 goto exception_exit; \
84 #define CHECK_CFG_EXCEPTION do {\
85 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
86 goto exception_exit; \
88 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
89 method_access_failure ((cfg), (method), (cmethod)); \
90 goto exception_exit; \
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
112 #define DISABLE_AOT(cfg) do { \
113 if ((cfg)->verbose_level >= 2) \
114 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
115 (cfg)->disable_aot = TRUE; \
117 #define LOAD_ERROR do { \
118 break_on_unverified (); \
119 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
120 goto exception_exit; \
123 #define TYPE_LOAD_ERROR(klass) do { \
124 cfg->exception_ptr = klass; \
128 #define CHECK_CFG_ERROR do {\
129 if (!mono_error_ok (&cfg->error)) { \
130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
131 goto mono_error_exit; \
135 /* Determine whenever 'ins' represents a load of the 'this' argument */
136 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
/* Non-static prototypes; presumably also declared in a mini header — TODO confirm. */
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
/*
 * Cached icall signatures, filled in once by mono_create_helper_signatures ()
 * below and reused when emitting trampoline calls.
 */
149 /* helper methods signatures */
150 static MonoMethodSignature *helper_sig_class_init_trampoline;
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
155 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
/*
 * Instruction metadata tables, generated by redefining the MINI_OP /
 * MINI_OP3 macros and textually including mini-ops.h once per table.
 * NOTE(review): the array declaration that consumes the first pair of
 * macro definitions is missing from this extract.
 */
158 * Instruction metadata
166 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
167 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
173 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
178 /* keep in sync with the enum in mini.h */
181 #include "mini-ops.h"
/* Second pass: map each opcode to its source-register count. */
186 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
187 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
189 * This should contain the index of the last sreg + 1. This is not the same
190 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
192 const gint8 ins_sreg_counts[] = {
193 #include "mini-ops.h"
/* Initialize a MonoMethodVar; 0xffff marks "no first use recorded yet". */
198 #define MONO_INIT_VARINFO(vi,id) do { \
199 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Thin public wrappers around the static alloc_*reg () virtual-register
 * allocators. NOTE(review): return-type lines and braces are missing from
 * this extract; the embedded original numbering shows the gaps.
 */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC classification (ref / managed pointer / plain int). */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/*
 * Map a MonoType to the register-move opcode used to copy a value of that
 * type between vregs (OP_MOVE / OP_LMOVE / OP_FMOVE / OP_RMOVE / ...).
 * NOTE(review): the case labels and several return statements are missing
 * from this extract; only the control-flow skeleton is visible.
 */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (cfg, type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* r4fp means R4 values live in their own (float32) register class. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are handled through the container's byval type. */
322 case MONO_TYPE_GENERICINST:
323 type = &type->data.generic_class->container_class->byval_arg;
327 g_assert (cfg->generic_sharing_context);
328 if (mini_type_var_is_vt (cfg, type))
331 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
333 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: print a basic block's in/out edges and its instruction
 * list to stdout, prefixed with 'msg'.
 */
339 mono_print_bb (MonoBasicBlock *bb, const char *msg)
344 printf ("\n%s %d: [IN: ", msg, bb->block_num);
345 for (i = 0; i < bb->in_count; ++i)
346 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
348 for (i = 0; i < bb->out_count; ++i)
349 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump every instruction in the block. */
351 for (tree = bb->code; tree; tree = tree->next)
352 mono_print_ins_index (-1, tree);
/*
 * One-time initialization of the cached icall signatures declared above.
 * The strings are "<ret> <arg>..." descriptors parsed by
 * mono_create_icall_signature ().
 */
356 mono_create_helper_signatures (void)
358 helper_sig_domain_get = mono_create_icall_signature ("ptr");
359 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
360 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
361 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
362 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
363 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * Debugger hook: a convenient place to set a native breakpoint when the
 * --break-on-unverified debug option is enabled. The breakpoint call in
 * the then-branch is missing from this extract.
 */
366 static MONO_NEVER_INLINE void
367 break_on_unverified (void)
369 if (mini_get_debug_options ()->break_on_unverified)
/*
 * Record a METHOD_ACCESS compilation failure on 'cfg' with a message
 * naming the inaccessible method and the caller. Owns and frees the
 * temporary name strings; cfg->exception_message takes the g_strdup'd copy.
 */
373 static MONO_NEVER_INLINE void
374 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
376 char *method_fname = mono_method_full_name (method, TRUE);
377 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
379 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
380 g_free (method_fname);
381 g_free (cil_method_fname);
/*
 * Record a FIELD_ACCESS compilation failure on 'cfg'; mirrors
 * method_access_failure () above but for an inaccessible field.
 */
384 static MONO_NEVER_INLINE void
385 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
387 char *method_fname = mono_method_full_name (method, TRUE);
388 char *field_fname = mono_field_full_name (field);
389 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
390 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
391 g_free (method_fname);
392 g_free (field_fname);
/*
 * Record an INLINE_FAILED compilation failure; called by the
 * INLINE_FAILURE macro. Prints the reason when verbose.
 */
395 static MONO_NEVER_INLINE void
396 inline_failure (MonoCompile *cfg, const char *msg)
398 if (cfg->verbose_level >= 2)
399 printf ("inline failed: %s\n", msg);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * Record a GENERIC_SHARING_FAILED compilation failure; called by the
 * GENERIC_SHARING_FAILURE macro with __FILE__/__LINE__ of the call site.
 * NOTE(review): the 'if' line below carries a stray line-continuation
 * backslash (harmless but odd outside a macro), and the 'file' parameter
 * is not used by the printf — only 'line' is printed. Worth confirming
 * against the upstream source before changing.
 */
403 static MONO_NEVER_INLINE void
404 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 if (cfg->verbose_level > 2) \
407 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * Record a gsharedvt (generic sharing for valuetypes) failure; unlike
 * gshared_failure () this also stores a detailed message (including
 * file:line of the call site) in cfg->exception_message.
 */
411 static MONO_NEVER_INLINE void
412 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
414 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
415 if (cfg->verbose_level >= 2)
416 printf ("%s\n", cfg->exception_message);
417 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * UNVERIFIED: bail out of IR generation for unverifiable IL. Under
 * gsharedvt an unverifiable instantiation falls back to a per-instance
 * compile instead of a hard failure.
 * NOTE(review): macro closers are missing from this extract.
 */
421 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
422 * foo<T> (int i) { ldarg.0; box T; }
/* Expands inside mono_method_to_ir (), which provides 'cfg' and labels. */
424 #define UNVERIFIED do { \
425 if (cfg->gsharedvt) { \
426 if (cfg->verbose_level > 2) \
427 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
429 goto exception_exit; \
431 break_on_unverified (); \
435 #define GET_BBLOCK(cfg,tblock,ip) do { \
436 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
438 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
439 NEW_BBLOCK (cfg, (tblock)); \
440 (tblock)->cil_code = (ip); \
441 ADD_BBLOCK (cfg, (tblock)); \
445 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA (base + index<<shift + imm) into the current bblock. */
446 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
447 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
448 (dest)->dreg = alloc_ireg_mp ((cfg)); \
449 (dest)->sreg1 = (sr1); \
450 (dest)->sreg2 = (sr2); \
451 (dest)->inst_imm = (imm); \
452 (dest)->backend.shift_amount = (shift); \
453 MONO_ADD_INS ((cfg)->cbb, (dest)); \
457 /* Emit conversions so both operands of a binary opcode are of the same type */
459 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
461 MonoInst *arg1 = *arg1_ref;
462 MonoInst *arg2 = *arg2_ref;
/* R4/R8 mix: widen the R4 operand to R8 and rewire the binop's sreg. */
465 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
466 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
469 /* Mixing r4/r8 is allowed by the spec */
470 if (arg1->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
474 conv->type = STACK_R8;
478 if (arg2->type == STACK_R4) {
479 int dreg = alloc_freg (cfg);
481 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
482 conv->type = STACK_R8;
/* On 64-bit, sign-extend an I4 operand mixed with a native-size pointer. */
488 #if SIZEOF_REGISTER == 8
489 /* FIXME: Need to add many more cases */
490 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
493 int dr = alloc_preg (cfg);
494 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
495 (ins)->sreg2 = widen->dreg;
/*
 * IL-to-IR expansion macros used inside the big opcode loop of
 * mono_method_to_ir (): they consume operands from the eval stack 'sp',
 * type-check via type_from_op (), and push/emit the resulting
 * instruction. ADD_BINCOND additionally wires up the true/false basic
 * blocks of a conditional branch.
 * NOTE(review): macro closers and some body lines are missing from this
 * extract (original numbering jumps).
 */
500 #define ADD_BINOP(op) do { \
501 MONO_INST_NEW (cfg, ins, (op)); \
503 ins->sreg1 = sp [0]->dreg; \
504 ins->sreg2 = sp [1]->dreg; \
505 type_from_op (cfg, ins, sp [0], sp [1]); \
507 /* Have to insert a widening op */ \
508 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
509 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
510 MONO_ADD_INS ((cfg)->cbb, (ins)); \
511 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
514 #define ADD_UNOP(op) do { \
515 MONO_INST_NEW (cfg, ins, (op)); \
517 ins->sreg1 = sp [0]->dreg; \
518 type_from_op (cfg, ins, sp [0], NULL); \
520 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
521 MONO_ADD_INS ((cfg)->cbb, (ins)); \
522 *sp++ = mono_decompose_opcode (cfg, ins); \
525 #define ADD_BINCOND(next_block) do { \
528 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
529 cmp->sreg1 = sp [0]->dreg; \
530 cmp->sreg2 = sp [1]->dreg; \
531 type_from_op (cfg, cmp, sp [0], sp [1]); \
533 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
534 type_from_op (cfg, ins, sp [0], sp [1]); \
535 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
536 GET_BBLOCK (cfg, tblock, target); \
537 link_bblock (cfg, cfg->cbb, tblock); \
538 ins->inst_true_bb = tblock; \
539 if ((next_block)) { \
540 link_bblock (cfg, cfg->cbb, (next_block)); \
541 ins->inst_false_bb = (next_block); \
542 start_new_bblock = 1; \
544 GET_BBLOCK (cfg, tblock, ip); \
545 link_bblock (cfg, cfg->cbb, tblock); \
546 ins->inst_false_bb = tblock; \
547 start_new_bblock = 2; \
549 if (sp != stack_start) { \
550 handle_stack_args (cfg, stack_start, sp - stack_start); \
551 CHECK_UNVERIFIABLE (cfg); \
553 MONO_ADD_INS (cfg->cbb, cmp); \
554 MONO_ADD_INS (cfg->cbb, ins); \
558 * link_bblock: Links two basic blocks
560 * links two basic blocks in the control flow graph, the 'from'
561 * argument is the starting block and the 'to' argument is the block
562 * the control flow ends to after 'from'.
/*
 * Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping duplicates.
 * Edge arrays are reallocated from cfg->mempool (grow-by-one; fine for
 * the small edge counts typical of CIL).
 */
565 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
567 MonoBasicBlock **newa;
/* Verbose edge tracing, distinguishing entry/exit pseudo-blocks. */
571 if (from->cil_code) {
573 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
575 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
578 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
580 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
585 for (i = 0; i < from->out_count; ++i) {
586 if (to == from->out_bb [i]) {
592 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
593 for (i = 0; i < from->out_count; ++i) {
594 newa [i] = from->out_bb [i];
/* Same dance for the symmetric in-edge on 'to'. */
602 for (i = 0; i < to->in_count; ++i) {
603 if (from == to->in_bb [i]) {
609 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
610 for (i = 0; i < to->in_count; ++i) {
611 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
620 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
622 link_bblock (cfg, from, to);
626 * mono_find_block_region:
628 * We mark each basic block with a region ID. We use that to avoid BB
629 * optimizations when blocks are in different regions.
632 * A region token that encodes where this region is, and information
633 * about the clause owner for this block.
635 * The region encodes the try/catch/filter clause that owns this block
636 * as well as the type. -1 is a special value that represents a block
637 * that is in none of try/catch/filter.
640 mono_find_block_region (MonoCompile *cfg, int offset)
642 MonoMethodHeader *header = cfg->header;
643 MonoExceptionClause *clause;
/* First pass: handler side — filter, finally, fault, catch regions.
 * Token layout: (clause index + 1) << 8 | region kind | clause flags. */
646 for (i = 0; i < header->num_clauses; ++i) {
647 clause = &header->clauses [i];
648 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
649 (offset < (clause->handler_offset)))
650 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
652 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
653 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
654 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
655 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
656 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
658 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) side of each clause. */
661 for (i = 0; i < header->num_clauses; ++i) {
662 clause = &header->clauses [i];
664 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
665 return ((i + 1) << 8) | clause->flags;
/*
 * Collect the exception clauses of the given 'type' (e.g. finally) that
 * enclose 'ip' but not 'target' — i.e. the handlers that must run when
 * branching from ip to target. Returns a GList of MonoExceptionClause*.
 */
672 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
674 MonoMethodHeader *header = cfg->header;
675 MonoExceptionClause *clause;
679 for (i = 0; i < header->num_clauses; ++i) {
680 clause = &header->clauses [i];
681 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
682 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
683 if (clause->flags == type)
684 res = g_list_append (res, clause);
/*
 * Get-or-create the stack-pointer variable for an EH region, cached in
 * cfg->spvars keyed by region id. Marked MONO_INST_VOLATILE so the
 * register allocator leaves it on the stack.
 */
691 mono_create_spvar_for_region (MonoCompile *cfg, int region)
695 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
699 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
700 /* prevent it from being register allocated */
701 var->flags |= MONO_INST_VOLATILE;
703 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for a handler offset. */
707 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
709 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Get-or-create the variable holding the in-flight exception object for
 * the handler at 'offset', cached in cfg->exvars. Volatile for the same
 * reason as the spvars above.
 */
713 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
717 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
721 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
722 /* prevent it from being register allocated */
723 var->flags |= MONO_INST_VOLATILE;
725 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
731 * Returns the type used in the eval stack when @type is loaded.
732 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from the CIL type.
 * NOTE(review): several case labels are missing from this extract. */
735 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
739 type = mini_get_underlying_type (cfg, type);
740 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
742 inst->type = STACK_MP;
747 switch (type->type) {
749 inst->type = STACK_INV;
757 inst->type = STACK_I4;
762 case MONO_TYPE_FNPTR:
763 inst->type = STACK_PTR;
765 case MONO_TYPE_CLASS:
766 case MONO_TYPE_STRING:
767 case MONO_TYPE_OBJECT:
768 case MONO_TYPE_SZARRAY:
769 case MONO_TYPE_ARRAY:
770 inst->type = STACK_OBJ;
774 inst->type = STACK_I8;
777 inst->type = cfg->r4_stack_type;
780 inst->type = STACK_R8;
782 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying integral type. */
783 if (type->data.klass->enumtype) {
784 type = mono_class_enum_basetype (type->data.klass);
788 inst->type = STACK_VTYPE;
791 case MONO_TYPE_TYPEDBYREF:
792 inst->klass = mono_defaults.typed_reference_class;
793 inst->type = STACK_VTYPE;
795 case MONO_TYPE_GENERICINST:
796 type = &type->data.generic_class->container_class->byval_arg;
/* Shared generic parameters: gsharedvt types stay as VTYPE, others
 * recurse on the constrained underlying type. */
800 g_assert (cfg->generic_sharing_context);
801 if (mini_is_gsharedvt_type (cfg, type)) {
802 g_assert (cfg->gsharedvt);
803 inst->type = STACK_VTYPE;
805 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
809 g_error ("unknown type 0x%02x in eval stack type", type->type);
814 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Result-type matrices indexed by [src1->type][src2->type] (STACK_* values)
 * and per-source-type opcode deltas. Rows with a 9th entry cover the
 * STACK_R4 column; shorter rows rely on C's implicit zero fill —
 * presumably STACK_INV is 0, TODO confirm against mini.h.
 */
817 bin_num_table [STACK_MAX] [STACK_MAX] = {
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
820 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
821 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
823 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
826 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation, per source stack type. */
831 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
834 /* reduce the size of this table */
836 bin_int_table [STACK_MAX] [STACK_MAX] = {
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: 0 = invalid, nonzero values encode which
 * comparison kinds are permitted (see users in type_from_op). */
848 bin_comp_table [STACK_MAX] [STACK_MAX] = {
849 /* Inv i L p F & O vt r4 */
851 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
852 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
853 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
854 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
855 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
856 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
857 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
858 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
861 /* reduce the size of this table */
863 shift_table [STACK_MAX] [STACK_MAX] = {
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
867 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
875 * Tables to map from the non-specific opcode to the matching
876 * type-specific opcode.
878 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
880 binops_op_map [STACK_MAX] = {
881 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
884 /* handles from CEE_NEG to CEE_CONV_U8 */
886 unops_op_map [STACK_MAX] = {
887 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
890 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
892 ovfops_op_map [STACK_MAX] = {
893 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
896 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
898 ovf2ops_op_map [STACK_MAX] = {
899 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
902 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
904 ovf3ops_op_map [STACK_MAX] = {
905 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
908 /* handles from CEE_BEQ to CEE_BLT_UN */
910 beqops_op_map [STACK_MAX] = {
911 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
914 /* handles from CEE_CEQ to CEE_CLT_UN */
916 ceqops_op_map [STACK_MAX] = {
917 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
921 * Sets ins->type (the type on the eval stack) according to the
922 * type of the opcode and the arguments to it.
923 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
925 * FIXME: this function sets ins->type unconditionally in some cases, but
926 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels and break statements are missing from
 * this extract; comments below describe only the visible lines. */
929 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
931 switch (ins->opcode) {
/* Arithmetic binops: result type from table, then specialize opcode. */
938 /* FIXME: check unverifiable args for STACK_MP */
939 ins->type = bin_num_table [src1->type] [src2->type];
940 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor ...). */
947 ins->type = bin_int_table [src1->type] [src2->type];
948 ins->opcode += binops_op_map [ins->type];
/* Shifts: result follows the shifted operand. */
953 ins->type = shift_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick L/R/F/I variant from the first operand's width. */
959 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
960 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
961 ins->opcode = OP_LCOMPARE;
962 else if (src1->type == STACK_R4)
963 ins->opcode = OP_RCOMPARE;
964 else if (src1->type == STACK_R8)
965 ins->opcode = OP_FCOMPARE;
967 ins->opcode = OP_ICOMPARE;
969 case OP_ICOMPARE_IMM:
970 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
971 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
972 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-compare opcodes. */
984 ins->opcode += beqops_op_map [src1->type];
987 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only legality bit 1 of the table permits them. */
994 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
995 ins->opcode += ceqops_op_map [src1->type];
/* Unary negation. */
999 ins->type = neg_table [src1->type];
1000 ins->opcode += unops_op_map [ins->type];
/* NOT: valid only on integer stack types. */
1003 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1004 ins->type = src1->type;
1006 ins->type = STACK_INV;
1007 ins->opcode += unops_op_map [ins->type];
/* Narrowing integer conversions produce I4. */
1013 ins->type = STACK_I4;
1014 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
1017 ins->type = STACK_R8;
1018 switch (src1->type) {
1021 ins->opcode = OP_ICONV_TO_R_UN;
1024 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
1028 case CEE_CONV_OVF_I1:
1029 case CEE_CONV_OVF_U1:
1030 case CEE_CONV_OVF_I2:
1031 case CEE_CONV_OVF_U2:
1032 case CEE_CONV_OVF_I4:
1033 case CEE_CONV_OVF_U4:
1034 ins->type = STACK_I4;
1035 ins->opcode += ovf3ops_op_map [src1->type];
1037 case CEE_CONV_OVF_I_UN:
1038 case CEE_CONV_OVF_U_UN:
1039 ins->type = STACK_PTR;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1042 case CEE_CONV_OVF_I1_UN:
1043 case CEE_CONV_OVF_I2_UN:
1044 case CEE_CONV_OVF_I4_UN:
1045 case CEE_CONV_OVF_U1_UN:
1046 case CEE_CONV_OVF_U2_UN:
1047 case CEE_CONV_OVF_U4_UN:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; pointer-width dependent. */
1052 ins->type = STACK_PTR;
1053 switch (src1->type) {
1055 ins->opcode = OP_ICONV_TO_U;
1059 #if SIZEOF_VOID_P == 8
1060 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_MOVE;
1066 ins->opcode = OP_LCONV_TO_U;
1069 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
1075 ins->type = STACK_I8;
1076 ins->opcode += unops_op_map [src1->type];
1078 case CEE_CONV_OVF_I8:
1079 case CEE_CONV_OVF_U8:
1080 ins->type = STACK_I8;
1081 ins->opcode += ovf3ops_op_map [src1->type];
1083 case CEE_CONV_OVF_U8_UN:
1084 case CEE_CONV_OVF_I8_UN:
1085 ins->type = STACK_I8;
1086 ins->opcode += ovf2ops_op_map [src1->type];
/* Floating-point conversions. */
1089 ins->type = cfg->r4_stack_type;
1090 ins->opcode += unops_op_map [src1->type];
1093 ins->type = STACK_R8;
1094 ins->opcode += unops_op_map [src1->type];
1097 ins->type = STACK_R8;
1101 ins->type = STACK_I4;
1102 ins->opcode += ovfops_op_map [src1->type];
1105 case CEE_CONV_OVF_I:
1106 case CEE_CONV_OVF_U:
1107 ins->type = STACK_PTR;
1108 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no FP variants exist. */
1111 case CEE_ADD_OVF_UN:
1113 case CEE_MUL_OVF_UN:
1115 case CEE_SUB_OVF_UN:
1116 ins->type = bin_num_table [src1->type] [src2->type];
1117 ins->opcode += ovfops_op_map [src1->type];
1118 if (ins->type == STACK_R8)
1119 ins->type = STACK_INV;
/* Memory loads: result type follows the load width. */
1121 case OP_LOAD_MEMBASE:
1122 ins->type = STACK_PTR;
1124 case OP_LOADI1_MEMBASE:
1125 case OP_LOADU1_MEMBASE:
1126 case OP_LOADI2_MEMBASE:
1127 case OP_LOADU2_MEMBASE:
1128 case OP_LOADI4_MEMBASE:
1129 case OP_LOADU4_MEMBASE:
1130 ins->type = STACK_PTR;
1132 case OP_LOADI8_MEMBASE:
1133 ins->type = STACK_I8;
1135 case OP_LOADR4_MEMBASE:
1136 ins->type = cfg->r4_stack_type;
1138 case OP_LOADR8_MEMBASE:
1139 ins->type = STACK_R8;
1142 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object class when no better klass is known. */
1146 if (ins->type == STACK_MP)
1147 ins->klass = mono_defaults.object_class;
/* Stack types produced by the ldind.* family, in opcode order;
 * the array declaration line is missing from this extract. */
1152 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1158 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Validate that the eval-stack values in 'args' are compatible with the
 * parameter types of 'sig'. NOTE(review): most case labels and the
 * return statements are missing from this extract; 'this' as a parameter
 * name would clash with C++ but is legal C.
 */
1163 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1167 switch (args->type) {
1177 for (i = 0; i < sig->param_count; ++i) {
1178 switch (args [i].type) {
/* byref mismatch checks between stack value and declared parameter. */
1182 if (!sig->params [i]->byref)
1186 if (sig->params [i]->byref)
1188 switch (sig->params [i]->type) {
1189 case MONO_TYPE_CLASS:
1190 case MONO_TYPE_STRING:
1191 case MONO_TYPE_OBJECT:
1192 case MONO_TYPE_SZARRAY:
1193 case MONO_TYPE_ARRAY:
/* Floating-point stack values must map to R4/R8 parameters. */
1200 if (sig->params [i]->byref)
1202 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1211 /*if (!param_table [args [i].type] [sig->params [i]->type])
1219 * When we need a pointer to the current domain many times in a method, we
1220 * call mono_domain_get() once and we store the result in a local variable.
1221 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the cached domain variable on first use. */
1223 inline static MonoInst *
1224 mono_get_domainvar (MonoCompile *cfg)
1226 if (!cfg->domainvar)
1227 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1228 return cfg->domainvar;
1232 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful on architectures that
 * define MONO_ARCH_NEED_GOT_VAR and when compiling AOT. The early-return
 * body for the !compile_aot case is missing from this extract. */
1236 mono_get_got_var (MonoCompile *cfg)
1238 #ifdef MONO_ARCH_NEED_GOT_VAR
1239 if (!cfg->compile_aot)
1241 if (!cfg->got_var) {
1242 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1244 return cfg->got_var;
/*
 * Lazily create the rgctx (runtime generic context) variable used by
 * shared generic code. Only valid when a generic sharing context exists.
 */
1251 mono_get_vtable_var (MonoCompile *cfg)
1253 g_assert (cfg->generic_sharing_context);
1255 if (!cfg->rgctx_var) {
1256 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 /* force the var to be stack allocated */
1258 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1261 return cfg->rgctx_var;
/*
 * Inverse of type_to_eval_stack_type (): map an instruction's STACK_*
 * classification back to a canonical MonoType*.
 */
1265 type_from_stack_type (MonoInst *ins) {
1266 switch (ins->type) {
1267 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1268 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1269 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1270 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1271 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the klass's this_arg (byref) type. */
1273 return &ins->klass->this_arg;
1274 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1275 case STACK_VTYPE: return &ins->klass->byval_arg;
1277 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType (after stripping enum wrappers via
 * mono_type_get_underlying_type) to its STACK_* evaluation-stack
 * classification.  Generic instances that are value types are classified
 * like value types.  NOTE(review): several case labels and return
 * statements are elided in this listing.
 */
1282 static G_GNUC_UNUSED int
1283 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1285 t = mono_type_get_underlying_type (t);
1297 case MONO_TYPE_FNPTR:
1299 case MONO_TYPE_CLASS:
1300 case MONO_TYPE_STRING:
1301 case MONO_TYPE_OBJECT:
1302 case MONO_TYPE_SZARRAY:
1303 case MONO_TYPE_ARRAY:
1309 return cfg->r4_stack_type;
1312 case MONO_TYPE_VALUETYPE:
1313 case MONO_TYPE_TYPEDBYREF:
1315 case MONO_TYPE_GENERICINST:
1316 if (mono_type_generic_inst_is_valuetype (t))
1322 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element MonoClass implied by a CIL ldelem.*/stelem.* opcode
 * (e.g. CEE_LDELEM_REF / CEE_STELEM_REF -> object_class).  Unknown opcodes
 * abort.  NOTE(review): most case labels are elided in this listing; only
 * the return statements and the REF cases are visible.
 */
1329 array_access_to_klass (int opcode)
1333 return mono_defaults.byte_class;
1335 return mono_defaults.uint16_class;
1338 return mono_defaults.int_class;
1341 return mono_defaults.sbyte_class;
1344 return mono_defaults.int16_class;
1347 return mono_defaults.int32_class;
1349 return mono_defaults.uint32_class;
1352 return mono_defaults.int64_class;
1355 return mono_defaults.single_class;
1358 return mono_defaults.double_class;
1359 case CEE_LDELEM_REF:
1360 case CEE_STELEM_REF:
1361 return mono_defaults.object_class;
1363 g_assert_not_reached ();
1369 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a temp variable for stack slot SLOT carrying INS's type.
 * Integer-like slots are cached in cfg->intvars (keyed by stack type and
 * slot) so they can be shared; slots beyond header->max_stack (possible
 * with inlining) always get a fresh variable.
 */
1372 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1377 /* inlining can result in deeper stacks */
1378 if (slot >= cfg->header->max_stack)
1379 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1381 pos = ins->type - 1 + slot * STACK_MAX;
1383 switch (ins->type) {
1390 if ((vnum = cfg->intvars [pos]))
1391 return cfg->varinfo [vnum];
1392 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1393 cfg->intvars [pos] = res->inst_c0;
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT (and no generic context is active), record the
 * image+token pair for KEY in cfg->token_info_hash so the AOT compiler can
 * later resolve KEY back to metadata.  See the inline comment for why a
 * generic context disables this.
 */
1402 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1405 * Don't use this if a generic_context is set, since that means AOT can't
1406 * look up the method using just the image+token.
1407 * table == 0 means this is a reference made from a wrapper.
1409 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1410 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1411 jump_info_token->image = image;
1412 jump_info_token->token = token;
1413 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1418 * This function is called to handle items that are left on the evaluation stack
1419 * at basic block boundaries. What happens is that we save the values to local variables
1420 * and we reload them later when first entering the target basic block (with the
1421 * handle_loaded_temps () function).
1422 * A single join point will use the same variables (stored in the array bb->out_stack or
1423 * bb->in_stack, if the basic block is before or after the join point).
1425 * This function needs to be called _before_ emitting the last instruction of
1426 * the bb (i.e. before emitting a branch).
1427 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spill the COUNT values in SP (left on the evaluation stack at a bblock
 * boundary) into shared temp variables so the successor bblocks can reload
 * them; see the long comment above this function.  Sets cfg->unverifiable
 * when a join point has mismatched stack depths.
 */
1430 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1433 MonoBasicBlock *bb = cfg->cbb;
1434 MonoBasicBlock *outb;
1435 MonoInst *inst, **locals;
1440 if (cfg->verbose_level > 3)
1441 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick (or inherit) the out_stack variables. */
1442 if (!bb->out_scount) {
1443 bb->out_scount = count;
1444 //printf ("bblock %d has out:", bb->block_num);
1446 for (i = 0; i < bb->out_count; ++i) {
1447 outb = bb->out_bb [i];
1448 /* exception handlers are linked, but they should not be considered for stack args */
1449 if (outb->flags & BB_EXCEPTION_HANDLER)
1451 //printf (" %d", outb->block_num);
/* A successor already has an in_stack: share its variables. */
1452 if (outb->in_stack) {
1454 bb->out_stack = outb->in_stack;
1460 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1461 for (i = 0; i < count; ++i) {
1463 * try to reuse temps already allocated for this purpose, if they occupy the same
1464 * stack slot and if they are of the same type.
1465 * This won't cause conflicts since if 'local' is used to
1466 * store one of the values in the in_stack of a bblock, then
1467 * the same variable will be used for the same outgoing stack
1469 * This doesn't work when inlining methods, since the bblocks
1470 * in the inlined methods do not inherit their in_stack from
1471 * the bblock they are inlined to. See bug #58863 for an
1474 if (cfg->inlined_method)
1475 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1477 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bblock's out_stack to successors that have none yet. */
1482 for (i = 0; i < bb->out_count; ++i) {
1483 outb = bb->out_bb [i];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER)
1487 if (outb->in_scount) {
1488 if (outb->in_scount != bb->out_scount) {
1489 cfg->unverifiable = TRUE;
1492 continue; /* check they are the same locals */
1494 outb->in_scount = count;
1495 outb->in_stack = bb->out_stack;
/* Emit the actual stores of SP into the chosen temp variables. */
1498 locals = bb->out_stack;
1500 for (i = 0; i < count; ++i) {
1501 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1502 inst->cil_code = sp [i]->cil_code;
1503 sp [i] = locals [i];
1504 if (cfg->verbose_level > 3)
1505 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1509 * It is possible that the out bblocks already have in_stack assigned, and
1510 * the in_stacks differ. In this case, we will store to all the different
1517 /* Find a bblock which has a different in_stack */
1519 while (bindex < bb->out_count) {
1520 outb = bb->out_bb [bindex];
1521 /* exception handlers are linked, but they should not be considered for stack args */
1522 if (outb->flags & BB_EXCEPTION_HANDLER) {
1526 if (outb->in_stack != locals) {
1527 for (i = 0; i < count; ++i) {
1528 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1529 inst->cil_code = sp [i]->cil_code;
1530 sp [i] = locals [i];
1531 if (cfg->verbose_level > 3)
1532 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1534 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that sets intf_bit_reg to a nonzero value iff the interface
 * bitmap located at [base_reg + offset] has the bit for klass's
 * interface_id set.  Three strategies are visible below:
 *  - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match icall;
 *  - AOT: compute bit index at runtime from an AOT IID constant;
 *  - JIT: use the compile-time interface_id directly (byte load + AND mask).
 */
1544 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1546 int ibitmap_reg = alloc_preg (cfg);
1547 #ifdef COMPRESSED_INTERFACE_BITMAP
1549 MonoInst *res, *ins;
1550 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1551 MONO_ADD_INS (cfg->cbb, ins);
1553 if (cfg->compile_aot)
1554 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1556 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1557 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1558 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1560 int ibitmap_byte_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1564 if (cfg->compile_aot) {
/* IID is only known at runtime: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1565 int iid_reg = alloc_preg (cfg);
1566 int shifted_iid_reg = alloc_preg (cfg);
1567 int ibitmap_byte_address_reg = alloc_preg (cfg);
1568 int masked_iid_reg = alloc_preg (cfg);
1569 int iid_one_bit_reg = alloc_preg (cfg);
1570 int iid_bit_reg = alloc_preg (cfg);
1571 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1573 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1576 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1577 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: interface_id is a compile-time constant, so fold the byte offset and mask. */
1580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1587 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1588 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against a MonoClass's interface_bitmap field. */
1591 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1593 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1597 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1598 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against a MonoVTable's interface_bitmap field. */
1601 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1603 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1607 * Emit code which checks whether the interface id of @klass is smaller
1608 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Emit a range check of klass's interface id against max_iid_reg.  On
 * failure, branch to false_target when one is given, otherwise throw
 * InvalidCastException.  In AOT mode the IID is loaded from a patchable
 * constant instead of being inlined.
 */
1611 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 if (cfg->compile_aot) {
1615 int iid_reg = alloc_preg (cfg);
1616 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1617 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 /* Same as above, but obtains max_iid from a vtable */
/* Load max_interface_id from a MonoVTable, then do the max-iid range check. */
1629 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1630 MonoBasicBlock *false_target)
1632 int max_iid_reg = alloc_preg (cfg);
1634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1635 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1638 /* Same as above, but obtains max_iid from a klass */
/* Load max_interface_id from a MonoClass, then do the max-iid range check. */
1640 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1641 MonoBasicBlock *false_target)
1643 int max_iid_reg = alloc_preg (cfg);
1645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1646 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass test for the class in klass_reg against
 * KLASS, using the supertypes array: compare the entry at depth
 * (klass->idepth - 1) with KLASS (or with klass_ins / an AOT class
 * constant).  Branches to true_target on match; an idepth that is too
 * small branches to false_target.
 */
1650 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1652 int idepth_reg = alloc_preg (cfg);
1653 int stypes_reg = alloc_preg (cfg);
1654 int stype = alloc_preg (cfg);
1656 mono_class_setup_supertypes (klass);
/* Only check idepth when it may exceed the statically-sized supertable. */
1658 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1659 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1666 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1667 } else if (cfg->compile_aot) {
1668 int const_reg = alloc_preg (cfg);
1669 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1670 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper over mini_emit_isninst_cast_inst without a klass_ins. */
1678 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1680 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against a vtable register: range-check the
 * IID, test the interface bit, then branch to true_target (when set) or
 * throw InvalidCastException when the bit is clear.
 */
1684 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1686 int intf_reg = alloc_preg (cfg);
1688 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1689 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1694 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1698 * Variant of the above that takes a register to the class, not the vtable.
/* Same as mini_emit_iface_cast, but klass_reg holds a MonoClass*, not a vtable. */
1701 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1703 int intf_bit_reg = alloc_preg (cfg);
1705 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1706 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1711 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check of klass_reg against KLASS (or
 * klass_inst->dreg when given, or an AOT class constant), throwing
 * InvalidCastException on mismatch.
 */
1715 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1718 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1719 } else if (cfg->compile_aot) {
1720 int const_reg = alloc_preg (cfg);
1721 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1722 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper over mini_emit_class_check_inst without a klass_inst. */
1730 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1732 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare klass_reg with KLASS (AOT constant when compiling AOT) and emit
 * a conditional branch with BRANCH_OP to TARGET.
 */
1736 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1738 if (cfg->compile_aot) {
1739 int const_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1745 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1749 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in klass_reg against KLASS, throwing
 * InvalidCastException on failure.  The visible array branch checks rank,
 * then recursively checks the element (cast_class), with special handling
 * for object/enum element types; the non-array branch walks the supertypes
 * array like mini_emit_isninst_cast_inst.  obj_reg == -1 skips the
 * "is a vector" bounds check (used for arrays of arrays, see comment).
 */
1752 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1755 int rank_reg = alloc_preg (cfg);
1756 int eclass_reg = alloc_preg (cfg);
1758 g_assert (!klass_inst);
1759 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1761 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1762 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1764 if (klass->cast_class == mono_defaults.object_class) {
1765 int parent_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1767 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1768 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1769 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1770 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1771 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1772 } else if (klass->cast_class == mono_defaults.enum_class) {
1773 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1774 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1775 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1777 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1778 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1781 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1782 /* Check that the object is a vector too */
1783 int bounds_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1785 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1786 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1789 int idepth_reg = alloc_preg (cfg);
1790 int stypes_reg = alloc_preg (cfg);
1791 int stype = alloc_preg (cfg);
1793 mono_class_setup_supertypes (klass);
1795 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1796 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1798 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1801 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1802 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper over mini_emit_castclass_inst without a klass_inst. */
1807 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1809 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that fills SIZE bytes at [destreg + offset] with VAL
 * (only val == 0 is supported, see the assert).  Small aligned sizes use a
 * single immediate store; larger sizes store a zero register in 8/4/2/1
 * byte chunks, with the wide stores guarded by !NO_UNALIGNED_ACCESS.
 */
1813 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1817 g_assert (val == 0);
1822 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1825 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1828 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1833 #if SIZEOF_REGISTER == 8
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1841 val_reg = alloc_preg (cfg);
1843 if (SIZEOF_REGISTER == 8)
1844 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1846 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1849 /* This could be optimized further if necessary */
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1858 #if !NO_UNALIGNED_ACCESS
1859 if (SIZEOF_REGISTER == 8) {
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1874 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset], in 8/4/2/1-byte load/store pairs (wide accesses
 * guarded by !NO_UNALIGNED_ACCESS).  Size is asserted < 10000 to bound
 * code expansion (see the FIXME).
 */
1891 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1898 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1899 g_assert (size < 10000);
1902 /* This could be optimized further if necessary */
1904 cur_reg = alloc_preg (cfg);
1905 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1913 #if !NO_UNALIGNED_ACCESS
1914 if (SIZEOF_REGISTER == 8) {
1916 cur_reg = alloc_preg (cfg);
1917 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1918 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1943 cur_reg = alloc_preg (cfg);
1944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR that stores sreg1 into the TLS slot identified by tls_key.
 * AOT uses OP_TLS_SET_REG with a patchable TLS-offset constant; the JIT
 * uses OP_TLS_SET with the offset resolved at compile time.
 */
1953 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1957 if (cfg->compile_aot) {
1958 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1959 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1961 ins->sreg2 = c->dreg;
1962 MONO_ADD_INS (cfg->cbb, ins);
1964 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1966 ins->inst_offset = mini_get_tls_offset (tls_key);
1967 MONO_ADD_INS (cfg->cbb, ins);
1974 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 * Emit IR pushing the method's LMF onto the LMF stack (see the pseudo-code
 * in the comment below).  Fast path stores the LMF directly in TLS; the
 * slow path obtains lmf_addr (via intrinsic, inlined pthread_getspecific,
 * or the mono_get_lmf_addr icall), caches it in cfg->lmf_addr_var, links
 * previous_lmf, and stores the new LMF through lmf_addr.
 */
1977 emit_push_lmf (MonoCompile *cfg)
1980 * Emit IR to push the LMF:
1981 * lmf_addr = <lmf_addr from tls>
1982 * lmf->lmf_addr = lmf_addr
1983 * lmf->prev_lmf = *lmf_addr
1986 int lmf_reg, prev_lmf_reg;
1987 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS (TLS_KEY_LMF supported). */
1992 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1993 /* Load current lmf */
1994 lmf_ins = mono_get_lmf_intrinsic (cfg);
1996 MONO_ADD_INS (cfg->cbb, lmf_ins);
1997 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1998 lmf_reg = ins->dreg;
1999 /* Save previous_lmf */
2000 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2002 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2005 * Store lmf_addr in a variable, so it can be allocated to a global register.
2007 if (!cfg->lmf_addr_var)
2008 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Compute lmf_addr = &jit_tls->lmf from the jit_tls intrinsic. */
2011 ins = mono_get_jit_tls_intrinsic (cfg);
2013 int jit_tls_dreg = ins->dreg;
2015 MONO_ADD_INS (cfg->cbb, ins);
2016 lmf_reg = alloc_preg (cfg);
2017 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2019 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2022 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2024 MONO_ADD_INS (cfg->cbb, lmf_ins);
2027 MonoInst *args [16], *jit_tls_ins, *ins;
2029 /* Inline mono_get_lmf_addr () */
2030 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2032 /* Load mono_jit_tls_id */
2033 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2034 /* call pthread_getspecific () */
2035 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2036 /* lmf_addr = &jit_tls->lmf */
2037 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2040 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2044 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2046 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2047 lmf_reg = ins->dreg;
2049 prev_lmf_reg = alloc_preg (cfg);
2050 /* Save previous_lmf */
2051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2052 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make this LMF the head of the LMF stack: *lmf_addr = lmf. */
2054 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2061 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 * Emit IR popping this method's LMF from the LMF stack.  Fast path writes
 * previous_lmf straight back into the TLS LMF slot; slow path restores
 * *(lmf_addr) = lmf->previous_lmf via the cached lmf_addr variable.
 */
2064 emit_pop_lmf (MonoCompile *cfg)
2066 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2072 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2073 lmf_reg = ins->dreg;
2075 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2076 /* Load previous_lmf */
2077 prev_lmf_reg = alloc_preg (cfg);
2078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2080 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2083 * Emit IR to pop the LMF:
2084 * *(lmf->lmf_addr) = lmf->prev_lmf
2086 /* This could be called before emit_push_lmf () */
2087 if (!cfg->lmf_addr_var)
2088 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2089 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2091 prev_lmf_reg = alloc_preg (cfg);
2092 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2093 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as the
 * single argument.  Skipped for inlined methods and when the
 * MONO_PROFILE_ENTER_LEAVE option is off.
 */
2098 emit_instrumentation_call (MonoCompile *cfg, void *func)
2100 MonoInst *iargs [1];
2103 * Avoid instrumenting inlined methods since it can
2104 * distort profiling results.
2106 if (cfg->method != cfg->current_method)
2109 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2110 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2111 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call opcode variant for a given return type, crossed with the
 * calli / virtual-call flags (e.g. FCALL for floats, LCALL for 64-bit ints,
 * VCALL for value types).  Enums and generic instances are unwrapped and
 * the switch re-entered; unknown types abort via g_error.
 */
2116 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2119 type = mini_get_underlying_type (cfg, type);
2120 switch (type->type) {
2121 case MONO_TYPE_VOID:
2122 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2129 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2133 case MONO_TYPE_FNPTR:
2134 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2135 case MONO_TYPE_CLASS:
2136 case MONO_TYPE_STRING:
2137 case MONO_TYPE_OBJECT:
2138 case MONO_TYPE_SZARRAY:
2139 case MONO_TYPE_ARRAY:
2140 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2143 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2146 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2148 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2150 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2151 case MONO_TYPE_VALUETYPE:
2152 if (type->data.klass->enumtype) {
/* Unwrap the enum to its base type and re-run the switch. */
2153 type = mono_class_enum_basetype (type->data.klass);
2156 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2157 case MONO_TYPE_TYPEDBYREF:
2158 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2159 case MONO_TYPE_GENERICINST:
2160 type = &type->data.generic_class->container_class->byval_arg;
2163 case MONO_TYPE_MVAR:
2165 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2167 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2173 * target_type_is_incompatible:
2174 * @cfg: MonoCompile context
2176 * Check that the item @arg on the evaluation stack can be stored
2177 * in the target type (can be a local, or field, etc).
2178 * The cfg arg can be used to check if we need verification or just
2181 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 * Return nonzero when the evaluation-stack item ARG cannot be stored into
 * TARGET (see the descriptive comment above this function).  Each case
 * matches the target's MonoType against the permitted STACK_* kinds; for
 * value types the MonoClass must also match.  Several FIXMEs note that
 * reference-type compatibility is not fully checked.
 */
2184 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2186 MonoType *simple_type;
2189 if (target->byref) {
2190 /* FIXME: check that the pointed to types match */
2191 if (arg->type == STACK_MP)
2192 return arg->klass != mono_class_from_mono_type (target);
2193 if (arg->type == STACK_PTR)
2198 simple_type = mini_get_underlying_type (cfg, target);
2199 switch (simple_type->type) {
2200 case MONO_TYPE_VOID:
2208 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2212 /* STACK_MP is needed when setting pinned locals */
2213 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2218 case MONO_TYPE_FNPTR:
2220 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2221 * in native int. (#688008).
2223 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2226 case MONO_TYPE_CLASS:
2227 case MONO_TYPE_STRING:
2228 case MONO_TYPE_OBJECT:
2229 case MONO_TYPE_SZARRAY:
2230 case MONO_TYPE_ARRAY:
2231 if (arg->type != STACK_OBJ)
2233 /* FIXME: check type compatibility */
2237 if (arg->type != STACK_I8)
2241 if (arg->type != cfg->r4_stack_type)
2245 if (arg->type != STACK_R8)
2248 case MONO_TYPE_VALUETYPE:
2249 if (arg->type != STACK_VTYPE)
2251 klass = mono_class_from_mono_type (simple_type);
2252 if (klass != arg->klass)
2255 case MONO_TYPE_TYPEDBYREF:
2256 if (arg->type != STACK_VTYPE)
2258 klass = mono_class_from_mono_type (simple_type);
2259 if (klass != arg->klass)
2262 case MONO_TYPE_GENERICINST:
2263 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2266 klass = mono_class_from_mono_type (simple_type);
2267 /* The second cases is needed when doing partial sharing */
2268 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2272 if (arg->type != STACK_OBJ)
2274 /* FIXME: check type compatibility */
2278 case MONO_TYPE_MVAR:
2279 g_assert (cfg->generic_sharing_context);
2280 if (mini_type_var_is_vt (cfg, simple_type)) {
2281 if (arg->type != STACK_VTYPE)
2284 if (arg->type != STACK_OBJ)
2289 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2295 * Prepare arguments for passing to a function call.
2296 * Return a non-zero value if the arguments can't be passed to the given
2298 * The type checks are not yet complete and some conversions may need
2299 * casts on 32 or 64 bit architectures.
2301 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 * Return nonzero when ARGS cannot be passed to a call with signature SIG
 * (see the comment above; incomplete by design, and a FIXME suggests
 * reimplementing via target_type_is_incompatible).  Checks hasthis first,
 * then each parameter's stack type against the signature type.
 */
2304 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2306 MonoType *simple_type;
2310 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2314 for (i = 0; i < sig->param_count; ++i) {
2315 if (sig->params [i]->byref) {
2316 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2320 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2322 switch (simple_type->type) {
2323 case MONO_TYPE_VOID:
2332 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2338 case MONO_TYPE_FNPTR:
2339 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2342 case MONO_TYPE_CLASS:
2343 case MONO_TYPE_STRING:
2344 case MONO_TYPE_OBJECT:
2345 case MONO_TYPE_SZARRAY:
2346 case MONO_TYPE_ARRAY:
2347 if (args [i]->type != STACK_OBJ)
2352 if (args [i]->type != STACK_I8)
2356 if (args [i]->type != cfg->r4_stack_type)
2360 if (args [i]->type != STACK_R8)
2363 case MONO_TYPE_VALUETYPE:
2364 if (simple_type->data.klass->enumtype) {
/* Unwrap the enum and re-run the switch on its base type. */
2365 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2368 if (args [i]->type != STACK_VTYPE)
2371 case MONO_TYPE_TYPEDBYREF:
2372 if (args [i]->type != STACK_VTYPE)
2375 case MONO_TYPE_GENERICINST:
2376 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2379 case MONO_TYPE_MVAR:
2381 if (args [i]->type != STACK_VTYPE)
2385 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALL_MEMBASE (virtual call) opcode to its direct-call
 * counterpart.  NOTE(review): the return statements between the case
 * labels are elided in this listing.
 */
2393 callvirt_to_call (int opcode)
2396 case OP_CALL_MEMBASE:
2398 case OP_VOIDCALL_MEMBASE:
2400 case OP_FCALL_MEMBASE:
2402 case OP_RCALL_MEMBASE:
2404 case OP_VCALL_MEMBASE:
2406 case OP_LCALL_MEMBASE:
2409 g_assert_not_reached ();
2415 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the given imt_arg register, an AOT
 * method constant, or a direct method pointer) into a register and attach
 * it to CALL.  The LLVM path records it in call->imt_arg_reg; the regular
 * path passes it in MONO_ARCH_IMT_REG.  Exactly one of METHOD / IMT_ARG
 * must be set (see the comment above).
 */
2417 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2421 if (COMPILE_LLVM (cfg)) {
2422 method_reg = alloc_preg (cfg);
2425 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2426 } else if (cfg->compile_aot) {
2427 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2430 MONO_INST_NEW (cfg, ins, OP_PCONST);
2431 ins->inst_p0 = method;
2432 ins->dreg = method_reg;
2433 MONO_ADD_INS (cfg->cbb, ins);
2437 call->imt_arg_reg = method_reg;
2439 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2443 method_reg = alloc_preg (cfg);
2446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2447 } else if (cfg->compile_aot) {
2448 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2451 MONO_INST_NEW (cfg, ins, OP_PCONST);
2452 ins->inst_p0 = method;
2453 ins->dreg = method_reg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2457 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its type/ip/target fields
 * (field assignments besides data.target are elided in this listing).
 */
2460 static MonoJumpInfo *
2461 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2463 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2467 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Return the generic-context usage flags for KLASS when this compile uses
 *   generic sharing; otherwise the (elided) fallthrough presumably returns 0.
 */
2473 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2475 if (cfg->generic_sharing_context)
2476 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Method-level counterpart of mini_class_check_context_used (): delegate to
 *   mono_method_check_context_used () only under generic sharing.
 */
2482 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2484 if (cfg->generic_sharing_context)
2485 return mono_method_check_context_used (method);
2491 * check_method_sharing:
2493 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are reported through the optional OUT_PASS_VTABLE / OUT_PASS_MRGCTX
 * pointers; pass NULL for either when the caller does not care.
 */
2496 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2498 gboolean pass_vtable = FALSE;
2499 gboolean pass_mrgctx = FALSE;
/* vtable candidates: static methods or valuetype methods of generic classes */
2501 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2502 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2503 gboolean sharable = FALSE;
2505 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2509 * Pass vtable iff target method might
2510 * be shared, which means that sharing
2511 * is enabled for its class and its
2512 * context is sharable (and it's not a
2515 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst != NULL) take an mrgctx instead of a vtable */
2519 if (mini_method_get_context (cmethod) &&
2520 mini_method_get_context (cmethod)->method_inst) {
2521 g_assert (!pass_vtable);
2523 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2526 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2531 if (out_pass_vtable)
2532 *out_pass_vtable = pass_vtable;
2533 if (out_pass_mrgctx)
2534 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Core helper that builds a MonoCallInst for every call emission path
 *   (direct, calli, virtual, tail).  Sets up the vtype return convention,
 *   performs the soft-float r8->r4 argument fixup, lets the arch/LLVM back
 *   end assign outgoing argument locations, and accounts the param area.
 *   Does NOT add the call instruction to a bblock — callers do that.
 */
2537 inline static MonoCallInst *
2538 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2539 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2543 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls: notify the profiler that the current method is being left */
2548 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2550 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2552 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2555 call->signature = sig;
2556 call->rgctx_reg = rgctx;
2557 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2559 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* valuetype return: first branch reuses cfg->vret_addr (the elided
 * condition presumably distinguishes the tail-call case — confirm) */
2562 if (mini_type_is_vtype (cfg, sig_ret)) {
2563 call->vret_var = cfg->vret_addr;
2564 //g_assert_not_reached ();
2566 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2567 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2570 temp->backend.is_pinvoke = sig->pinvoke;
2573 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2574 * address of return value to increase optimization opportunities.
2575 * Before vtype decomposition, the dreg of the call ins itself represents the
2576 * fact the call modifies the return value. After decomposition, the call will
2577 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2578 * will be transformed into an LDADDR.
2580 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2581 loada->dreg = alloc_preg (cfg);
2582 loada->inst_p0 = temp;
2583 /* We reference the call too since call->dreg could change during optimization */
2584 loada->inst_p1 = call;
2585 MONO_ADD_INS (cfg->cbb, loada);
2587 call->inst.dreg = temp->dreg;
2589 call->vret_var = loada;
2590 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2591 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2593 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2594 if (COMPILE_SOFT_FLOAT (cfg)) {
2596 * If the call has a float argument, we would need to do an r8->r4 conversion using
2597 * an icall, but that cannot be done during the call sequence since it would clobber
2598 * the call registers + the stack. So we do it before emitting the call.
2600 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2602 MonoInst *in = call->args [i];
/* args [0] is the implicit 'this' when sig->hasthis is set */
2604 if (i >= sig->hasthis)
2605 t = sig->params [i - sig->hasthis];
2607 t = &mono_defaults.int_class->byval_arg;
2608 t = mono_type_get_underlying_type (t);
2610 if (!t->byref && t->type == MONO_TYPE_R4) {
2611 MonoInst *iargs [1];
2615 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2617 /* The result will be in an int vreg */
2618 call->args [i] = conv;
2624 call->need_unbox_trampoline = unbox_trampoline;
/* let the chosen back end lay out the outgoing arguments */
2627 if (COMPILE_LLVM (cfg))
2628 mono_llvm_emit_call (cfg, call);
2630 mono_arch_emit_call (cfg, call);
2632 mono_arch_emit_call (cfg, call);
/* grow the shared outgoing-parameter area to fit this call */
2635 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2636 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx value in RGCTX_REG to CALL.  With a dedicated
 *   MONO_ARCH_RGCTX_REG the register is bound as an out-arg; otherwise only
 *   call->rgctx_arg_reg is recorded for the back end.
 */
2642 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2644 #ifdef MONO_ARCH_RGCTX_REG
2645 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2646 cfg->uses_rgctx_reg = TRUE;
2647 call->rgctx_reg = TRUE;
2649 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with the given SIG/ARGS, optionally
 *   passing an IMT_ARG and an RGCTX_ARG.  For pinvoke wrappers with calling-
 *   convention checking enabled, brackets the call with OP_GET_SP/OP_SET_SP
 *   to detect callee stack imbalance and throw ExecutionEngineException.
 */
2656 inline static MonoInst*
2657 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2662 gboolean check_sp = FALSE;
2664 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2665 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2667 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value so it survives until after the call is emitted */
2672 rgctx_reg = mono_alloc_preg (cfg);
2673 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2677 if (!cfg->stack_inbalance_var)
2678 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* snapshot SP before the call for the later imbalance comparison */
2680 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2681 ins->dreg = cfg->stack_inbalance_var->dreg;
2682 MONO_ADD_INS (cfg->cbb, ins);
2685 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2687 call->inst.sreg1 = addr->dreg;
2690 emit_imt_argument (cfg, call, NULL, imt_arg);
2692 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* SP after the call; compare against the saved value */
2697 sp_reg = mono_alloc_preg (cfg);
2699 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2701 MONO_ADD_INS (cfg->cbb, ins);
2703 /* Restore the stack so we don't crash when throwing the exception */
2704 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2705 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2706 MONO_ADD_INS (cfg->cbb, ins);
2708 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2709 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2713 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2715 return (MonoInst*)call;
2719 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2722 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2724 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a managed call to METHOD.  Handles: string ctor signature rewrite,
 *   transparent-proxy (remoting) dispatch, the fast delegate Invoke path,
 *   devirtualization of non-virtual/final methods, IMT-based interface
 *   dispatch, and plain vtable dispatch.  Returns the call instruction.
 */
2727 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2728 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2730 #ifndef DISABLE_REMOTING
2731 gboolean might_be_remote = FALSE;
/* a non-NULL 'this' implies virtual dispatch in this helper */
2733 gboolean virtual = this != NULL;
2734 gboolean enable_for_aot = TRUE;
2738 gboolean need_unbox_trampoline;
2741 sig = mono_method_signature (method);
2744 rgctx_reg = mono_alloc_preg (cfg);
2745 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2748 if (method->string_ctor) {
2749 /* Create the real signature */
2750 /* FIXME: Cache these */
2751 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
/* string ctors actually return the string instance, not void */
2752 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2757 context_used = mini_method_check_context_used (cfg, method);
2759 #ifndef DISABLE_REMOTING
2760 might_be_remote = this && sig->hasthis &&
2761 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2762 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2764 if (might_be_remote && context_used) {
/* shared code: fetch the remoting-check invoke wrapper via the rgctx */
2767 g_assert (cfg->generic_sharing_context);
2769 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2771 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* object/interface methods on valuetypes need the unbox trampoline */
2775 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2777 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2779 #ifndef DISABLE_REMOTING
2780 if (might_be_remote)
2781 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2784 call->method = method;
2785 call->inst.flags |= MONO_INST_HAS_METHOD;
2786 call->inst.inst_left = this;
2787 call->tail_call = tail;
2790 int vtable_reg, slot_reg, this_reg;
2793 this_reg = this->dreg;
/* fast path: delegate Invoke dispatches through delegate->invoke_impl */
2795 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2796 MonoInst *dummy_use;
2798 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2800 /* Make a call to delegate->invoke_impl */
2801 call->inst.inst_basereg = this_reg;
2802 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2803 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2805 /* We must emit a dummy use here because the delegate trampoline will
2806 replace the 'this' argument with the delegate target making this activation
2807 no longer a root for the delegate.
2808 This is an issue for delegates that target collectible code such as dynamic
2809 methods of GC'able assemblies.
2811 For a test case look into #667921.
2813 FIXME: a dummy use is not the best way to do it as the local register allocator
2814 will put it on a caller save register and spil it around the call.
2815 Ideally, we would either put it on a callee save register or only do the store part.
2817 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2819 return (MonoInst*)call;
/* devirtualization: non-virtual or final methods can be called directly */
2822 if ((!cfg->compile_aot || enable_for_aot) &&
2823 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2824 (MONO_METHOD_IS_FINAL (method) &&
2825 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2826 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2828 * the method is not virtual, we just need to ensure this is not null
2829 * and then we can call the method directly.
2831 #ifndef DISABLE_REMOTING
2832 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2834 * The check above ensures method is not gshared, this is needed since
2835 * gshared methods can't have wrappers.
2837 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2841 if (!method->string_ctor)
2842 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2844 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2845 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2847 * the method is virtual, but we can statically dispatch since either
2848 * it's class or the method itself are sealed.
2849 * But first we need to ensure it's not a null reference.
2851 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2853 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* true virtual dispatch: load the vtable (faulting on NULL 'this') */
2855 vtable_reg = alloc_preg (cfg);
2856 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2857 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* interface call: IMT slots live at negative offsets before the vtable */
2858 guint32 imt_slot = mono_method_get_imt_slot (method);
2859 emit_imt_argument (cfg, call, call->method, imt_arg);
2860 slot_reg = vtable_reg;
2861 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2863 slot_reg = vtable_reg;
2864 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2865 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* generic virtual methods also go through the IMT mechanism */
2867 g_assert (mono_method_signature (method)->generic_param_count);
2868 emit_imt_argument (cfg, call, call->method, imt_arg);
2872 call->inst.sreg1 = slot_reg;
2873 call->inst.inst_offset = offset;
2874 call->virtual = TRUE;
2878 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2881 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2883 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper over mono_emit_method_call_full () with the method's
 *   own signature, no tail call, and no IMT/rgctx arguments.
 */
2887 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2889 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): the elided lines presumably store FUNC into the call
 *   instruction before it is added to the current bblock.
 */
2893 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2900 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2903 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2905 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for FUNC, going through its
 *   managed wrapper (mono_icall_get_wrapper).
 */
2909 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2911 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2915 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2919 * mono_emit_abs_call:
2921 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is used as the "address" of the call; the
 * PATCH_INFO_ABS machinery resolves it at code-emission time via the
 * cfg->abs_patches table populated here.
 */
2923 inline static MonoInst*
2924 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2925 MonoMethodSignature *sig, MonoInst **args)
2927 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2931 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2934 if (cfg->abs_patches == NULL)
2935 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2936 g_hash_table_insert (cfg->abs_patches, ji, ji);
2937 ins = mono_emit_native_call (cfg, ji, sig, args);
/* mark the fptr as a patch so the back end knows to resolve it */
2938 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without their managed wrapper).
 *   Disabled under LLVM on some targets, when SDB sequence points are being
 *   generated, or when explicitly turned off for this compile.
 */
2943 direct_icalls_enabled (MonoCompile *cfg)
2945 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2947 if (cfg->compile_llvm)
2950 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot raise
 *   and direct icalls are enabled, inline its (lazily created, cached)
 *   wrapper method instead of calling through the wrapper.
 */
2956 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
2959 * Call the jit icall without a wrapper if possible.
2960 * The wrapper is needed for the following reasons:
2961 * - to handle exceptions thrown using mono_raise_exceptions () from the
2962 * icall function. The EH code needs the lmf frame pushed by the
2963 * wrapper to be able to unwind back to managed code.
2964 * - to be able to do stack walks for asynchronously suspended
2965 * threads when debugging.
2967 if (info->no_raise && direct_icalls_enabled (cfg)) {
2971 if (!info->wrapper_method) {
2972 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2973 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* publish the cached wrapper before other threads can observe it */
2975 mono_memory_barrier ();
2979 * Inline the wrapper method, which is basically a call to the C icall, and
2980 * an exception check.
2982 costs = inline_method (cfg, info->wrapper_method, NULL,
2983 args, NULL, cfg->real_offset, TRUE);
2984 g_assert (costs > 0);
2985 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2989 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend a small-integer call result when needed (pinvoke or
 *   LLVM), since native code may leave the upper bits of sub-register-sized
 *   return values uninitialized.  Returns the (possibly new) result ins.
 */
2994 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2996 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2997 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3001 * Native code might return non register sized integers
3002 * without initializing the upper bits.
3004 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3005 case OP_LOADI1_MEMBASE:
3006 widen_op = OP_ICONV_TO_I1;
3008 case OP_LOADU1_MEMBASE:
3009 widen_op = OP_ICONV_TO_U1;
3011 case OP_LOADI2_MEMBASE:
3012 widen_op = OP_ICONV_TO_I2;
3014 case OP_LOADU2_MEMBASE:
3015 widen_op = OP_ICONV_TO_U2;
3021 if (widen_op != -1) {
3022 int dreg = alloc_preg (cfg);
3025 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* keep the eval-stack type of the original result */
3026 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the managed String.memcpy(3) helper from
 *   corlib; aborts if the running corlib does not provide it.
 */
3036 get_memcpy_method (void)
3038 static MonoMethod *memcpy_method = NULL;
3039 if (!memcpy_method) {
3040 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3042 g_error ("Old corlib found. Install a new one");
3044 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively build a bitmap (one bit per pointer-sized slot, relative to
 *   OFFSET) marking which instance fields of KLASS hold GC references, so a
 *   copy can emit write barriers only for those slots.
 */
3048 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3050 MonoClassField *field;
3051 gpointer iter = NULL;
3053 while ((field = mono_class_get_fields (klass, &iter))) {
3056 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) MonoObject header */
3058 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3059 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3060 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3061 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3063 MonoClass *field_class = mono_class_from_mono_type (field->type);
/* recurse into embedded structs that themselves contain references */
3064 if (field_class->has_references)
3065 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR.  Prefers, in
 *   order: the arch-specific card-table barrier opcode, an inlined card-table
 *   mark, and finally a call to the generic managed write-barrier method.
 *   No-op when write barriers are not being generated.
 */
3071 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3073 int card_table_shift_bits;
3074 gpointer card_table_mask;
3076 MonoInst *dummy_use;
3077 int nursery_shift_bits;
3078 size_t nursery_size;
3079 gboolean has_card_table_wb = FALSE;
3081 if (!cfg->gen_write_barriers)
3084 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3086 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3088 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3089 has_card_table_wb = TRUE;
3092 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
/* fast path: single arch-lowered card-table barrier instruction */
3095 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3096 wbarrier->sreg1 = ptr->dreg;
3097 wbarrier->sreg2 = value->dreg;
3098 MONO_ADD_INS (cfg->cbb, wbarrier);
3099 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
/* inline card marking: card = card_table + (ptr >> shift), *card = 1 */
3100 int offset_reg = alloc_preg (cfg);
3101 int card_reg = alloc_preg (cfg);
3104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3105 if (card_table_mask)
3106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3108 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3109 * IMM's larger than 32bits.
3111 if (cfg->compile_aot) {
3112 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3114 MONO_INST_NEW (cfg, ins, OP_PCONST);
3115 ins->inst_p0 = card_table;
3116 ins->dreg = card_reg;
3117 MONO_ADD_INS (cfg->cbb, ins);
3120 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3121 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* slow path: call the GC-provided managed write barrier */
3123 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3124 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3127 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled, write-barrier-aware copy of a KLASS-typed value
 *   of SIZE bytes (iargs [0] = dest, iargs [1] = src).  Falls back to the
 *   mono_gc_wbarrier_value_copy_bitmap icall for larger copies, and bails
 *   out entirely (presumably returning FALSE — the returns are elided in
 *   this dump) when alignment or size make the intrinsic invalid.
 */
3131 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3133 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3134 unsigned need_wb = 0;
3139 /*types with references can't have alignment smaller than sizeof(void*) */
3140 if (align < SIZEOF_VOID_P)
3143 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3144 if (size > 32 * SIZEOF_VOID_P)
3147 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3149 /* We don't unroll more than 5 stores to avoid code bloat. */
3150 if (size > 5 * SIZEOF_VOID_P) {
3151 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3152 size += (SIZEOF_VOID_P - 1);
3153 size &= ~(SIZEOF_VOID_P - 1);
3155 EMIT_NEW_ICONST (cfg, iargs [2], size);
3156 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3157 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3161 destreg = iargs [0]->dreg;
3162 srcreg = iargs [1]->dreg;
3165 dest_ptr_reg = alloc_preg (cfg);
3166 tmp_reg = alloc_preg (cfg);
/* running destination pointer, advanced one slot per iteration */
3169 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3171 while (size >= SIZEOF_VOID_P) {
3172 MonoInst *load_inst;
3173 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3174 load_inst->dreg = tmp_reg;
3175 load_inst->inst_basereg = srcreg;
3176 load_inst->inst_offset = offset;
3177 MONO_ADD_INS (cfg->cbb, load_inst);
3179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* barrier only for the slots the bitmap marked as references */
3182 emit_write_barrier (cfg, iargs [0], load_inst);
3184 offset += SIZEOF_VOID_P;
3185 size -= SIZEOF_VOID_P;
3188 /*tmp += sizeof (void*)*/
3189 if (size >= SIZEOF_VOID_P) {
3190 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3191 MONO_ADD_INS (cfg->cbb, iargs [0]);
3195 /* Those cannot be references since size < sizeof (void*) */
3197 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3204 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3205 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3211 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3212 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3221 * Emit code to copy a valuetype of type @klass whose address is stored in
3222 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Path selection: gsharedvt klasses get their size/memcpy helper from the
 * rgctx; types with references use the write-barrier-aware copy or the
 * mono_value_copy icall; everything else is a plain inline memcpy or a
 * managed memcpy call.  NATIVE selects the marshalled (native) layout size.
 */
3225 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3227 MonoInst *iargs [4];
3230 MonoMethod *memcpy_method;
3231 MonoInst *size_ins = NULL;
3232 MonoInst *memcpy_ins = NULL;
3235 if (cfg->generic_sharing_context)
3236 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3239 * This check breaks with spilled vars... need to handle it during verification anyway.
3240 * g_assert (klass && klass == src->klass && klass == dest->klass);
3243 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* size and copy routine are only known at runtime: fetch from the rgctx */
3245 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3246 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3250 n = mono_class_native_size (klass, &align);
3252 n = mono_class_value_size (klass, &align);
3254 /* if native is true there should be no references in the struct */
3255 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3256 /* Avoid barriers when storing to the stack */
3257 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3258 (dest->opcode == OP_LDADDR))) {
3264 context_used = mini_class_check_context_used (cfg, klass);
3266 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3267 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3269 } else if (context_used) {
3270 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3272 if (cfg->compile_aot) {
3273 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3275 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* the icall needs the GC descriptor; compute it eagerly at JIT time */
3276 mono_class_compute_gc_descriptor (klass);
3281 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3283 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no references involved: plain memory copy */
3288 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3289 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3290 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3295 iargs [2] = size_ins;
3297 EMIT_NEW_ICONST (cfg, iargs [2], n);
3299 memcpy_method = get_memcpy_method ();
3301 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3303 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the managed String.memset(3) helper from
 *   corlib; aborts if the running corlib does not provide it.
 */
3308 get_memset_method (void)
3310 static MonoMethod *memset_method = NULL;
3311 if (!memset_method) {
3312 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3314 g_error ("Old corlib found. Install a new one");
3316 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zeroing a KLASS-typed value at DEST->dreg (CIL initobj).
 *   gsharedvt klasses call bzero_aligned_1 through an rgctx-provided address;
 *   small fixed-size types get an inline memset; larger ones call the
 *   managed memset helper.
 */
3320 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3322 MonoInst *iargs [3];
3325 MonoMethod *memset_method;
3326 MonoInst *size_ins = NULL;
3327 MonoInst *bzero_ins = NULL;
3328 static MonoMethod *bzero_method;
3330 /* FIXME: Optimize this for the case when dest is an LDADDR */
3331 mono_class_init (klass);
3332 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* size and bzero routine come from the rgctx at runtime */
3333 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3334 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3336 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3337 g_assert (bzero_method);
3339 iargs [1] = size_ins;
3340 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3344 n = mono_class_value_size (klass, &align);
/* small enough to zero inline without code bloat */
3346 if (n <= sizeof (gpointer) * 8) {
3347 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3350 memset_method = get_memset_method ();
3352 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3353 EMIT_NEW_ICONST (cfg, iargs [2], n);
3354 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3361 * Emit IR to return either the this pointer for instance method,
3362 * or the mrgctx for static methods.
/*
 * Source of the context depends on the kind of sharing:
 *   - generic method (method_inst): the mrgctx var saved by the prolog;
 *   - static/valuetype method: the vtable var saved by the prolog (loaded
 *     out of the mrgctx when the method also has a method context);
 *   - otherwise: the vtable loaded from 'this'.
 */
3365 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3367 MonoInst *this = NULL;
3369 g_assert (cfg->generic_sharing_context);
3371 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3372 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3373 !method->klass->valuetype)
3374 EMIT_NEW_ARGLOAD (cfg, this, 0);
3376 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3377 MonoInst *mrgctx_loc, *mrgctx_var;
3380 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3382 mrgctx_loc = mono_get_vtable_var (cfg);
3383 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3386 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3387 MonoInst *vtable_loc, *vtable_var;
3391 vtable_loc = mono_get_vtable_var (cfg);
3392 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3394 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* the var actually holds an mrgctx; dereference to reach the vtable */
3395 MonoInst *mrgctx_var = vtable_var;
3398 vtable_reg = alloc_preg (cfg);
3399 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3400 vtable_var->type = STACK_PTR;
/* instance method: the context is simply this->vtable */
3408 vtable_reg = alloc_preg (cfg);
3409 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry descriptor from MP: which METHOD's context it
 *   belongs to, whether it lives in an mrgctx, the embedded patch info
 *   (PATCH_TYPE/PATCH_DATA) identifying the data, and the INFO_TYPE of the
 *   value to fetch.
 */
3414 static MonoJumpInfoRgctxEntry *
3415 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3417 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3418 res->method = method;
3419 res->in_mrgctx = in_mrgctx;
3420 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3421 res->data->type = patch_type;
3422 res->data->data.target = patch_data;
3423 res->info_type = info_type;
3431 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Two strategies: an (unused) inline version that walks the rgctx slot
 * arrays and falls back to the mono_fill_*_rgctx icalls when a slot is
 * still NULL, and the default path which emits a call to the lazy-fetch
 * trampoline via mono_emit_abs_call ().
 */
3434 static inline MonoInst*
3435 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3437 /* Inline version, not currently used */
3438 // FIXME: This can be called from mono_decompose_vtype_opts (), which can't create new bblocks
3440 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3442 MonoBasicBlock *is_null_bb, *end_bb;
3443 MonoInst *res, *ins, *call;
3446 slot = mini_get_rgctx_entry_slot (entry);
3448 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3449 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots sit after the MonoMethodRuntimeGenericContext header */
3451 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find how many array hops are needed to reach the slot's array */
3452 for (depth = 0; ; ++depth) {
3453 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3455 if (index < size - 1)
3460 NEW_BBLOCK (cfg, end_bb);
3461 NEW_BBLOCK (cfg, is_null_bb);
3464 rgctx_reg = rgctx->dreg;
3466 rgctx_reg = alloc_preg (cfg);
3468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3469 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3470 NEW_BBLOCK (cfg, is_null_bb);
3472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3476 for (i = 0; i < depth; ++i) {
3477 int array_reg = alloc_preg (cfg);
3479 /* load ptr to next array */
3480 if (mrgctx && i == 0)
3481 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT)
3483 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3484 rgctx_reg = array_reg;
3485 /* is the ptr null? */
3486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3487 /* if yes, jump to actual trampoline */
3488 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* slot 0 of each array is the link to the next array, hence index + 1 */
3492 val_reg = alloc_preg (cfg);
3493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3494 /* is the slot null? */
3495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3496 /* if yes, jump to actual trampoline */
3497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3500 res_reg = alloc_preg (cfg);
3501 MONO_INST_NEW (cfg, ins, OP_MOVE);
3502 ins->dreg = res_reg;
3503 ins->sreg1 = val_reg;
3504 MONO_ADD_INS (cfg->cbb, ins);
3506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: ask the runtime to fill (and return) the slot */
3509 MONO_START_BB (cfg, is_null_bb);
3511 EMIT_NEW_ICONST (cfg, args [1], index);
3513 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3515 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3516 MONO_INST_NEW (cfg, ins, OP_MOVE);
3517 ins->dreg = res_reg;
3518 ins->sreg1 = call->dreg;
3519 MONO_ADD_INS (cfg->cbb, ins);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3522 MONO_START_BB (cfg, end_bb);
/* default path: lazy-fetch trampoline resolves the slot out of line */
3526 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR loading the RGCTX_TYPE property of KLASS from the rgctx of the
 *   current method.
 */
3531 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3532 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3534 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3535 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3537 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR loading the RGCTX_TYPE property of signature SIG from the rgctx
 *   of the current method.
 */
3541 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3542 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3544 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3545 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3547 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR loading gsharedvt call information (a MonoJumpInfoGSharedVtCall
 *   pairing SIG with CMETHOD) from the rgctx of the current method.
 */
3551 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3552 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3554 MonoJumpInfoGSharedVtCall *call_info;
3555 MonoJumpInfoRgctxEntry *entry;
3558 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3559 call_info->sig = sig;
3560 call_info->method = cmethod;
3562 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3563 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3565 return emit_rgctx_fetch (cfg, rgctx, entry);
3569 * emit_get_rgctx_virt_method:
3571 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3574 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3575 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3577 MonoJumpInfoVirtMethod *info;
3578 MonoJumpInfoRgctxEntry *entry;
/* pack the (klass, virt_method) pair into a mempool-allocated descriptor */
3581 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3582 info->klass = klass;
3583 info->method = virt_method;
3585 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3586 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3588 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry for
 * CMETHOD, i.e. the per-method gsharedvt runtime info described by INFO.
 */
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
								 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
	MonoJumpInfoRgctxEntry *entry;

	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
	if (!context_used) {
		/* Non-shared code: the method is known at JIT time, emit it as a constant */
		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			/* Other rgctx info kinds are not expected for the non-shared path */
			g_assert_not_reached ();
		/* Shared code: resolve through an rgctx fetch at runtime */
		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx slot of kind RGCTX_TYPE keyed on FIELD,
 * resolved at runtime through an rgctx fetch.
 */
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
					  MonoClassField *field, MonoRgctxInfoType rgctx_type)
	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the gsharedvt info
 * template table of the current method, adding a new entry if it is not
 * already present. The table is grown on demand from the cfg mempool.
 */
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
	MonoRuntimeGenericContextInfoTemplate *template;

	/* Linear search for an existing identical entry; LOCAL_OFFSET entries are never shared */
	for (i = 0; i < info->num_entries; ++i) {
		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];

		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)

	/* Grow the table: double the capacity, starting at 16 entries */
	if (info->num_entries == info->count_entries) {
		MonoRuntimeGenericContextInfoTemplate *new_entries;
		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;

		new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);

		/* Old storage is mempool-allocated, so it is simply abandoned, not freed */
		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
		info->entries = new_entries;
		info->count_entries = new_count_entries;

	/* Append the new template entry */
	idx = info->num_entries;
	template = &info->entries [idx];
	template->info_type = rgctx_type;
	template->data = data;

	info->num_entries ++;
/*
 * emit_get_gsharedvt_info:
 *
 *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt
 * info var instead of calling an rgctx fetch trampoline.
 */
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
	/* Reserve (or reuse) a slot in the gsharedvt info table for (data, rgctx_type) */
	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
	/* Load info->entries [idx] */
	dreg = alloc_preg (cfg);
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: load gsharedvt info of kind RGCTX_TYPE keyed on KLASS's byval type */
emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
	return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 *   Emit IR which runs the .cctor of KLASS if it has not run yet, resolving the
 * vtable through the rgctx when the class is context-dependent.
 * On return the caller must check @klass for load errors.
 */
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
	MonoInst *vtable_arg;

	gboolean use_op_generic_class_init = FALSE;

	context_used = mini_class_check_context_used (cfg, klass);

	/* Shared code: the vtable must be fetched from the rgctx at runtime */
	vtable_arg = emit_get_rgctx_klass (cfg, context_used,
									   klass, MONO_RGCTX_INFO_VTABLE);
	/* Non-shared: the vtable is known at JIT time */
	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

	EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);

#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
	/* NOTE(review): the LLVM backend apparently cannot use the opcode form — confirm */
	if (!COMPILE_LLVM (cfg))
		use_op_generic_class_init = TRUE;

	if (use_op_generic_class_init) {
		/*
		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
		 */
		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
		ins->sreg1 = vtable_arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		/* Fallback: open-code the "initialized?" bitfield test + icall slow path */
		static int byte_offset = -1;
		static guint8 bitmask;
		int bits_reg, inited_reg;
		MonoBasicBlock *inited_bb;
		MonoInst *args [16];

		/* Lazily locate the 'initialized' bit inside MonoVTable (cached across compiles) */
		if (byte_offset < 0)
			mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

		bits_reg = alloc_ireg (cfg);
		inited_reg = alloc_ireg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);

		NEW_BBLOCK (cfg, inited_bb);

		/* Already initialized -> skip the icall */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);

		args [0] = vtable_arg;
		mono_emit_jit_icall (cfg, mono_generic_class_init, args);

		MONO_START_BB (cfg, inited_bb);
/*
 * emit_class_init:
 *
 *   Emit IR to run the .cctor of KLASS: an inline bitfield check under AOT,
 * otherwise a call through the class-init trampoline.
 */
emit_class_init (MonoCompile *cfg, MonoClass *klass)
	/* This could be used as a fallback if needed */
	if (cfg->compile_aot) {
		/* With the overhead of plt entries, the inline version is comparable in size/speed */
		emit_generic_class_init (cfg, klass);

	mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP if sequence points are enabled and
 * we are compiling METHOD itself (not an inlined callee). INTR_LOC marks the
 * point as an interruption location; NONEMPTY_STACK flags a non-empty IL stack.
 */
emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
	if (cfg->gen_seq_points && cfg->method == method) {
		NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
		ins->flags |= MONO_INST_NONEMPTY_STACK;
		MONO_ADD_INS (cfg->cbb, ins);
3801 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3803 if (mini_get_debug_options ()->better_cast_details) {
3804 int vtable_reg = alloc_preg (cfg);
3805 int klass_reg = alloc_preg (cfg);
3806 MonoBasicBlock *is_null_bb = NULL;
3808 int to_klass_reg, context_used;
3811 NEW_BBLOCK (cfg, is_null_bb);
3813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3817 tls_get = mono_get_jit_tls_intrinsic (cfg);
3819 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3823 MONO_ADD_INS (cfg->cbb, tls_get);
3824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3829 context_used = mini_class_check_context_used (cfg, klass);
3831 MonoInst *class_ins;
3833 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3834 to_klass_reg = class_ins->dreg;
3836 to_klass_reg = alloc_preg (cfg);
3837 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3842 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state saved by save_cast_details () in the JIT TLS
 * data, so stale information does not leak into later failure reports.
 */
reset_cast_details (MonoCompile *cfg)
	/* Reset the variables holding the cast details */
	if (mini_get_debug_options ()->better_cast_details) {
		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);

		MONO_ADD_INS (cfg->cbb, tls_get);
		/* It is enough to reset the from field */
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ's exact runtime
 * type is ARRAY_CLASS, comparing either classes (shared/AOT) or vtables.
 * On return the caller must check @array_class for load errors
 */
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
	int vtable_reg = alloc_preg (cfg);

	context_used = mini_class_check_context_used (cfg, array_class);

	/* Record from/to classes for --debug=casts diagnostics */
	save_cast_details (cfg, array_class, obj->dreg, FALSE);

	/* Faulting load: also performs the null check on obj */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: compare MonoClass pointers, not vtables */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			/* AOT cannot embed the class pointer directly; load it via a patch */
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
	} else if (context_used) {
		/* Shared generic code: fetch the expected vtable from the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
		if (cfg->compile_aot) {
			/* Compare against the vtable resolved through an AOT patch */
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))

			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
			/* JIT: the vtable pointer can be baked in as an immediate */
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");

	reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T> by calling its Unbox () method. If
 * context_used is non zero, then shared generic code is generated and the
 * method address is resolved through the rgctx.
 */
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	MonoInst *rgctx, *addr;

	/* FIXME: What if the class is shared? We might not
	   have to get the address of the method from the
	   rgctx. */
	addr = emit_get_rgctx_method (cfg, context_used, method,
								  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	/* Indirect call through the rgctx-resolved address */
	return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
	/* Non-shared path: direct call, passing the vtable if the method needs it */
	gboolean pass_vtable, pass_mrgctx;
	MonoInst *rgctx_arg = NULL;

	check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
	g_assert (!pass_mrgctx);

	MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

	EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);

	return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in sp [0] into a value of type KLASS: check
 * that the object's element class matches, then return the address of the
 * value payload (object + sizeof (MonoObject)). Throws InvalidCastException
 * on mismatch.
 */
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Faulting load doubles as the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	/* An array can never unbox to a value type */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));

	/* Shared generic code: compare against the rgctx-resolved element class */
	MonoInst *element_class;

	/* This assertion is from the unboxcast insn */
	g_assert (klass->rank == 0);

	element_class = emit_get_rgctx_klass (cfg, context_used,
										  klass, MONO_RGCTX_INFO_ELEMENT_KLASS);

	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	/* Non-shared path: element class is known at JIT time */
	save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
	mini_emit_class_check (cfg, eclass_reg, klass->element_class);
	reset_cast_details (cfg);

	/* Result: address of the unboxed value, just past the object header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete kind
 * (valuetype / reference / nullable) is only known at runtime. Branches on the
 * runtime CLASS_BOX_TYPE value and leaves an address to the value in ADDR_REG.
 */
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
	MonoInst *addr, *klass_inst, *is_ref, *args[16];
	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;

	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);

	args [1] = klass_inst;

	/* Type check first: castclass_unbox throws on mismatch */
	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);

	NEW_BBLOCK (cfg, is_ref_bb);
	NEW_BBLOCK (cfg, is_nullable_bb);
	NEW_BBLOCK (cfg, end_bb);
	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
	/* box-type 1 == reference type — NOTE(review): numeric meanings inferred from branch targets, confirm against MONO_GSHAREDVT_BOX_TYPE_* */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

	/* box-type 2 == nullable */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
	addr_reg = alloc_dreg (cfg, STACK_MP);

	/* Valuetype case: the value sits right after the object header */
	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, addr);

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_ref_bb);

	/* Save the ref to a temporary */
	dreg = alloc_ireg (cfg);
	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
	addr->dreg = addr_reg;
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_nullable_bb);

	/* Nullable case: call Nullable<T>.Unbox through a hand-built signature */
	MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
	MonoInst *unbox_call;
	MonoMethodSignature *unbox_sig;

	/* The signature cannot be constructed from metadata at JIT time, so build it by hand */
	unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
	unbox_sig->ret = &klass->byval_arg;
	unbox_sig->param_count = 1;
	unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
	unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);

	EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
	addr->dreg = addr_reg;

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, end_bb);

	/* Load the value from the address all three paths left in addr_reg */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS, choosing between the managed
 * allocator, a specialized mscorlib helper (AOT), or a generic icall.
 * FOR_BOX selects the boxing variant of the managed allocator.
 * Returns NULL and set the cfg exception on error.
 */
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
	MonoInst *iargs [2];

	/* Shared generic path: class/vtable must come from the rgctx */
	MonoInst *iargs [2];
	gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);

	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);

	if (cfg->opt & MONO_OPT_SHARED)
		rgctx_info = MONO_RGCTX_INFO_KLASS;
		rgctx_info = MONO_RGCTX_INFO_VTABLE;
	data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code passes the domain explicitly */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);

		alloc_ftn = mono_object_new;

		alloc_ftn = mono_object_new_specific;

	if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
		if (known_instance_size) {
			int size = mono_class_instance_size (klass);
			if (size < sizeof (MonoObject))
				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

			EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));

		return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);

	/* Non-shared paths */
	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;

		/* Signal a type-load failure to the caller */
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
		cfg->exception_ptr = klass;

		managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);

		if (managed_alloc) {
			int size = mono_class_instance_size (klass);
			if (size < sizeof (MonoObject))
				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);

		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		/* Some allocators take the size in words ("lw") instead of the vtable as first arg */
		guint32 lw = vtable->klass->instance_size;
		lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
		EMIT_NEW_ICONST (cfg, iargs [0], lw);
		EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);

		EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR to box VAL as an instance of KLASS: Nullable<T> goes through
 * Nullable<T>.Box (), gsharedvt types branch on the runtime box type, and the
 * plain case is allocate-then-store.
 * Returns NULL and set the cfg exception on error.
 */
handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
	MonoInst *alloc, *ins;

	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);

		/* FIXME: What if the class is shared? We might not
		   have to get the method address from the RGCTX. */
		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
												MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
		/* Non-shared nullable path: direct call to Box (), passing the vtable if needed */
		gboolean pass_vtable, pass_mrgctx;
		MonoInst *rgctx_arg = NULL;

		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
		g_assert (!pass_mrgctx);

		MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

		EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);

		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);

	if (mini_is_gsharedvt_klass (cfg, klass)) {
		/* Runtime dispatch on the concrete kind of KLASS, mirroring handle_unbox_gsharedvt */
		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
		MonoInst *res, *is_ref, *src_var, *addr;

		dreg = alloc_ireg (cfg);

		NEW_BBLOCK (cfg, is_ref_bb);
		NEW_BBLOCK (cfg, is_nullable_bb);
		NEW_BBLOCK (cfg, end_bb);
		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

		/* Valuetype case: allocate and copy the value past the object header */
		alloc = handle_alloc (cfg, klass, TRUE, context_used);

		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
		ins->opcode = OP_STOREV_MEMBASE;

		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
		res->type = STACK_OBJ;

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, is_ref_bb);

		/* val is a vtype, so has to load the value manually */
		src_var = get_vreg_to_inst (cfg, val->dreg);

		src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, is_nullable_bb);

		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
														MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);

		MonoMethodSignature *box_sig;

		/*
		 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
		 * construct that method at JIT time, so have to do things by hand.
		 */
		box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
		box_sig->ret = &mono_defaults.object_class->byval_arg;
		box_sig->param_count = 1;
		box_sig->params [0] = &klass->byval_arg;
		box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
		res->type = STACK_OBJ;

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, end_bb);

	/* Plain case: allocate, then store the value after the header */
	alloc = handle_alloc (cfg, klass, TRUE, context_used);

	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return TRUE if KLASS is a generic instance (or, under sharing, an open
 * generic) with at least one covariant/contravariant type argument that is a
 * reference type — such classes need the slow cast path.
 */
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
	MonoGenericContainer *container;
	MonoGenericInst *ginst;

	if (klass->generic_class) {
		container = klass->generic_class->container_class->generic_container;
		ginst = klass->generic_class->context.class_inst;
	} else if (klass->generic_container && context_used) {
		container = klass->generic_container;
		ginst = container->context.class_inst;

	for (i = 0; i < container->type_argc; ++i) {

		/* Only variant (in/out) parameters matter for cast variance */
		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))

		type = ginst->type_argv [i];
		if (mini_type_is_reference (cfg, type))
/* Cache of corlib class names whose icalls are whitelisted for direct calls; built lazily, never freed */
static GHashTable* direct_icall_type_hash;

/*
 * icall_is_direct_callable:
 *
 *   Return TRUE if CMETHOD's icall may be called directly (without a wrapper).
 */
icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
	if (!direct_icalls_enabled (cfg))

	/*
	 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
	 * Whitelist a few icalls for now.
	 */
	if (!direct_icall_type_hash) {
		GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);

		g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
		g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
		g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
		/* Publish the fully-built table before the pointer becomes visible to other threads */
		mono_memory_barrier ();
		direct_icall_type_hash = h;

	if (cmethod->klass == mono_defaults.math_class)

	/* No locking needed */
	if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * is_complex_isinst:
 *
 *   TRUE if an isinst/castclass against KLASS cannot be decided with a simple
 * class comparison: interfaces, arrays, nullables, MBR classes, sealed classes
 * and open type variables all need the slower path.
 * FIX: the macro argument is now parenthesized at every use (CERT PRE01-C),
 * so passing a non-trivial expression cannot mis-parse.
 */
#define is_complex_isinst(klass) (((klass)->flags & TYPE_ATTRIBUTE_INTERFACE) || (klass)->rank || mono_class_is_nullable ((klass)) || mono_class_is_marshalbyref ((klass)) || ((klass)->flags & TYPE_ATTRIBUTE_SEALED) || (klass)->byval_arg.type == MONO_TYPE_VAR || (klass)->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * recording cast details around the call for --debug=casts diagnostics.
 */
emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
	MonoMethod *mono_castclass;

	mono_castclass = mono_marshal_get_castclass_with_cache ();

	save_cast_details (cfg, klass, args [0]->dreg, TRUE);
	res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
	reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh index unique to this call site, combining the method index
 * (high 16 bits) with a per-cfg counter (low 16 bits).
 */
get_castclass_cache_idx (MonoCompile *cfg)
	/* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
	cfg->castclass_cache_index ++;
	return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant: build the (obj, klass, cache) argument triple with
 * compile-time constants and dispatch to emit_castclass_with_cache ().
 */
emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
	EMIT_NEW_CLASSCONST (cfg, args [1], klass);

	/* The cache slot: an AOT patch under AOT, a domain-allocated pointer otherwise */
	if (cfg->compile_aot) {
		idx = get_castclass_cache_idx (cfg);
		EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));

		EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));

	/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
	return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: SRC is cast to KLASS, throwing
 * InvalidCastException on failure. Chooses between the cached wrapper, an
 * inlined marshal helper, and open-coded vtable/class checks.
 * Returns NULL and set the cfg exception on error.
 */
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);

	MonoInst *klass_inst = NULL, *res;

	context_used = mini_class_check_context_used (cfg, klass);

	/* Variant generic arguments force the cached wrapper */
	if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
		res = emit_castclass_with_cache_nonshared (cfg, src, klass);
		(*inline_costs) += 2;

	} else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
		/* MBR/interface targets: inline the castclass marshal wrapper */
		MonoMethod *mono_castclass;
		MonoInst *iargs [1];

		mono_castclass = mono_marshal_get_castclass (klass);

		save_cast_details (cfg, klass, src->dreg, TRUE);
		costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
							   iargs, ip, cfg->real_offset, TRUE);
		reset_cast_details (cfg);
		CHECK_CFG_EXCEPTION;
		g_assert (costs > 0);

		cfg->real_offset += 5;

		(*inline_costs) += costs;

	/* Shared generic path */
	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
		MonoInst *cache_ins;

		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

		/* klass - it's the second element of the cache entry*/
		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

		args [2] = cache_ins;

		return emit_castclass_with_cache (cfg, klass, args);

	klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

	/* Open-coded path: null check, then vtable/class comparison */
	NEW_BBLOCK (cfg, is_null_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg, FALSE);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);

		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);

				mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
				cfg->exception_ptr = klass;

				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);

				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);

			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the isinst opcode: the result register holds SRC when it is an
 * instance of KLASS, NULL otherwise. Chooses between the cached wrapper and
 * open-coded per-kind checks (interface, array, nullable, sealed, general).
 * Returns NULL and set the cfg exception on error.
 */
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_ireg_ref (cfg);
	MonoInst *klass_inst = NULL;

	/* Variant/complex targets: defer to the isinst-with-cache wrapper */
	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
		MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
		MonoInst *cache_ins;

		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

		/* klass - it's the second element of the cache entry*/
		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

		args [2] = cache_ins;

		return mono_emit_method_call (cfg, mono_isinst, args, NULL);

	klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;

	/* null input -> result stays the (null) input */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);

		int klass_reg = alloc_preg (cfg);

		/* Array case: check rank, then the element class against klass->cast_class */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!context_used);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[] accepts any element class except enums (which match via their base) */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);

			if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
				/* Check that the object is a vector too */
				int bounds_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);

			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);

	} else if (mono_class_is_nullable (klass)) {
		g_assert (!context_used);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);

		if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			g_assert (!context_used);
			/* the remoting code is broken, access the class for now */
			if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);

				mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
				cfg->exception_ptr = klass;

				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);

				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);

			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);

			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);

	/* false path: result is NULL */
	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 * Emit IR for the CISINST (remoting-aware isinst) opcode. SRC is the object
 * reference on the evaluation stack, KLASS the class being tested.
 * NOTE(review): this is a sampled extract — some original lines (braces,
 * else branches, return) fall between the numbered statements shown here.
 */
4642 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4644 /* This opcode takes as input an object reference and a class, and returns:
4645 0) if the object is an instance of the class,
4646 1) if the object is not instance of the class,
4647 2) if the object is a proxy whose type cannot be determined */
4650 #ifndef DISABLE_REMOTING
4651 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4653 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4655 int obj_reg = src->dreg;
4656 int dreg = alloc_ireg (cfg);
4658 #ifndef DISABLE_REMOTING
4659 int klass_reg = alloc_preg (cfg);
4662 NEW_BBLOCK (cfg, true_bb);
4663 NEW_BBLOCK (cfg, false_bb);
4664 NEW_BBLOCK (cfg, end_bb);
4665 #ifndef DISABLE_REMOTING
/* false2_bb / no_proxy_bb are only needed on the remoting (proxy) paths. */
4666 NEW_BBLOCK (cfg, false2_bb);
4667 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance": branch straight to false_bb. */
4670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface test: check the interface bitmap in the vtable first. */
4673 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4674 #ifndef DISABLE_REMOTING
4675 NEW_BBLOCK (cfg, interface_fail_bb);
4678 tmp_reg = alloc_preg (cfg);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4680 #ifndef DISABLE_REMOTING
4681 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: if the object is a transparent proxy with
 * custom type info the result may still be "unknown" (2). */
4682 MONO_START_BB (cfg, interface_fail_bb);
4683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4685 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4687 tmp_reg = alloc_preg (cfg);
4688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-remoting build: a single interface-cast check decides true/false. */
4692 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/* Non-interface class test. */
4695 #ifndef DISABLE_REMOTING
4696 tmp_reg = alloc_preg (cfg);
4697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4698 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4700 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class' proxy_class instead. */
4701 tmp_reg = alloc_preg (cfg);
4702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4705 tmp_reg = alloc_preg (cfg);
4706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4708 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4710 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
/* Plain object (not a proxy): ordinary isinst check. */
4713 MONO_START_BB (cfg, no_proxy_bb);
4715 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4717 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
4721 MONO_START_BB (cfg, false_bb);
4723 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4726 #ifndef DISABLE_REMOTING
4727 MONO_START_BB (cfg, false2_bb);
4729 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4733 MONO_START_BB (cfg, true_bb);
4735 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4737 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value for the evaluation stack. */
4740 MONO_INST_NEW (cfg, ins, OP_ICONST);
4742 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 * Emit IR for the CCASTCLASS (remoting-aware castclass) opcode. SRC is the
 * object reference, KLASS the target class. Unlike handle_cisinst, failure
 * raises InvalidCastException instead of producing a boolean-style result.
 * NOTE(review): sampled extract — some original lines fall between the
 * numbered statements shown here.
 */
4748 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4750 /* This opcode takes as input an object reference and a class, and returns:
4751 0) if the object is an instance of the class,
4752 1) if the object is a proxy whose type cannot be determined
4753 an InvalidCastException exception is thrown otherwise*/
4756 #ifndef DISABLE_REMOTING
4757 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4759 MonoBasicBlock *ok_result_bb;
4761 int obj_reg = src->dreg;
4762 int dreg = alloc_ireg (cfg);
4763 int tmp_reg = alloc_preg (cfg);
4765 #ifndef DISABLE_REMOTING
4766 int klass_reg = alloc_preg (cfg);
4767 NEW_BBLOCK (cfg, end_bb);
4770 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
4772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4773 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a useful message. */
4775 save_cast_details (cfg, klass, obj_reg, FALSE);
4777 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4778 #ifndef DISABLE_REMOTING
4779 NEW_BBLOCK (cfg, interface_fail_bb);
4781 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4782 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info escapes the exception (result 1 = "cannot determine"). */
4783 MONO_START_BB (cfg, interface_fail_bb);
4784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4786 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4788 tmp_reg = alloc_preg (cfg);
4789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4791 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4793 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4794 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-remoting build: throwing interface cast (NULL targets = throw on fail). */
4796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4797 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4801 #ifndef DISABLE_REMOTING
4802 NEW_BBLOCK (cfg, no_proxy_bb);
4804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4806 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: test against the remote class' proxy_class. */
4808 tmp_reg = alloc_preg (cfg);
4809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4810 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4812 tmp_reg = alloc_preg (cfg);
4813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4814 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4815 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4817 NEW_BBLOCK (cfg, fail_1_bb);
4819 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy whose type could not be determined: result 1, no exception. */
4821 MONO_START_BB (cfg, fail_1_bb);
4823 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4824 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: ordinary throwing castclass. */
4826 MONO_START_BB (cfg, no_proxy_bb);
4828 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4830 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4834 MONO_START_BB (cfg, ok_result_bb);
4836 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4838 #ifndef DISABLE_REMOTING
4839 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value. */
4843 MONO_INST_NEW (cfg, ins, OP_ICONST);
4845 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 * Inline Enum.HasFlag (): load the enum value from ENUM_THIS, AND it with
 * ENUM_FLAG, and compare the result back against ENUM_FLAG — all flag bits
 * must be set. Result is a STACK_I4 boolean.
 * NOTE(review): sampled extract — the switch body that computes is_i4 and
 * the #else/closing lines are not visible here.
 */
4850 static G_GNUC_UNUSED MonoInst*
4851 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4853 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4854 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Pick 32-bit vs 64-bit ops based on the enum's underlying type. */
4857 switch (enum_type->type) {
4860 #if SIZEOF_REGISTER == 8
4872 MonoInst *load, *and, *cmp, *ceq;
4873 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4874 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4875 int dest_reg = alloc_ireg (cfg);
/* (enum_this & enum_flag) == enum_flag */
4877 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4878 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4879 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4880 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4882 ceq->type = STACK_I4;
/* Decompose immediately so later passes see only low-level opcodes. */
4885 load = mono_decompose_opcode (cfg, load);
4886 and = mono_decompose_opcode (cfg, and);
4887 cmp = mono_decompose_opcode (cfg, cmp);
4888 ceq = mono_decompose_opcode (cfg, ceq);
4896 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 * Emit the inlined equivalent of mono_delegate_ctor (): allocate the
 * delegate object, store its target/method fields, and install the invoke
 * trampoline. VIRTUAL selects the virtual-delegate trampoline variant.
 * NOTE(review): sampled extract — some lines fall between the numbered
 * statements shown here.
 */
4898 static G_GNUC_UNUSED MonoInst*
4899 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4903 gpointer trampoline;
4904 MonoInst *obj, *method_ins, *tramp_ins;
4909 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out (to the slow path, presumably) if no virtual-invoke impl exists. */
4912 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4916 obj = handle_alloc (cfg, klass, FALSE, 0);
4920 /* Inline the contents of mono_delegate_ctor */
4922 /* Set target field */
4923 /* Optimize away setting of NULL target */
4924 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds an object reference, so the GC needs a barrier. */
4926 if (cfg->gen_write_barriers) {
4927 dreg = alloc_preg (cfg);
4928 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4929 emit_write_barrier (cfg, ptr, target);
4933 /* Set method field */
4934 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4935 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4938 * To avoid looking up the compiled code belonging to the target method
4939 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4940 * store it, and we fill it after the method has been compiled.
4942 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4943 MonoInst *code_slot_ins;
4946 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create / look up the per-domain method -> code-slot hash under
 * the domain lock. */
4948 domain = mono_domain_get ();
4949 mono_domain_lock (domain);
4950 if (!domain_jit_info (domain)->method_code_hash)
4951 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4952 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4954 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4955 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4957 mono_domain_unlock (domain);
/* AOT code cannot embed the raw slot pointer; patch it at load time. */
4959 if (cfg->compile_aot)
4960 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4962 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: AOT patch info, or a concrete pointer. */
4967 if (cfg->compile_aot) {
4968 MonoDelegateClassMethodPair *del_tramp;
4970 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4971 del_tramp->klass = klass;
4972 del_tramp->method = context_used ? NULL : method;
4973 del_tramp->virtual = virtual;
4974 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4977 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4979 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4980 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4983 /* Set invoke_impl field */
4985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual case: copy invoke_impl/method_ptr out of the tramp info. */
4987 dreg = alloc_preg (cfg);
4988 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4989 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4991 dreg = alloc_preg (cfg);
4992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4993 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4996 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 * Emit a call to the mono_array_new_va () icall for a NEWOBJ on a
 * multi-dimensional array ctor with RANK dimensions; SP holds the
 * dimension arguments. Vararg convention, so LLVM codegen is disabled.
 */
5002 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5004 MonoJitICallInfo *info;
5006 /* Need to register the icall so it gets an icall wrapper */
5007 info = mono_get_array_new_va_icall (rank);
5009 cfg->flags |= MONO_CFG_HAS_VARARGS;
5011 /* mono_array_new_va () needs a vararg calling convention */
5012 cfg->disable_llvm = TRUE;
5014 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5015 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5019 * handle_constrained_gsharedvt_call:
5021 * Handle constrained calls where the receiver is a gsharedvt type.
5022 * Return the instruction representing the call. Set the cfg exception on failure.
5025 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5026 gboolean *ref_emit_widen)
5028 MonoInst *ins = NULL;
5029 gboolean emit_widen = *ref_emit_widen;
5032 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5033 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5034 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of callee/signature shapes is supported; everything
 * else falls through to GSHAREDVT_FAILURE below. */
5036 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5037 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
5038 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
5039 MonoInst *args [16];
5042 * This case handles calls to
5043 * - object:ToString()/Equals()/GetHashCode(),
5044 * - System.IComparable<T>:CompareTo()
5045 * - System.IEquatable<T>:Equals ()
5046 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method (rgctx fetch when generic context is used). */
5050 if (mono_method_check_context_used (cmethod))
5051 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5053 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
/* args [2] = the constrained class. */
5054 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5056 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5057 if (fsig->hasthis && fsig->param_count) {
5058 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5059 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5060 ins->dreg = alloc_preg (cfg);
5061 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5062 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus box-type info in args [3]. */
5065 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
5068 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5070 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5071 addr_reg = ins->dreg;
5072 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
/* Non-gsharedvt argument: store its value directly into the array. */
5074 EMIT_NEW_ICONST (cfg, args [3], 0);
5075 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments: args [3]/args [4] are unused, pass zeros. */
5078 EMIT_NEW_ICONST (cfg, args [3], 0);
5079 EMIT_NEW_ICONST (cfg, args [4], 0);
/* Do the actual dispatch in the runtime icall. */
5081 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/deref according to fsig->ret. */
5084 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
5085 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5086 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5090 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5091 MONO_ADD_INS (cfg->cbb, add);
5093 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5094 MONO_ADD_INS (cfg->cbb, ins);
5095 /* ins represents the call result */
5098 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5101 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 * Emit the OP_LOAD_GOTADDR that initializes cfg->got_var at method entry,
 * exactly once per compilation (guarded by cfg->got_var_allocated).
 */
5110 mono_emit_load_got_addr (MonoCompile *cfg)
5112 MonoInst *getaddr, *dummy_use;
/* Nothing to do if there is no GOT var or it was already set up. */
5114 if (!cfg->got_var || cfg->got_var_allocated)
5117 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5118 getaddr->cil_code = cfg->header->code;
5119 getaddr->dreg = cfg->got_var->dreg;
5121 /* Add it to the start of the first bblock */
5122 if (cfg->bb_entry->code) {
5123 getaddr->next = cfg->bb_entry->code;
5124 cfg->bb_entry->code = getaddr;
5127 MONO_ADD_INS (cfg->bb_entry, getaddr);
5129 cfg->got_var_allocated = TRUE;
5132 * Add a dummy use to keep the got_var alive, since real uses might
5133 * only be generated by the back ends.
5134 * Add it to end_bblock, so the variable's lifetime covers the whole
5136 * It would be better to make the usage of the got var explicit in all
5137 * cases when the backend needs it (i.e. calls, throw etc.), so this
5138 * wouldn't be needed.
5140 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5141 MONO_ADD_INS (cfg->bb_exit, dummy_use);
5144 static int inline_limit;
5145 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 * Decide whether METHOD may be inlined into the method being compiled in
 * CFG: checks inline depth, NoInlining/Synchronized flags, body size
 * against MONO_INLINELIMIT, and whether the declaring class' cctor can be
 * handled. NOTE(review): sampled extract — the early-return lines between
 * the conditions are not visible here.
 */
5148 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5150 MonoMethodHeaderSummary header;
5152 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5153 MonoMethodSignature *sig = mono_method_signature (method);
/* Fast rejections based on compilation-wide state. */
5157 if (cfg->disable_inline)
5159 if (cfg->generic_sharing_context)
5162 if (cfg->inline_depth > 10)
5165 #ifdef MONO_ARCH_HAVE_LMF_OPS
5166 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5167 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5168 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5173 if (!mono_method_get_header_summary (method, &header))
5176 /*runtime, icall and pinvoke are checked by summary call*/
5177 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5178 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5179 (mono_class_is_marshalbyref (method->klass)) ||
5183 /* also consider num_locals? */
5184 /* Do the size check early to avoid creating vtables */
/* Size limit is env-overridable via MONO_INLINELIMIT; cached after first read. */
5185 if (!inline_limit_inited) {
5186 if (g_getenv ("MONO_INLINELIMIT"))
5187 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5189 inline_limit = INLINE_LENGTH_LIMIT;
5190 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
5192 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5196 * if we can initialize the class of the method right away, we do,
5197 * otherwise we don't allow inlining if the class needs initialization,
5198 * since it would mean inserting a call to mono_runtime_class_init()
5199 * inside the inlined code
5201 if (!(cfg->opt & MONO_OPT_SHARED)) {
5202 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5203 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5204 vtable = mono_class_vtable (cfg->domain, method->klass);
5207 if (!cfg->compile_aot)
5208 mono_runtime_class_init (vtable);
5209 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5210 if (cfg->run_cctors && method->klass->has_cctor) {
5211 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5212 if (!method->klass->runtime_info)
5213 /* No vtable created yet */
5215 vtable = mono_class_vtable (cfg->domain, method->klass);
5218 /* This makes it so that inline cannot trigger */
5219 /* .cctors: too many apps depend on them */
5220 /* running with a specific order... */
5221 if (! vtable->initialized)
5223 mono_runtime_class_init (vtable);
5225 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5226 if (!method->klass->runtime_info)
5227 /* No vtable created yet */
5229 vtable = mono_class_vtable (cfg->domain, method->klass);
5232 if (!vtable->initialized)
5237 * If we're compiling for shared code
5238 * the cctor will need to be run at aot method load time, for example,
5239 * or at the end of the compilation of the inlining method.
5241 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods touching R4 values. */
5245 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5246 if (mono_arch_is_soft_float ()) {
5248 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5250 for (i = 0; i < sig->param_count; ++i)
5251 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Caller-supplied blacklist (e.g. to break inline recursion). */
5256 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 * Decide whether a static-field access in METHOD requires emitting a class
 * initialization check for KLASS. JIT mode can consult the live VTABLE;
 * AOT cannot, so it falls through to the conservative checks.
 */
5263 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5265 if (!cfg->compile_aot) {
5267 if (vtable->initialized)
/* BeforeFieldInit cctors may run at any time; self-access is exempt. */
5271 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5272 if (cfg->method == method)
5276 if (!mono_class_needs_cctor_run (klass, method))
5279 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5280 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit the address computation for a one-dimensional (szarray) element:
 * &arr->vector [index * element_size]. BCHECK controls whether a bounds
 * check against max_length is emitted. Returns the address instruction
 * (STACK_MP, klass = element class).
 */
5287 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5291 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Gsharedvt element types have a runtime-variable size (handled below). */
5294 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5297 mono_class_init (klass);
5298 size = mono_class_array_element_size (klass);
5301 mult_reg = alloc_preg (cfg);
5302 array_reg = arr->dreg;
5303 index_reg = index->dreg;
5305 #if SIZEOF_REGISTER == 8
5306 /* The array reg is 64 bits but the index reg is only 32 */
5307 if (COMPILE_LLVM (cfg)) {
5309 index2_reg = index_reg;
5311 index2_reg = alloc_preg (cfg);
5312 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit: truncate an I8 index to I4. */
5315 if (index->type == STACK_I8) {
5316 index2_reg = alloc_preg (cfg);
5317 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5319 index2_reg = index_reg;
5324 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: one LEA when the element size fits a scale factor. */
5326 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5327 if (size == 1 || size == 2 || size == 4 || size == 8) {
5328 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5330 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5331 ins->klass = mono_class_get_element_class (klass);
5332 ins->type = STACK_MP;
/* Generic path: arr + index * size + offsetof (vector). */
5338 add_reg = alloc_ireg_mp (cfg);
5341 MonoInst *rgctx_ins;
/* Gsharedvt: fetch the element size from the rgctx at runtime. */
5344 g_assert (cfg->generic_sharing_context);
5345 context_used = mini_class_check_context_used (cfg, klass);
5346 g_assert (context_used);
5347 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5348 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5350 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5352 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5353 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5354 ins->klass = mono_class_get_element_class (klass);
5355 ins->type = STACK_MP;
5356 MONO_ADD_INS (cfg->cbb, ins);
5361 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit the address computation for an element of a rank-2 array,
 * including per-dimension lower-bound adjustment and range checks
 * against the MonoArrayBounds records. Requires native multiply
 * (guarded by MONO_ARCH_EMULATE_MUL_DIV above).
 */
5363 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5365 int bounds_reg = alloc_preg (cfg);
5366 int add_reg = alloc_ireg_mp (cfg);
5367 int mult_reg = alloc_preg (cfg);
5368 int mult2_reg = alloc_preg (cfg);
5369 int low1_reg = alloc_preg (cfg);
5370 int low2_reg = alloc_preg (cfg);
5371 int high1_reg = alloc_preg (cfg);
5372 int high2_reg = alloc_preg (cfg);
5373 int realidx1_reg = alloc_preg (cfg);
5374 int realidx2_reg = alloc_preg (cfg);
5375 int sum_reg = alloc_preg (cfg);
5376 int index1, index2, tmpreg;
5380 mono_class_init (klass);
5381 size = mono_class_array_element_size (klass);
5383 index1 = index_ins1->dreg;
5384 index2 = index_ins2->dreg;
5386 #if SIZEOF_REGISTER == 8
5387 /* The array reg is 64 bits but the index reg is only 32 */
5388 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes into fresh 64-bit regs. */
5391 tmpreg = alloc_preg (cfg);
5392 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5394 tmpreg = alloc_preg (cfg);
5395 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5399 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5403 /* range checking */
5404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5405 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; must be < length. */
5407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5408 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5409 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5411 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5412 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5413 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds record, same check. */
5415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5416 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5417 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5418 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5419 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5420 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5421 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2 + realidx2) * size) + offsetof (vector) */
5423 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5424 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5426 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5427 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5429 ins->type = STACK_MP;
5431 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 * Dispatch element-address computation for the array Address/Get/Set
 * accessor CMETHOD: inline for rank 1 (and rank 2 with intrinsics),
 * otherwise call the generated array-address marshal wrapper.
 */
5438 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5442 MonoMethod *addr_method;
5444 MonoClass *eclass = cmethod->klass->element_class;
/* A setter's last parameter is the value, not an index. */
5446 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5449 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5451 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5452 /* emit_ldelema_2 depends on OP_LMUL */
5453 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5454 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* Variable-size (gsharedvt) element classes cannot use the wrapper path. */
5458 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
5461 element_size = mono_class_array_element_size (eclass);
5462 addr_method = mono_marshal_get_array_address (rank, element_size);
5463 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5468 static MonoBreakPolicy
5469 always_insert_breakpoint (MonoMethod *method)
5471 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5474 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5477 * mono_set_break_policy:
5478 * policy_callback: the new callback function
5480 * Allow embedders to decide whether to actually obey breakpoint instructions
5481 * (both break IL instructions and Debugger.Break () method calls), for example
5482 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5483 * untrusted or semi-trusted code.
5485 * @policy_callback will be called every time a break point instruction needs to
5486 * be inserted with the method argument being the method that calls Debugger.Break()
5487 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5488 * if it wants the breakpoint to not be effective in the given method.
5489 * #MONO_BREAK_POLICY_ALWAYS is the default.
5492 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5494 if (policy_callback)
5495 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
5497 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 * Consult the registered break policy for METHOD.
 * (Name typo "brekpoint" is historical; kept because callers use it.)
 */
5501 should_insert_brekpoint (MonoMethod *method) {
5502 switch (break_policy_func (method)) {
5503 case MONO_BREAK_POLICY_ALWAYS:
5505 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG used to mean "only under mdb"; mdb is gone, so warn. */
5507 case MONO_BREAK_POLICY_ON_DBG:
5508 g_warning ("mdb no longer supported");
5511 g_warning ("Incorrect value returned from break policy callback");
5516 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/* args [0] = array, args [1] = index, args [2] = value address;
 * is_set selects store-into-array vs load-out-of-array. */
5518 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5520 MonoInst *addr, *store, *load;
5521 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5523 /* the bounds check is already done by the callers */
5524 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args [2] into the element; reference elements need a barrier. */
5526 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5527 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5528 if (mini_type_is_reference (cfg, fsig->params [2]))
5529 emit_write_barrier (cfg, addr, load);
/* Get: copy the element into *args [2]. */
5531 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5532 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is (or instantiates to) a reference type. */
5539 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5541 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 * Emit a STELEM: sp [0] = array, sp [1] = index, sp [2] = value.
 * Reference-type stores with SAFETY_CHECKS go through the virtual
 * stelemref helper (which performs the array covariance check);
 * everything else stores directly with an optional bounds check.
 */
5545 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5547 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5548 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5549 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5550 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5551 MonoInst *iargs [3];
5554 mono_class_setup_vtable (obj_array);
5555 g_assert (helper->slot);
5557 if (sp [0]->type != STACK_OBJ)
5559 if (sp [2]->type != STACK_OBJ)
/* Virtual call: the helper does the type/bounds checks itself. */
5566 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Gsharedvt element: address computation then an OP_STOREV_MEMBASE. */
5570 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5573 // FIXME-VT: OP_ICONST optimization
5574 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5575 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5576 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset at compile time. */
5577 } else if (sp [1]->opcode == OP_ICONST) {
5578 int array_reg = sp [0]->dreg;
5579 int index_reg = sp [1]->dreg;
5580 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5583 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5584 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the address, store, and barrier ref elements. */
5586 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5587 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5588 if (generic_class_is_reference_type (cfg, klass))
5589 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Emit IR for Array.UnsafeStore/UnsafeLoad: an element access with no
 *   safety checks. is_set selects store (element type from params [2])
 *   vs load (element type from the return type).
 *   NOTE(review): the if/else structure around these lines is not visible
 *   in this fragment.
 */
5596 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5601 eklass = mono_class_from_mono_type (fsig->params [2]);
5603 eklass = mono_class_from_mono_type (fsig->ret);
/* Store: reuse emit_array_store with safety_checks == FALSE. */
5606 return emit_array_store (cfg, eklass, args, FALSE);
/* Load: address the element (no bounds check) and load it. */
5608 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5609 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *   TRUE if a value of PARAM_KLASS can be reinterpreted as RETURN_KLASS by a
 *   raw move (Array.UnsafeMov): both must be valuetypes, hold no GC
 *   references, not mix struct vs primitive/enum representation, contain no
 *   floats, and have identical sizes.
 *   NOTE(review): the '¶m_klass' tokens below look like mojibake for
 *   '&param_klass' (HTML '&para;' collapsed to '¶') — confirm against the
 *   upstream file before building.
 */
5615 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5619 param_klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, ¶m_klass->byval_arg));
5621 //Only allow for valuetypes
5622 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reference-carrying structs can't be blindly reinterpreted (GC tracking). */
5626 if (param_klass->has_references || return_klass->has_references)
5629 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5630 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5631 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floats live in different registers; a raw move would be wrong. */
5634 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5635 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5638 //And have the same size
5639 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *   Emit IR for Array.UnsafeMov: reinterpret args [0] as the return type
 *   when the two types are layout-compatible (see is_unsafe_mov_compatible),
 *   either directly or as rank-1 arrays of compatible element types.
 *   NOTE(review): the emitted move/return statements for the success cases
 *   are not visible in this fragment.
 */
5645 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5647 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5648 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5650 //Valuetypes that are semantically equivalent
5651 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5654 //Arrays of valuetypes that are semantically equivalent
5655 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Try to replace a constructor call with an intrinsic IR sequence.
 *   Tries SIMD intrinsics first (when MONO_OPT_SIMD is enabled and the
 *   architecture supports them), then native-types intrinsics.
 *   Returns NULL paths are not visible in this fragment.
 */
5662 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5664 #ifdef MONO_ARCH_SIMD_INTRINSICS
5665 MonoInst *ins = NULL;
5667 if (cfg->opt & MONO_OPT_SIMD) {
5668 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5674 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 *   (MONO_MEMORY_BARRIER_ACQ/REL/SEQ) to the current basic block.
 */
5678 emit_memory_barrier (MonoCompile *cfg, int kind)
5680 MonoInst *ins = NULL;
5681 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5682 MONO_ADD_INS (cfg->cbb, ins);
5683 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsic replacements that are only emitted when compiling with the
 *   LLVM backend: System.Math Sin/Cos/Sqrt/Abs(double) as unary FP opcodes,
 *   and Min/Max as CMOV-based IMIN/IMAX/LMIN/LMAX when MONO_OPT_CMOV is on.
 *   NOTE(review): the opcode assignments for several branches (e.g. the
 *   OP_SIN/OP_COS lines and I4/I8 Min/Max) are missing from this fragment.
 */
5689 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5691 MonoInst *ins = NULL;
5694 /* The LLVM backend supports these intrinsics */
5695 if (cmethod->klass == mono_defaults.math_class) {
5696 if (strcmp (cmethod->name, "Sin") == 0) {
5698 } else if (strcmp (cmethod->name, "Cos") == 0) {
5700 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5702 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: result in a fresh float register. */
5706 if (opcode && fsig->param_count == 1) {
5707 MONO_INST_NEW (cfg, ins, opcode);
5708 ins->type = STACK_R8;
5709 ins->dreg = mono_alloc_freg (cfg);
5710 ins->sreg1 = args [0]->dreg;
5711 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only when conditional moves are available (branchless lowering). */
5715 if (cfg->opt & MONO_OPT_CMOV) {
5716 if (strcmp (cmethod->name, "Min") == 0) {
5717 if (fsig->params [0]->type == MONO_TYPE_I4)
5719 if (fsig->params [0]->type == MONO_TYPE_U4)
5720 opcode = OP_IMIN_UN;
5721 else if (fsig->params [0]->type == MONO_TYPE_I8)
5723 else if (fsig->params [0]->type == MONO_TYPE_U8)
5724 opcode = OP_LMIN_UN;
5725 } else if (strcmp (cmethod->name, "Max") == 0) {
5726 if (fsig->params [0]->type == MONO_TYPE_I4)
5728 if (fsig->params [0]->type == MONO_TYPE_U4)
5729 opcode = OP_IMAX_UN;
5730 else if (fsig->params [0]->type == MONO_TYPE_I8)
5732 else if (fsig->params [0]->type == MONO_TYPE_U8)
5733 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: stack type follows the 32/64-bit operand width. */
5737 if (opcode && fsig->param_count == 2) {
5738 MONO_INST_NEW (cfg, ins, opcode);
5739 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5740 ins->dreg = mono_alloc_ireg (cfg);
5741 ins->sreg1 = args [0]->dreg;
5742 ins->sreg2 = args [1]->dreg;
5743 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are valid even in shared (generic-sharing) methods:
 *   the unsafe System.Array accessors UnsafeStore/UnsafeLoad/UnsafeMov.
 */
5751 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5753 if (cmethod->klass == mono_defaults.array_class) {
5754 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5755 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5756 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5757 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5758 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5759 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *   Main intrinsic dispatcher: if CMETHOD is one of the methods the JIT
 *   knows how to inline as IR (String/Object/Array accessors, RuntimeHelpers,
 *   Thread/Monitor/Interlocked/Volatile operations, Debugger.Break,
 *   Environment checks, ObjC Selector.GetHandle, SIMD/native-type/LLVM/arch
 *   intrinsics), emit the replacement instructions and return the result;
 *   otherwise the call is emitted normally by the caller.
 *   NOTE(review): this fragment is missing many lines (braces, #else/#endif,
 *   returns, some opcode assignments); comments below describe only what the
 *   visible lines establish.
 */
5766 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5768 MonoInst *ins = NULL;
/* Lazily cache the System.Runtime.CompilerServices.RuntimeHelpers class. */
5770 static MonoClass *runtime_helpers_class = NULL;
5771 if (! runtime_helpers_class)
5772 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5773 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String: get_Chars, get_Length --- */
5775 if (cmethod->klass == mono_defaults.string_class) {
5776 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5777 int dreg = alloc_ireg (cfg);
5778 int index_reg = alloc_preg (cfg);
5779 int add_reg = alloc_preg (cfg);
5781 #if SIZEOF_REGISTER == 8
5782 /* The array reg is 64 bits but the index reg is only 32 */
5783 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5785 index_reg = args [1]->dreg;
5787 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
/* x86/amd64: fold string base + index*2 + chars offset into one LEA. */
5789 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5790 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5791 add_reg = ins->dreg;
5792 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Other targets: shift the index by 1 (UTF-16 chars) and add manually. */
5795 int mult_reg = alloc_preg (cfg);
5796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5797 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5798 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5799 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5801 type_from_op (cfg, ins, NULL, NULL);
5803 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5804 int dreg = alloc_ireg (cfg);
5805 /* Decompose later to allow more optimizations */
5806 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5807 ins->type = STACK_I4;
5808 ins->flags |= MONO_INST_FAULT;
5809 cfg->cbb->has_array_access = TRUE;
5810 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* --- System.Object: GetType, InternalGetHashCode, parameterless .ctor --- */
5815 } else if (cmethod->klass == mono_defaults.object_class) {
5817 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5818 int dreg = alloc_ireg_ref (cfg);
5819 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type, with a fault check on the object dereference. */
5820 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5821 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5822 type_from_op (cfg, ins, NULL, NULL);
5825 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash the object address; only valid with a non-moving GC. */
5826 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5827 int dreg = alloc_ireg (cfg);
5828 int t1 = alloc_ireg (cfg);
5830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
/* 2654435761 = Knuth's multiplicative-hash constant. */
5831 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5832 ins->type = STACK_I4;
/* Object..ctor () does nothing: replace with a NOP. */
5836 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5837 MONO_INST_NEW (cfg, ins, OP_NOP);
5838 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array: generic accessors, GetLength/GetLowerBound, get_Rank, get_Length --- */
5842 } else if (cmethod->klass == mono_defaults.array_class) {
5843 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5844 return emit_array_generic_access (cfg, fsig, args, FALSE);
5845 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5846 return emit_array_generic_access (cfg, fsig, args, TRUE);
5848 #ifndef MONO_BIG_ARRAYS
5850 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
/* Only the dimension-0 constant-argument form is inlined. */
5853 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5854 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5855 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5856 int dreg = alloc_ireg (cfg);
5857 int bounds_reg = alloc_ireg_mp (cfg);
5858 MonoBasicBlock *end_bb, *szarray_bb;
5859 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5861 NEW_BBLOCK (cfg, end_bb);
5862 NEW_BBLOCK (cfg, szarray_bb);
/* A NULL bounds pointer means the array is a szarray (rank-1, lower bound 0). */
5864 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5865 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5868 /* Non-szarray case */
5870 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5871 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5873 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5874 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5876 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) == max_length, GetLowerBound(0) == 0. */
5879 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5880 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5882 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5883 MONO_START_BB (cfg, end_bb);
5885 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5886 ins->type = STACK_I4;
/* Quick reject: remaining Array intrinsics all start with 'g'. */
5892 if (cmethod->name [0] != 'g')
5895 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5896 int dreg = alloc_ireg (cfg);
5897 int vtable_reg = alloc_preg (cfg);
5898 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5899 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5900 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5901 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5902 type_from_op (cfg, ins, NULL, NULL);
5905 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5906 int dreg = alloc_ireg (cfg);
5908 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5909 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5910 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers.get_OffsetToStringData: a compile-time constant --- */
5915 } else if (cmethod->klass == runtime_helpers_class) {
5917 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5918 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread: SpinWait_nop, MemoryBarrier, VolatileRead/Write --- */
5922 } else if (cmethod->klass == mono_defaults.thread_class) {
5923 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5924 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5925 MONO_ADD_INS (cfg->cbb, ins);
5927 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5928 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5929 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5931 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
/* Pick the load opcode matching the parameter's CIL type. */
5933 if (fsig->params [0]->type == MONO_TYPE_I1)
5934 opcode = OP_LOADI1_MEMBASE;
5935 else if (fsig->params [0]->type == MONO_TYPE_U1)
5936 opcode = OP_LOADU1_MEMBASE;
5937 else if (fsig->params [0]->type == MONO_TYPE_I2)
5938 opcode = OP_LOADI2_MEMBASE;
5939 else if (fsig->params [0]->type == MONO_TYPE_U2)
5940 opcode = OP_LOADU2_MEMBASE;
5941 else if (fsig->params [0]->type == MONO_TYPE_I4)
5942 opcode = OP_LOADI4_MEMBASE;
5943 else if (fsig->params [0]->type == MONO_TYPE_U4)
5944 opcode = OP_LOADU4_MEMBASE;
5945 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5946 opcode = OP_LOADI8_MEMBASE;
5947 else if (fsig->params [0]->type == MONO_TYPE_R4)
5948 opcode = OP_LOADR4_MEMBASE;
5949 else if (fsig->params [0]->type == MONO_TYPE_R8)
5950 opcode = OP_LOADR8_MEMBASE;
5951 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5952 opcode = OP_LOAD_MEMBASE;
5955 MONO_INST_NEW (cfg, ins, opcode);
5956 ins->inst_basereg = args [0]->dreg;
5957 ins->inst_offset = 0;
5958 MONO_ADD_INS (cfg->cbb, ins);
/* Set dreg/stack type per the loaded CIL type (case labels not visible here). */
5960 switch (fsig->params [0]->type) {
5967 ins->dreg = mono_alloc_ireg (cfg);
5968 ins->type = STACK_I4;
5972 ins->dreg = mono_alloc_lreg (cfg);
5973 ins->type = STACK_I8;
5977 ins->dreg = mono_alloc_ireg (cfg);
5978 #if SIZEOF_REGISTER == 8
5979 ins->type = STACK_I8;
5981 ins->type = STACK_I4;
5986 ins->dreg = mono_alloc_freg (cfg);
5987 ins->type = STACK_R8;
5990 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5991 ins->dreg = mono_alloc_ireg_ref (cfg);
5992 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposing on 32-bit targets. */
5996 if (opcode == OP_LOADI8_MEMBASE)
5997 ins = mono_decompose_opcode (cfg, ins);
/* Acquire barrier after a volatile read. */
5999 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6003 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6005 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
/* Pick the store opcode matching the parameter's CIL type. */
6007 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6008 opcode = OP_STOREI1_MEMBASE_REG;
6009 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6010 opcode = OP_STOREI2_MEMBASE_REG;
6011 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6012 opcode = OP_STOREI4_MEMBASE_REG;
6013 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6014 opcode = OP_STOREI8_MEMBASE_REG;
6015 else if (fsig->params [0]->type == MONO_TYPE_R4)
6016 opcode = OP_STORER4_MEMBASE_REG;
6017 else if (fsig->params [0]->type == MONO_TYPE_R8)
6018 opcode = OP_STORER8_MEMBASE_REG;
6019 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6020 opcode = OP_STORE_MEMBASE_REG;
/* Release barrier before a volatile write. */
6023 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6025 MONO_INST_NEW (cfg, ins, opcode);
6026 ins->sreg1 = args [1]->dreg;
6027 ins->inst_destbasereg = args [0]->dreg;
6028 ins->inst_offset = 0;
6029 MONO_ADD_INS (cfg->cbb, ins);
6031 if (opcode == OP_STOREI8_MEMBASE_REG)
6032 ins = mono_decompose_opcode (cfg, ins);
/* --- System.Threading.Monitor: Enter/Exit fast paths via abs-call trampolines --- */
6037 } else if (cmethod->klass == mono_defaults.monitor_class) {
6038 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
6039 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
6042 if (COMPILE_LLVM (cfg)) {
6044 * Pass the argument normally, the LLVM backend will handle the
6045 * calling convention problems.
6047 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the object is passed in a fixed arch register, not via args. */
6049 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
6050 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6051 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6052 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6055 return (MonoInst*)call;
6056 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Enter (obj, ref bool lockTaken) — the v4 variant with two fixed registers. */
6057 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
6060 if (COMPILE_LLVM (cfg)) {
6062 * Pass the argument normally, the LLVM backend will handle the
6063 * calling convention problems.
6065 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
6067 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
6068 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6069 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6070 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
6073 return (MonoInst*)call;
6075 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
6078 if (COMPILE_LLVM (cfg)) {
6079 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6081 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6082 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6083 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6084 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6087 return (MonoInst*)call;
/* --- System.Threading.Interlocked --- */
6090 } else if (cmethod->klass->image == mono_defaults.corlib &&
6091 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6092 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6095 #if SIZEOF_REGISTER == 8
/* Interlocked.Read (ref long) — on 64-bit, a plain atomic load suffices. */
6096 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6097 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6098 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6099 ins->dreg = mono_alloc_preg (cfg);
6100 ins->sreg1 = args [0]->dreg;
6101 ins->type = STACK_I8;
6102 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6103 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: barrier + ordinary 64-bit load + barrier. */
6107 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6109 /* 64 bit reads are already atomic */
6110 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6111 load_ins->dreg = mono_alloc_preg (cfg);
6112 load_ins->inst_basereg = args [0]->dreg;
6113 load_ins->inst_offset = 0;
6114 load_ins->type = STACK_I8;
6115 MONO_ADD_INS (cfg->cbb, load_ins);
6117 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of constant +1. */
6124 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6125 MonoInst *ins_iconst;
6128 if (fsig->params [0]->type == MONO_TYPE_I4) {
6129 opcode = OP_ATOMIC_ADD_I4;
6130 cfg->has_atomic_add_i4 = TRUE;
6132 #if SIZEOF_REGISTER == 8
6133 else if (fsig->params [0]->type == MONO_TYPE_I8)
6134 opcode = OP_ATOMIC_ADD_I8;
6137 if (!mono_arch_opcode_supported (opcode))
6139 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6140 ins_iconst->inst_c0 = 1;
6141 ins_iconst->dreg = mono_alloc_ireg (cfg);
6142 MONO_ADD_INS (cfg->cbb, ins_iconst);
6144 MONO_INST_NEW (cfg, ins, opcode);
6145 ins->dreg = mono_alloc_ireg (cfg);
6146 ins->inst_basereg = args [0]->dreg;
6147 ins->inst_offset = 0;
6148 ins->sreg2 = ins_iconst->dreg;
6149 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6150 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of constant -1. */
6152 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6153 MonoInst *ins_iconst;
6156 if (fsig->params [0]->type == MONO_TYPE_I4) {
6157 opcode = OP_ATOMIC_ADD_I4;
6158 cfg->has_atomic_add_i4 = TRUE;
6160 #if SIZEOF_REGISTER == 8
6161 else if (fsig->params [0]->type == MONO_TYPE_I8)
6162 opcode = OP_ATOMIC_ADD_I8;
6165 if (!mono_arch_opcode_supported (opcode))
6167 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6168 ins_iconst->inst_c0 = -1;
6169 ins_iconst->dreg = mono_alloc_ireg (cfg);
6170 MONO_ADD_INS (cfg->cbb, ins_iconst);
6172 MONO_INST_NEW (cfg, ins, opcode);
6173 ins->dreg = mono_alloc_ireg (cfg);
6174 ins->inst_basereg = args [0]->dreg;
6175 ins->inst_offset = 0;
6176 ins->sreg2 = ins_iconst->dreg;
6177 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6178 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied value. */
6180 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6183 if (fsig->params [0]->type == MONO_TYPE_I4) {
6184 opcode = OP_ATOMIC_ADD_I4;
6185 cfg->has_atomic_add_i4 = TRUE;
6187 #if SIZEOF_REGISTER == 8
6188 else if (fsig->params [0]->type == MONO_TYPE_I8)
6189 opcode = OP_ATOMIC_ADD_I8;
6192 if (!mono_arch_opcode_supported (opcode))
6194 MONO_INST_NEW (cfg, ins, opcode);
6195 ins->dreg = mono_alloc_ireg (cfg);
6196 ins->inst_basereg = args [0]->dreg;
6197 ins->inst_offset = 0;
6198 ins->sreg2 = args [1]->dreg;
6199 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6200 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic swap; floats are moved through integer registers. */
6203 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6204 MonoInst *f2i = NULL, *i2f;
6205 guint32 opcode, f2i_opcode, i2f_opcode;
6206 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6207 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6209 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6210 fsig->params [0]->type == MONO_TYPE_R4) {
6211 opcode = OP_ATOMIC_EXCHANGE_I4;
6212 f2i_opcode = OP_MOVE_F_TO_I4;
6213 i2f_opcode = OP_MOVE_I4_TO_F;
6214 cfg->has_atomic_exchange_i4 = TRUE;
6216 #if SIZEOF_REGISTER == 8
6218 fsig->params [0]->type == MONO_TYPE_I8 ||
6219 fsig->params [0]->type == MONO_TYPE_R8 ||
6220 fsig->params [0]->type == MONO_TYPE_I) {
6221 opcode = OP_ATOMIC_EXCHANGE_I8;
6222 f2i_opcode = OP_MOVE_F_TO_I8;
6223 i2f_opcode = OP_MOVE_I8_TO_F;
6226 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6227 opcode = OP_ATOMIC_EXCHANGE_I4;
6228 cfg->has_atomic_exchange_i4 = TRUE;
6234 if (!mono_arch_opcode_supported (opcode))
6238 /* TODO: Decompose these opcodes instead of bailing here. */
6239 if (COMPILE_SOFT_FLOAT (cfg))
/* Float argument: bitcast into an int register before the atomic op. */
6242 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6243 f2i->dreg = mono_alloc_ireg (cfg);
6244 f2i->sreg1 = args [1]->dreg;
6245 if (f2i_opcode == OP_MOVE_F_TO_I4)
6246 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6247 MONO_ADD_INS (cfg->cbb, f2i);
6250 MONO_INST_NEW (cfg, ins, opcode);
6251 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6252 ins->inst_basereg = args [0]->dreg;
6253 ins->inst_offset = 0;
6254 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6255 MONO_ADD_INS (cfg->cbb, ins);
6257 switch (fsig->params [0]->type) {
6259 ins->type = STACK_I4;
6262 ins->type = STACK_I8;
6265 #if SIZEOF_REGISTER == 8
6266 ins->type = STACK_I8;
6268 ins->type = STACK_I4;
6273 ins->type = STACK_R8;
6276 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6277 ins->type = STACK_OBJ;
/* Float result: bitcast back from the integer register. */
6282 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6283 i2f->dreg = mono_alloc_freg (cfg);
6284 i2f->sreg1 = ins->dreg;
6285 i2f->type = STACK_R8;
6286 if (i2f_opcode == OP_MOVE_I4_TO_F)
6287 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6288 MONO_ADD_INS (cfg->cbb, i2f);
/* Swapping a reference into the location needs a write barrier. */
6293 if (cfg->gen_write_barriers && is_ref)
6294 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (3-arg): atomic CAS; same float-through-int handling. */
6296 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6297 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6298 guint32 opcode, f2i_opcode, i2f_opcode;
6299 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6300 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6302 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6303 fsig->params [1]->type == MONO_TYPE_R4) {
6304 opcode = OP_ATOMIC_CAS_I4;
6305 f2i_opcode = OP_MOVE_F_TO_I4;
6306 i2f_opcode = OP_MOVE_I4_TO_F;
6307 cfg->has_atomic_cas_i4 = TRUE;
6309 #if SIZEOF_REGISTER == 8
6311 fsig->params [1]->type == MONO_TYPE_I8 ||
6312 fsig->params [1]->type == MONO_TYPE_R8 ||
6313 fsig->params [1]->type == MONO_TYPE_I) {
6314 opcode = OP_ATOMIC_CAS_I8;
6315 f2i_opcode = OP_MOVE_F_TO_I8;
6316 i2f_opcode = OP_MOVE_I8_TO_F;
6319 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6320 opcode = OP_ATOMIC_CAS_I4;
6321 cfg->has_atomic_cas_i4 = TRUE;
6327 if (!mono_arch_opcode_supported (opcode))
6331 /* TODO: Decompose these opcodes instead of bailing here. */
6332 if (COMPILE_SOFT_FLOAT (cfg))
/* Bitcast both the new value and the comparand to integer registers. */
6335 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6336 f2i_new->dreg = mono_alloc_ireg (cfg);
6337 f2i_new->sreg1 = args [1]->dreg;
6338 if (f2i_opcode == OP_MOVE_F_TO_I4)
6339 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6340 MONO_ADD_INS (cfg->cbb, f2i_new);
6342 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6343 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6344 f2i_cmp->sreg1 = args [2]->dreg;
6345 if (f2i_opcode == OP_MOVE_F_TO_I4)
6346 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6347 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6350 MONO_INST_NEW (cfg, ins, opcode);
6351 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6352 ins->sreg1 = args [0]->dreg;
6353 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6354 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6355 MONO_ADD_INS (cfg->cbb, ins);
6357 switch (fsig->params [1]->type) {
6359 ins->type = STACK_I4;
6362 ins->type = STACK_I8;
6365 #if SIZEOF_REGISTER == 8
6366 ins->type = STACK_I8;
6368 ins->type = STACK_I4;
6372 ins->type = cfg->r4_stack_type;
6375 ins->type = STACK_R8;
6378 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6379 ins->type = STACK_OBJ;
6384 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6385 i2f->dreg = mono_alloc_freg (cfg);
6386 i2f->sreg1 = ins->dreg;
6387 i2f->type = STACK_R8;
6388 if (i2f_opcode == OP_MOVE_I4_TO_F)
6389 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6390 MONO_ADD_INS (cfg->cbb, i2f);
6395 if (cfg->gen_write_barriers && is_ref)
6396 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange (4-arg, int): CAS plus a bool success out-parameter. */
6398 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6399 fsig->params [1]->type == MONO_TYPE_I4) {
6400 MonoInst *cmp, *ceq;
6402 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6405 /* int32 r = CAS (location, value, comparand); */
6406 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6407 ins->dreg = alloc_ireg (cfg);
6408 ins->sreg1 = args [0]->dreg;
6409 ins->sreg2 = args [1]->dreg;
6410 ins->sreg3 = args [2]->dreg;
6411 ins->type = STACK_I4;
6412 MONO_ADD_INS (cfg->cbb, ins);
6414 /* bool result = r == comparand; */
6415 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6416 cmp->sreg1 = ins->dreg;
6417 cmp->sreg2 = args [2]->dreg;
6418 cmp->type = STACK_I4;
6419 MONO_ADD_INS (cfg->cbb, cmp);
6421 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6422 ceq->dreg = alloc_ireg (cfg);
6423 ceq->type = STACK_I4;
6424 MONO_ADD_INS (cfg->cbb, ceq);
6426 /* *success = result; */
6427 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6429 cfg->has_atomic_cas_i4 = TRUE;
6431 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6432 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: Read/Write as atomic acquire/release ops --- */
6436 } else if (cmethod->klass->image == mono_defaults.corlib &&
6437 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6438 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6441 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6443 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6444 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6446 if (fsig->params [0]->type == MONO_TYPE_I1)
6447 opcode = OP_ATOMIC_LOAD_I1;
6448 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6449 opcode = OP_ATOMIC_LOAD_U1;
6450 else if (fsig->params [0]->type == MONO_TYPE_I2)
6451 opcode = OP_ATOMIC_LOAD_I2;
6452 else if (fsig->params [0]->type == MONO_TYPE_U2)
6453 opcode = OP_ATOMIC_LOAD_U2;
6454 else if (fsig->params [0]->type == MONO_TYPE_I4)
6455 opcode = OP_ATOMIC_LOAD_I4;
6456 else if (fsig->params [0]->type == MONO_TYPE_U4)
6457 opcode = OP_ATOMIC_LOAD_U4;
6458 else if (fsig->params [0]->type == MONO_TYPE_R4)
6459 opcode = OP_ATOMIC_LOAD_R4;
6460 else if (fsig->params [0]->type == MONO_TYPE_R8)
6461 opcode = OP_ATOMIC_LOAD_R8;
6462 #if SIZEOF_REGISTER == 8
6463 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6464 opcode = OP_ATOMIC_LOAD_I8;
6465 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6466 opcode = OP_ATOMIC_LOAD_U8;
6468 else if (fsig->params [0]->type == MONO_TYPE_I)
6469 opcode = OP_ATOMIC_LOAD_I4;
6470 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6471 opcode = OP_ATOMIC_LOAD_U4;
6475 if (!mono_arch_opcode_supported (opcode))
6478 MONO_INST_NEW (cfg, ins, opcode);
6479 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6480 ins->sreg1 = args [0]->dreg;
/* Acquire semantics for Volatile.Read. */
6481 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6482 MONO_ADD_INS (cfg->cbb, ins);
6484 switch (fsig->params [0]->type) {
6485 case MONO_TYPE_BOOLEAN:
6492 ins->type = STACK_I4;
6496 ins->type = STACK_I8;
6500 #if SIZEOF_REGISTER == 8
6501 ins->type = STACK_I8;
6503 ins->type = STACK_I4;
6507 ins->type = cfg->r4_stack_type;
6510 ins->type = STACK_R8;
6513 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6514 ins->type = STACK_OBJ;
6520 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6522 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6524 if (fsig->params [0]->type == MONO_TYPE_I1)
6525 opcode = OP_ATOMIC_STORE_I1;
6526 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6527 opcode = OP_ATOMIC_STORE_U1;
6528 else if (fsig->params [0]->type == MONO_TYPE_I2)
6529 opcode = OP_ATOMIC_STORE_I2;
6530 else if (fsig->params [0]->type == MONO_TYPE_U2)
6531 opcode = OP_ATOMIC_STORE_U2;
6532 else if (fsig->params [0]->type == MONO_TYPE_I4)
6533 opcode = OP_ATOMIC_STORE_I4;
6534 else if (fsig->params [0]->type == MONO_TYPE_U4)
6535 opcode = OP_ATOMIC_STORE_U4;
6536 else if (fsig->params [0]->type == MONO_TYPE_R4)
6537 opcode = OP_ATOMIC_STORE_R4;
6538 else if (fsig->params [0]->type == MONO_TYPE_R8)
6539 opcode = OP_ATOMIC_STORE_R8;
6540 #if SIZEOF_REGISTER == 8
6541 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6542 opcode = OP_ATOMIC_STORE_I8;
6543 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6544 opcode = OP_ATOMIC_STORE_U8;
6546 else if (fsig->params [0]->type == MONO_TYPE_I)
6547 opcode = OP_ATOMIC_STORE_I4;
6548 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6549 opcode = OP_ATOMIC_STORE_U4;
6553 if (!mono_arch_opcode_supported (opcode))
6556 MONO_INST_NEW (cfg, ins, opcode);
6557 ins->dreg = args [0]->dreg;
6558 ins->sreg1 = args [1]->dreg;
/* Release semantics for Volatile.Write. */
6559 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6560 MONO_ADD_INS (cfg->cbb, ins);
6562 if (cfg->gen_write_barriers && is_ref)
6563 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break --- */
6569 } else if (cmethod->klass->image == mono_defaults.corlib &&
6570 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6571 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6572 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* ('brekpoint' is the actual helper name used elsewhere in this file.) */
6573 if (should_insert_brekpoint (cfg->method)) {
6574 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6576 MONO_INST_NEW (cfg, ins, OP_NOP);
6577 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows: compile-time constant --- */
6581 } else if (cmethod->klass->image == mono_defaults.corlib &&
6582 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6583 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6584 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6586 EMIT_NEW_ICONST (cfg, ins, 1);
6588 EMIT_NEW_ICONST (cfg, ins, 0);
6591 } else if (cmethod->klass == mono_defaults.math_class) {
6593 * There is general branchless code for Min/Max, but it does not work for
6595 * http://everything2.com/?node_id=1051618
/* --- ObjC Selector.GetHandle (MonoMac/monotouch/Xamarin.iOS) --- */
6597 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6598 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6599 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6600 !strcmp (cmethod->klass->name, "Selector")) ||
6601 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6602 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6603 !strcmp (cmethod->klass->name, "Selector"))
6605 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* Only when the argument is a string literal (LDSTR patch info). */
6606 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6607 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6610 MonoJumpInfoToken *ji;
6613 cfg->disable_llvm = TRUE;
6615 if (args [0]->opcode == OP_GOT_ENTRY) {
6616 pi = args [0]->inst_p1;
6617 g_assert (pi->opcode == OP_PATCH_INFO);
6618 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6621 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6622 ji = args [0]->inst_p0;
/* The ldstr is replaced by the selector lookup; kill the original inst. */
6625 NULLIFY_INS (args [0]);
6628 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6629 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6630 ins->dreg = mono_alloc_ireg (cfg);
6632 ins->inst_p0 = mono_string_to_utf8 (s);
6633 MONO_ADD_INS (cfg->cbb, ins);
/* Fallthrough: SIMD, native-types, LLVM-only, then arch-specific intrinsics. */
6639 #ifdef MONO_ARCH_SIMD_INTRINSICS
6640 if (cfg->opt & MONO_OPT_SIMD) {
6641 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6647 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6651 if (COMPILE_LLVM (cfg)) {
6652 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6657 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6661 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to redirect a call to METHOD to a specialized
 * managed implementation. Currently only String.InternalAllocateStr is
 * redirected (to the managed GC allocator), and only when allocation
 * profiling and MONO_OPT_SHARED are both off. Returns the emitted call
 * instruction, or falls through when no redirection applies.
 */
6664 inline static MonoInst*
6665 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6666 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6668 if (method->klass == mono_defaults.string_class) {
6669 /* managed string allocation support */
6670 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6671 MonoInst *iargs [2];
6672 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6673 MonoMethod *managed_alloc = NULL;
6675 g_assert (vtable); /* Should not fail since it is System.String */
6676 #ifndef MONO_CROSS_COMPILE
6677 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* managed_alloc takes (vtable, length) */
6681 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6682 iargs [1] = args [0];
6683 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Save the arguments of a method being inlined (SP points at them on the
 * evaluation stack) into freshly created OP_LOCAL vars stored into
 * cfg->args, emitting an ARGSTORE for each one.
 */
6690 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6692 MonoInst *store, *temp;
6695 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' argument the type comes from the stack entry itself */
6696 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6699 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6700 * would be different than the MonoInst's used to represent arguments, and
6701 * the ldelema implementation can't deal with that.
6702 * Solution: When ldelema is used on an inline argument, create a var for
6703 * it, emit ldelema on that var, and emit the saving code below in
6704 * inline_method () if needed.
6706 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6707 cfg->args [i] = temp;
6708 /* This uses cfg->args [i] which is set by the preceding line */
6709 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6710 store->cil_code = sp [0]->cil_code;
6715 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6716 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6718 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * The env var is read once and cached in a function-local static.
 */
6720 check_inline_called_method_name_limit (MonoMethod *called_method)
6723 static const char *limit = NULL;
6725 if (limit == NULL) {
6726 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6728 if (limit_string != NULL)
6729 limit = limit_string;
6734 if (limit [0] != '\0') {
6735 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match: TRUE only when the callee's name begins with the limit */
6737 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6738 g_free (called_method_name);
6740 //return (strncmp_result <= 0);
6741 return (strncmp_result == 0);
6748 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid, mirror of check_inline_called_method_name_limit: only allow
 * inlining when the CALLER's full name starts with the prefix given by the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var (read once, cached).
 */
6750 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6753 static const char *limit = NULL;
6755 if (limit == NULL) {
6756 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6757 if (limit_string != NULL) {
6758 limit = limit_string;
6764 if (limit [0] != '\0') {
6765 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match against the caller's full name */
6767 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6768 g_free (caller_method_name);
6770 //return (strncmp_result <= 0);
6771 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing vreg DREG of type RTYPE to its default value:
 * NULL for reference/pointer types, 0 for integer and FP types, and VZERO
 * for value types (including generic instances over valuetypes and
 * type variables known to be valuetypes).
 */
6779 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* static storage so R4/R8 const instructions can point at the zero value */
6781 static double r8_0 = 0.0;
6782 static float r4_0 = 0.0;
6786 rtype = mini_get_underlying_type (cfg, rtype);
6790 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6791 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6792 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6793 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6794 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6795 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* r4fp: R4 values are kept in single-precision registers */
6796 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6797 ins->type = STACK_R4;
6798 ins->inst_p0 = (void*)&r4_0;
6800 MONO_ADD_INS (cfg->cbb, ins);
6801 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6802 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6803 ins->type = STACK_R8;
6804 ins->inst_p0 = (void*)&r8_0;
6806 MONO_ADD_INS (cfg->cbb, ins);
6807 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6808 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6809 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6810 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6811 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* everything else is treated as a pointer-sized NULL */
6813 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit DUMMY_* init instructions for DREG/RTYPE; these keep the IR valid
 * (every vreg has a def) without generating real initialization code.
 * Falls back to emit_init_rvar () for types without a dummy opcode.
 */
6818 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6822 rtype = mini_get_underlying_type (cfg, rtype);
6826 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6827 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6828 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6829 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6830 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6831 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6832 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6833 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6834 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6835 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6836 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6837 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6838 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6839 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* no dummy opcode for this type: emit a real zero-init instead */
6841 emit_init_rvar (cfg, dreg, rtype);
6845 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6847 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6849 MonoInst *var = cfg->locals [local];
6850 if (COMPILE_SOFT_FLOAT (cfg)) {
/* soft-float: init a temp vreg and store it into the local explicitly */
6852 int reg = alloc_dreg (cfg, var->type);
6853 emit_init_rvar (cfg, reg, type);
6854 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6857 emit_init_rvar (cfg, var->dreg, type);
6859 emit_dummy_init_rvar (cfg, var->dreg, type);
6866 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD into the current compile at IP. Returns the
 * inlining cost (>= 0) on success, and a failure indication otherwise.
 * Saves the parts of CFG that mono_method_to_ir () mutates (locals, args,
 * bblock maps, current method/context, ...), recursively translates the
 * callee between fresh start/end bblocks, restores the saved state, and
 * then either links/merges the new bblocks in (on success) or discards
 * them (on abort). INLINE_ALWAYS forces inlining regardless of cost.
 */
6869 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6870 guchar *ip, guint real_offset, gboolean inline_always)
6872 MonoInst *ins, *rvar = NULL;
6873 MonoMethodHeader *cheader;
6874 MonoBasicBlock *ebblock, *sbblock;
6876 MonoMethod *prev_inlined_method;
6877 MonoInst **prev_locals, **prev_args;
6878 MonoType **prev_arg_types;
6879 guint prev_real_offset;
6880 GHashTable *prev_cbb_hash;
6881 MonoBasicBlock **prev_cil_offset_to_bb;
6882 MonoBasicBlock *prev_cbb;
6883 unsigned char* prev_cil_start;
6884 guint32 prev_cil_offset_to_bb_len;
6885 MonoMethod *prev_current_method;
6886 MonoGenericContext *prev_generic_context;
6887 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6889 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6891 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6892 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6895 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6896 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6901 fsig = mono_method_signature (cmethod);
6903 if (cfg->verbose_level > 2)
6904 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6906 if (!cmethod->inline_info) {
6907 cfg->stat_inlineable_methods++;
6908 cmethod->inline_info = 1;
6911 /* allocate local variables */
6912 cheader = mono_method_get_header (cmethod);
6914 if (cheader == NULL || mono_loader_get_last_error ()) {
6915 MonoLoaderError *error = mono_loader_get_last_error ();
6918 mono_metadata_free_mh (cheader);
6919 if (inline_always && error)
6920 mono_cfg_set_exception (cfg, error->exception_type);
6922 mono_loader_clear_error ();
6926 /*Must verify before creating locals as it can cause the JIT to assert.*/
6927 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6928 mono_metadata_free_mh (cheader);
6932 /* allocate space to store the return value */
6933 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6934 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in the callee's locals; the caller's are restored below */
6937 prev_locals = cfg->locals;
6938 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6939 for (i = 0; i < cheader->num_locals; ++i)
6940 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6942 /* allocate start and end blocks */
6943 /* This is needed so if the inline is aborted, we can clean up */
6944 NEW_BBLOCK (cfg, sbblock);
6945 sbblock->real_offset = real_offset;
6947 NEW_BBLOCK (cfg, ebblock);
6948 ebblock->block_num = cfg->num_bblocks++;
6949 ebblock->real_offset = real_offset;
/* save every piece of cfg state that mono_method_to_ir () overwrites */
6951 prev_args = cfg->args;
6952 prev_arg_types = cfg->arg_types;
6953 prev_inlined_method = cfg->inlined_method;
6954 cfg->inlined_method = cmethod;
6955 cfg->ret_var_set = FALSE;
6956 cfg->inline_depth ++;
6957 prev_real_offset = cfg->real_offset;
6958 prev_cbb_hash = cfg->cbb_hash;
6959 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6960 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6961 prev_cil_start = cfg->cil_start;
6962 prev_cbb = cfg->cbb;
6963 prev_current_method = cfg->current_method;
6964 prev_generic_context = cfg->generic_context;
6965 prev_ret_var_set = cfg->ret_var_set;
6966 prev_disable_inline = cfg->disable_inline;
6968 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6971 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6973 ret_var_set = cfg->ret_var_set;
/* restore the caller's state */
6975 cfg->inlined_method = prev_inlined_method;
6976 cfg->real_offset = prev_real_offset;
6977 cfg->cbb_hash = prev_cbb_hash;
6978 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6979 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6980 cfg->cil_start = prev_cil_start;
6981 cfg->locals = prev_locals;
6982 cfg->args = prev_args;
6983 cfg->arg_types = prev_arg_types;
6984 cfg->current_method = prev_current_method;
6985 cfg->generic_context = prev_generic_context;
6986 cfg->ret_var_set = prev_ret_var_set;
6987 cfg->disable_inline = prev_disable_inline;
6988 cfg->inline_depth --;
6990 if ((costs >= 0 && costs < 60) || inline_always) {
6991 if (cfg->verbose_level > 2)
6992 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6994 cfg->stat_inlined_methods++;
6996 /* always add some code to avoid block split failures */
6997 MONO_INST_NEW (cfg, ins, OP_NOP);
6998 MONO_ADD_INS (prev_cbb, ins);
7000 prev_cbb->next_bb = sbblock;
7001 link_bblock (cfg, prev_cbb, sbblock);
7004 * Get rid of the begin and end bblocks if possible to aid local
7007 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7009 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7010 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7012 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7013 MonoBasicBlock *prev = ebblock->in_bb [0];
7014 mono_merge_basic_blocks (cfg, prev, ebblock);
7016 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7017 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7018 cfg->cbb = prev_cbb;
7022 * It's possible that the rvar is set in some prev bblock, but not in others.
7028 for (i = 0; i < ebblock->in_count; ++i) {
7029 bb = ebblock->in_bb [i];
7031 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7034 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7044 * If the inlined method contains only a throw, then the ret var is not
7045 * set, so set it to a dummy value.
7048 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7050 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7053 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7056 if (cfg->verbose_level > 2)
7057 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7058 cfg->exception_type = MONO_EXCEPTION_NONE;
7059 mono_loader_clear_error ();
7061 /* This gets rid of the newly added bblocks */
7062 cfg->cbb = prev_cbb;
7064 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7069 * Some of these comments may well be out-of-date.
7070 * Design decisions: we do a single pass over the IL code (and we do bblock
7071 * splitting/merging in the few cases when it's required: a back jump to an IL
7072 * address that was not already seen as bblock starting point).
7073 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7074 * Complex operations are decomposed in simpler ones right away. We need to let the
7075 * arch-specific code peek and poke inside this process somehow (except when the
7076 * optimizations can take advantage of the full semantic info of coarse opcodes).
7077 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7078 * MonoInst->opcode initially is the IL opcode or some simplification of that
7079 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7080 * opcode with value bigger than OP_LAST.
7081 * At this point the IR can be handed over to an interpreter, a dumb code generator
7082 * or to the optimizing code generator that will translate it to SSA form.
7084 * Profiling directed optimizations.
7085 * We may compile by default with few or no optimizations and instrument the code
7086 * or the user may indicate what methods to optimize the most either in a config file
7087 * or through repeated runs where the compiler applies offline the optimizations to
7088 * each method and then decides if it was worth it.
7091 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7092 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7093 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7094 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7095 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7096 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7097 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7098 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7100 /* offset from br.s -> br like opcodes */
7101 #define BIG_BRANCH_OFFSET 13
/* Return TRUE when IP still belongs to bblock BB, i.e. no other bblock starts at IP */
7104 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7106 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7108 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END once, creating (via GET_BBLOCK) a
 * basic block at every branch target and at the instruction following each
 * branch/switch. Also marks the bblock containing a CEE_THROW as
 * out-of-line so it can be moved to cold code later.
 */
7112 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7114 unsigned char *ip = start;
7115 unsigned char *target;
7118 MonoBasicBlock *bblock;
7119 const MonoOpcode *opcode;
7122 cli_addr = ip - start;
7123 i = mono_opcode_value ((const guint8 **)&ip, end);
7126 opcode = &mono_opcodes [i];
/* advance IP according to the opcode's operand encoding */
7127 switch (opcode->argument) {
7128 case MonoInlineNone:
7131 case MonoInlineString:
7132 case MonoInlineType:
7133 case MonoInlineField:
7134 case MonoInlineMethod:
7137 case MonoShortInlineR:
7144 case MonoShortInlineVar:
7145 case MonoShortInlineI:
7148 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
7149 target = start + cli_addr + 2 + (signed char)ip [1];
7150 GET_BBLOCK (cfg, bblock, target);
7153 GET_BBLOCK (cfg, bblock, ip);
7155 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction */
7156 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7157 GET_BBLOCK (cfg, bblock, target);
7160 GET_BBLOCK (cfg, bblock, ip);
7162 case MonoInlineSwitch: {
7163 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole jump table */
7166 cli_addr += 5 + 4 * n;
7167 target = start + cli_addr;
7168 GET_BBLOCK (cfg, bblock, target);
7170 for (j = 0; j < n; ++j) {
7171 target = start + cli_addr + (gint32)read32 (ip);
7172 GET_BBLOCK (cfg, bblock, target);
7182 g_assert_not_reached ();
7185 if (i == CEE_THROW) {
7186 unsigned char *bb_start = ip - 1;
7188 /* Find the start of the bblock containing the throw */
7190 while ((bb_start >= start) && !bblock) {
7191 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7195 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN. For wrapper methods the method
 * is stored in the wrapper's data slots; it is then inflated with CONTEXT.
 * Open constructed types are permitted (unlike mini_get_method ()).
 */
7205 static inline MonoMethod *
7206 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7210 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7211 method = mono_method_get_wrapper_data (m, token);
7214 method = mono_class_inflate_generic_method_checked (method, context, &error);
7215 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7218 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, reject methods on open constructed types.
 */
7224 static inline MonoMethod *
7225 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7227 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7229 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN in METHOD, reading it from the
 * wrapper data for wrapper methods, inflating with CONTEXT, and running
 * class initialization on the result.
 */
7235 static inline MonoClass*
7236 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7241 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7242 klass = mono_method_get_wrapper_data (method, token);
7244 klass = mono_class_inflate_generic_class (klass, context);
7246 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7247 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7250 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve the signature referenced by TOKEN: from the wrapper data
 * (inflated with CONTEXT) for wrapper methods, otherwise parsed from
 * the method's image metadata.
 */
7254 static inline MonoMethodSignature*
7255 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7257 MonoMethodSignature *fsig;
7259 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7262 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7264 fsig = mono_inflate_generic_signature (fsig, context, &error);
7266 g_assert (mono_error_ok (&error));
7269 fsig = mono_metadata_parse_signature (method->klass->image, token);
/* Return the (lazily looked-up, cached) SecurityManager.ThrowException method */
7275 throw_exception (void)
7277 static MonoMethod *method = NULL;
7280 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7281 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* Emit a call to SecurityManager.ThrowException (EX) at the current point */
7288 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7290 MonoMethod *thrower = throw_exception ();
7293 EMIT_NEW_PCONST (cfg, args [0], ex);
7294 mono_emit_method_call (cfg, thrower, args, NULL);
7298 * Return the original method if a wrapper is specified. We can only access
7299 * the custom attributes from the original method.
7302 get_original_method (MonoMethod *method)
7304 if (method->wrapper_type == MONO_WRAPPER_NONE)
7307 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7308 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7311 /* in other cases we need to find the original method */
7312 return mono_marshal_method_from_wrapper (method);
/* CoreCLR security: emit a throw if CALLER may not access FIELD */
7316 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7318 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7319 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7321 emit_throw_exception (cfg, ex);
/* CoreCLR security: emit a throw if CALLER may not call CALLEE */
7325 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7327 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7328 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7330 emit_throw_exception (cfg, ex);
7334 * Check that the IL instructions at ip are the array initialization
7335 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the dup/ldtoken/call RuntimeHelpers.InitializeArray pattern
 * emitted by compilers after newarr, so the array can be filled from the
 * RVA field data directly. Returns NULL when the pattern does not match
 * or the element type would require byte swapping (big endian).
 */
7338 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7341 * newarr[System.Int32]
7343 * ldtoken field valuetype ...
7344 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7346 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7348 guint32 token = read32 (ip + 7);
7349 guint32 field_token = read32 (ip + 2);
7350 guint32 field_index = field_token & 0xffffff;
7352 const char *data_ptr;
7354 MonoMethod *cmethod;
7355 MonoClass *dummy_class;
7356 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7360 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7364 *out_field_token = field_token;
7366 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the corlib RuntimeHelpers.InitializeArray call qualifies */
7369 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7371 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7372 case MONO_TYPE_BOOLEAN:
7376 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7377 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7378 case MONO_TYPE_CHAR:
/* the array data must fit inside the RVA field */
7395 if (size > mono_type_size (field->type, &dummy_align))
7398 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7399 if (!image_is_dynamic (method->klass->image)) {
7400 field_index = read32 (ip + 2) & 0xffffff;
7401 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7402 data_ptr = mono_image_rva_map (method->klass->image, rva);
7403 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7404 /* for aot code we do the lookup on load */
7405 if (aot && data_ptr)
7406 return GUINT_TO_POINTER (rva);
7408 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7410 data_ptr = mono_field_get_data (field);
/*
 * Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending instruction at IP.
 */
7418 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7420 char *method_fname = mono_method_full_name (method, TRUE);
7422 MonoMethodHeader *header = mono_method_get_header (method);
7424 if (header->code_size == 0)
7425 method_code = g_strdup ("method body is empty.");
7427 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7429 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7430 g_free (method_fname);
7431 g_free (method_code);
/* the header is freed later along with the rest of the compile */
7432 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* Record a pre-built exception object on CFG, GC-rooting the pointer */
7436 set_exception_object (MonoCompile *cfg, MonoException *exception)
7438 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7439 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7440 cfg->exception_ptr = exception;
/*
 * Emit a store of *SP into local N. When the value on the stack is a fresh
 * integer constant, retarget its dreg to the local instead of emitting a
 * separate reg-reg move.
 */
7444 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7447 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7448 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7449 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7450 /* Optimize reg-reg moves away */
7452 * Can't optimize other opcodes, since sp[0] might point to
7453 * the last ins of a decomposed opcode.
7455 sp [0]->dreg = (cfg)->locals [n]->dreg;
7457 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7462 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * Recognize "ldloca <n>; initobj <type>" and replace it with a direct
 * zero-initialization of the local, returning the IP past the consumed
 * instructions (or NULL-ish fallthrough when the pattern does not match).
 */
7465 static inline unsigned char *
7466 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7476 local = read16 (ip + 2);
7480 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7481 /* From the INITOBJ case */
7482 token = read32 (ip + 2);
7483 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7484 CHECK_TYPELOAD (klass);
7485 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7486 emit_init_local (cfg, local, type, TRUE);
/* Walk the parent chain: TRUE when CLASS derives from System.Exception */
7494 is_exception_class (MonoClass *class)
7497 if (class == mono_defaults.exception_class)
7499 class = class->parent;
7505 * is_jit_optimizer_disabled:
7507 * Determine whether M's assembly has a DebuggableAttribute with the
7508 * IsJITOptimizerDisabled flag set. The result is cached on the assembly
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited), with memory
 * barriers ordering the value write before the inited-flag write.
7511 is_jit_optimizer_disabled (MonoMethod *m)
7513 MonoAssembly *ass = m->klass->image->assembly;
7514 MonoCustomAttrInfo* attrs;
7515 static MonoClass *klass;
7517 gboolean val = FALSE;
7520 if (ass->jit_optimizer_disabled_inited)
7521 return ass->jit_optimizer_disabled;
7524 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* no DebuggableAttribute type available: optimizer stays enabled */
7527 ass->jit_optimizer_disabled = FALSE;
7528 mono_memory_barrier ();
7529 ass->jit_optimizer_disabled_inited = TRUE;
7533 attrs = mono_custom_attrs_from_assembly (ass);
7535 for (i = 0; i < attrs->num_attrs; ++i) {
7536 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7538 MonoMethodSignature *sig;
7540 if (!attr->ctor || attr->ctor->klass != klass)
7542 /* Decode the attribute. See reflection.c */
7543 p = (const char*)attr->data;
7544 g_assert (read16 (p) == 0x0001);
7547 // FIXME: Support named parameters
7548 sig = mono_method_signature (attr->ctor);
/* only the DebuggableAttribute (bool, bool) ctor is understood */
7549 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7551 /* Two boolean arguments */
7555 mono_custom_attrs_free (attrs);
7558 ass->jit_optimizer_disabled = val;
7559 mono_memory_barrier ();
7560 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call METHOD -> CMETHOD with signature FSIG and IL
 * opcode CALL_OPCODE can be compiled as a real tail call. Starts from the
 * arch-specific (or signature-equality) check, then vetoes the tail call
 * for anything that could reference the current frame's stack or that the
 * backend cannot handle.
 */
7566 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7568 gboolean supported_tail_call;
7571 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7572 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7574 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7577 for (i = 0; i < fsig->param_count; ++i) {
7578 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7579 /* These can point to the current method's stack */
7580 supported_tail_call = FALSE;
7582 if (fsig->hasthis && cmethod->klass->valuetype)
7583 /* this might point to the current method's stack */
7584 supported_tail_call = FALSE;
7585 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7586 supported_tail_call = FALSE;
7587 if (cfg->method->save_lmf)
7588 supported_tail_call = FALSE;
7589 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7590 supported_tail_call = FALSE;
7591 if (call_opcode != CEE_CALL)
7592 supported_tail_call = FALSE;
7594 /* Debugging support */
7596 if (supported_tail_call) {
7597 if (!mono_debug_count ())
7598 supported_tail_call = FALSE;
7602 return supported_tail_call;
7605 /* emits the code needed to access a managed tls var (like ThreadStatic)
7606 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7607 * pointer for the current thread.
7608 * Returns the MonoInst* representing the address of the tls var.
7611 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7614 int static_data_reg, array_reg, dreg;
7615 int offset2_reg, idx_reg;
7616 // inlined access to the tls data (see threads.c)
7617 static_data_reg = alloc_ireg (cfg);
7618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* low 6 bits of offset_reg index into the static_data chunk table */
7619 idx_reg = alloc_ireg (cfg);
7620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
7621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7622 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7623 array_reg = alloc_ireg (cfg);
7624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* remaining bits (offset >> 6, masked) are the byte offset into that chunk */
7625 offset2_reg = alloc_ireg (cfg);
7626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
7627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
7628 dreg = alloc_ireg (cfg);
7629 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7636 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the call to CMETHOD (a ctor) for a NEWOBJ opcode. Depending on
 * generic sharing, a vtable/method-rgctx argument may be materialized; the
 * call itself is emitted as an intrinsic, an inlined body, a gsharedvt
 * indirect call, an rgctx-based indirect call, or a plain direct call.
 * INLINE_COSTS is updated when the ctor body gets inlined.
 */
7639 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7640 MonoInst **sp, guint8 *ip, int *inline_costs)
7642 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7644 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7645 mono_method_is_generic_sharable (cmethod, TRUE)) {
7646 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* vtable lookup done here so aot code loading works */
7647 mono_class_vtable (cfg->domain, cmethod->klass);
7648 CHECK_TYPELOAD (cmethod->klass);
7650 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7651 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7654 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7655 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7657 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7659 CHECK_TYPELOAD (cmethod->klass);
7660 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7665 /* Avoid virtual calls to ctors if possible */
7666 if (mono_class_is_marshalbyref (cmethod->klass))
7667 callvirt_this_arg = sp [0];
7669 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7670 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7671 CHECK_CFG_EXCEPTION;
7672 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7673 mono_method_check_inlining (cfg, cmethod) &&
7674 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7677 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7678 cfg->real_offset += 5;
7680 *inline_costs += costs - 5;
7682 INLINE_FAILURE ("inline failure");
7683 // FIXME-VT: Clean this up
7684 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7685 GSHAREDVT_FAILURE(*ip);
7686 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7688 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7691 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7692 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7693 } else if (context_used &&
7694 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7695 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7696 MonoInst *cmethod_addr;
7698 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7700 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7701 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7703 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7705 INLINE_FAILURE ("ctor call");
7706 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7707 callvirt_this_arg, NULL, vtable_arg);
7714 * mono_method_to_ir:
7716 * Translate the .net IL into linear IR.
7719 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7720 MonoInst *return_var, MonoInst **inline_args,
7721 guint inline_offset, gboolean is_virtual_call)
7724 MonoInst *ins, **sp, **stack_start;
7725 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7726 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7727 MonoMethod *cmethod, *method_definition;
7728 MonoInst **arg_array;
7729 MonoMethodHeader *header;
7731 guint32 token, ins_flag;
7733 MonoClass *constrained_class = NULL;
7734 unsigned char *ip, *end, *target, *err_pos;
7735 MonoMethodSignature *sig;
7736 MonoGenericContext *generic_context = NULL;
7737 MonoGenericContainer *generic_container = NULL;
7738 MonoType **param_types;
7739 int i, n, start_new_bblock, dreg;
7740 int num_calls = 0, inline_costs = 0;
7741 int breakpoint_id = 0;
7743 GSList *class_inits = NULL;
7744 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7746 gboolean init_locals, seq_points, skip_dead_blocks;
7747 gboolean sym_seq_points = FALSE;
7748 MonoDebugMethodInfo *minfo;
7749 MonoBitSet *seq_point_locs = NULL;
7750 MonoBitSet *seq_point_set_locs = NULL;
7752 cfg->disable_inline = is_jit_optimizer_disabled (method);
7754 /* serialization and xdomain stuff may need access to private fields and methods */
7755 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7756 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7757 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7758 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7759 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7760 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7762 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7763 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7764 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7765 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7766 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7768 image = method->klass->image;
7769 header = mono_method_get_header (method);
7771 MonoLoaderError *error;
7773 if ((error = mono_loader_get_last_error ())) {
7774 mono_cfg_set_exception (cfg, error->exception_type);
7776 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7777 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7779 goto exception_exit;
7781 generic_container = mono_method_get_generic_container (method);
7782 sig = mono_method_signature (method);
7783 num_args = sig->hasthis + sig->param_count;
7784 ip = (unsigned char*)header->code;
7785 cfg->cil_start = ip;
7786 end = ip + header->code_size;
7787 cfg->stat_cil_code_size += header->code_size;
7789 seq_points = cfg->gen_seq_points && cfg->method == method;
7791 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7792 /* We could hit a seq point before attaching to the JIT (#8338) */
7796 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7797 minfo = mono_debug_lookup_method (method);
7799 MonoSymSeqPoint *sps;
7800 int i, n_il_offsets;
7802 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7803 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7804 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7805 sym_seq_points = TRUE;
7806 for (i = 0; i < n_il_offsets; ++i) {
7807 if (sps [i].il_offset < header->code_size)
7808 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7811 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7812 /* Methods without line number info like auto-generated property accessors */
7813 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7814 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7815 sym_seq_points = TRUE;
7820 * Methods without init_locals set could cause asserts in various passes
7821 * (#497220). To work around this, we emit dummy initialization opcodes
7822 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7823 * on some platforms.
7825 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7826 init_locals = header->init_locals;
7830 method_definition = method;
7831 while (method_definition->is_inflated) {
7832 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7833 method_definition = imethod->declaring;
7836 /* SkipVerification is not allowed if core-clr is enabled */
7837 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7839 dont_verify_stloc = TRUE;
7842 if (sig->is_inflated)
7843 generic_context = mono_method_get_context (method);
7844 else if (generic_container)
7845 generic_context = &generic_container->context;
7846 cfg->generic_context = generic_context;
7848 if (!cfg->generic_sharing_context)
7849 g_assert (!sig->has_type_parameters);
7851 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7852 g_assert (method->is_inflated);
7853 g_assert (mono_method_get_context (method)->method_inst);
7855 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7856 g_assert (sig->generic_param_count);
7858 if (cfg->method == method) {
7859 cfg->real_offset = 0;
7861 cfg->real_offset = inline_offset;
7864 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7865 cfg->cil_offset_to_bb_len = header->code_size;
7867 cfg->current_method = method;
7869 if (cfg->verbose_level > 2)
7870 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7872 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7874 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7875 for (n = 0; n < sig->param_count; ++n)
7876 param_types [n + sig->hasthis] = sig->params [n];
7877 cfg->arg_types = param_types;
7879 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7880 if (cfg->method == method) {
7882 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7883 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7886 NEW_BBLOCK (cfg, start_bblock);
7887 cfg->bb_entry = start_bblock;
7888 start_bblock->cil_code = NULL;
7889 start_bblock->cil_length = 0;
7892 NEW_BBLOCK (cfg, end_bblock);
7893 cfg->bb_exit = end_bblock;
7894 end_bblock->cil_code = NULL;
7895 end_bblock->cil_length = 0;
7896 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7897 g_assert (cfg->num_bblocks == 2);
7899 arg_array = cfg->args;
7901 if (header->num_clauses) {
7902 cfg->spvars = g_hash_table_new (NULL, NULL);
7903 cfg->exvars = g_hash_table_new (NULL, NULL);
7905 /* handle exception clauses */
7906 for (i = 0; i < header->num_clauses; ++i) {
7907 MonoBasicBlock *try_bb;
7908 MonoExceptionClause *clause = &header->clauses [i];
7909 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7910 try_bb->real_offset = clause->try_offset;
7911 try_bb->try_start = TRUE;
7912 try_bb->region = ((i + 1) << 8) | clause->flags;
7913 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7914 tblock->real_offset = clause->handler_offset;
7915 tblock->flags |= BB_EXCEPTION_HANDLER;
7918 * Linking the try block with the EH block hinders inlining as we won't be able to
7919 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7921 if (COMPILE_LLVM (cfg))
7922 link_bblock (cfg, try_bb, tblock);
7924 if (*(ip + clause->handler_offset) == CEE_POP)
7925 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7927 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7928 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7929 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7930 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7931 MONO_ADD_INS (tblock, ins);
7933 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7934 /* finally clauses already have a seq point */
7935 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7936 MONO_ADD_INS (tblock, ins);
7939 /* todo: is a fault block unsafe to optimize? */
7940 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7941 tblock->flags |= BB_EXCEPTION_UNSAFE;
7944 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7946 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7948 /* catch and filter blocks get the exception object on the stack */
7949 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7950 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7952 /* mostly like handle_stack_args (), but just sets the input args */
7953 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7954 tblock->in_scount = 1;
7955 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7956 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7960 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7961 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7962 if (!cfg->compile_llvm) {
7963 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7964 ins->dreg = tblock->in_stack [0]->dreg;
7965 MONO_ADD_INS (tblock, ins);
7968 MonoInst *dummy_use;
7971 * Add a dummy use for the exvar so its liveness info will be
7974 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7977 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7978 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7979 tblock->flags |= BB_EXCEPTION_HANDLER;
7980 tblock->real_offset = clause->data.filter_offset;
7981 tblock->in_scount = 1;
7982 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7983 /* The filter block shares the exvar with the handler block */
7984 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7985 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7986 MONO_ADD_INS (tblock, ins);
7990 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7991 clause->data.catch_class &&
7992 cfg->generic_sharing_context &&
7993 mono_class_check_context_used (clause->data.catch_class)) {
7995 * In shared generic code with catch
7996 * clauses containing type variables
7997 * the exception handling code has to
7998 * be able to get to the rgctx.
7999 * Therefore we have to make sure that
8000 * the vtable/mrgctx argument (for
8001 * static or generic methods) or the
8002 * "this" argument (for non-static
8003 * methods) are live.
8005 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8006 mini_method_get_context (method)->method_inst ||
8007 method->klass->valuetype) {
8008 mono_get_vtable_var (cfg);
8010 MonoInst *dummy_use;
8012 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8017 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8018 cfg->cbb = start_bblock;
8019 cfg->args = arg_array;
8020 mono_save_args (cfg, sig, inline_args);
8023 /* FIRST CODE BLOCK */
8024 NEW_BBLOCK (cfg, tblock);
8025 tblock->cil_code = ip;
8029 ADD_BBLOCK (cfg, tblock);
8031 if (cfg->method == method) {
8032 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8033 if (breakpoint_id) {
8034 MONO_INST_NEW (cfg, ins, OP_BREAK);
8035 MONO_ADD_INS (cfg->cbb, ins);
8039 /* we use a separate basic block for the initialization code */
8040 NEW_BBLOCK (cfg, init_localsbb);
8041 cfg->bb_init = init_localsbb;
8042 init_localsbb->real_offset = cfg->real_offset;
8043 start_bblock->next_bb = init_localsbb;
8044 init_localsbb->next_bb = cfg->cbb;
8045 link_bblock (cfg, start_bblock, init_localsbb);
8046 link_bblock (cfg, init_localsbb, cfg->cbb);
8048 cfg->cbb = init_localsbb;
8050 if (cfg->gsharedvt && cfg->method == method) {
8051 MonoGSharedVtMethodInfo *info;
8052 MonoInst *var, *locals_var;
8055 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8056 info->method = cfg->method;
8057 info->count_entries = 16;
8058 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8059 cfg->gsharedvt_info = info;
8061 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8062 /* prevent it from being register allocated */
8063 //var->flags |= MONO_INST_VOLATILE;
8064 cfg->gsharedvt_info_var = var;
8066 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8067 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8069 /* Allocate locals */
8070 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8071 /* prevent it from being register allocated */
8072 //locals_var->flags |= MONO_INST_VOLATILE;
8073 cfg->gsharedvt_locals_var = locals_var;
8075 dreg = alloc_ireg (cfg);
8076 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8078 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8079 ins->dreg = locals_var->dreg;
8081 MONO_ADD_INS (cfg->cbb, ins);
8082 cfg->gsharedvt_locals_var_ins = ins;
8084 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8087 ins->flags |= MONO_INST_INIT;
8091 if (mono_security_core_clr_enabled ()) {
8092 /* check if this is native code, e.g. an icall or a p/invoke */
8093 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8094 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8096 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8097 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8099 /* if this ia a native call then it can only be JITted from platform code */
8100 if ((icall || pinvk) && method->klass && method->klass->image) {
8101 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8102 MonoException *ex = icall ? mono_get_exception_security () :
8103 mono_get_exception_method_access ();
8104 emit_throw_exception (cfg, ex);
8111 CHECK_CFG_EXCEPTION;
8113 if (header->code_size == 0)
8116 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8121 if (cfg->method == method)
8122 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8124 for (n = 0; n < header->num_locals; ++n) {
8125 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8130 /* We force the vtable variable here for all shared methods
8131 for the possibility that they might show up in a stack
8132 trace where their exact instantiation is needed. */
8133 if (cfg->generic_sharing_context && method == cfg->method) {
8134 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8135 mini_method_get_context (method)->method_inst ||
8136 method->klass->valuetype) {
8137 mono_get_vtable_var (cfg);
8139 /* FIXME: Is there a better way to do this?
8140 We need the variable live for the duration
8141 of the whole method. */
8142 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8146 /* add a check for this != NULL to inlined methods */
8147 if (is_virtual_call) {
8150 NEW_ARGLOAD (cfg, arg_ins, 0);
8151 MONO_ADD_INS (cfg->cbb, arg_ins);
8152 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8155 skip_dead_blocks = !dont_verify;
8156 if (skip_dead_blocks) {
8157 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8162 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8163 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8166 start_new_bblock = 0;
8168 if (cfg->method == method)
8169 cfg->real_offset = ip - header->code;
8171 cfg->real_offset = inline_offset;
8176 if (start_new_bblock) {
8177 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8178 if (start_new_bblock == 2) {
8179 g_assert (ip == tblock->cil_code);
8181 GET_BBLOCK (cfg, tblock, ip);
8183 cfg->cbb->next_bb = tblock;
8185 start_new_bblock = 0;
8186 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8187 if (cfg->verbose_level > 3)
8188 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8189 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8193 g_slist_free (class_inits);
8196 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8197 link_bblock (cfg, cfg->cbb, tblock);
8198 if (sp != stack_start) {
8199 handle_stack_args (cfg, stack_start, sp - stack_start);
8201 CHECK_UNVERIFIABLE (cfg);
8203 cfg->cbb->next_bb = tblock;
8205 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8206 if (cfg->verbose_level > 3)
8207 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8208 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8211 g_slist_free (class_inits);
8216 if (skip_dead_blocks) {
8217 int ip_offset = ip - header->code;
8219 if (ip_offset == bb->end)
8223 int op_size = mono_opcode_size (ip, end);
8224 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8226 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8228 if (ip_offset + op_size == bb->end) {
8229 MONO_INST_NEW (cfg, ins, OP_NOP);
8230 MONO_ADD_INS (cfg->cbb, ins);
8231 start_new_bblock = 1;
8239 * Sequence points are points where the debugger can place a breakpoint.
8240 * Currently, we generate these automatically at points where the IL
8243 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8245 * Make methods interruptable at the beginning, and at the targets of
8246 * backward branches.
8247 * Also, do this at the start of every bblock in methods with clauses too,
8248 * to be able to handle instructions with inprecise control flow like
8250 * Backward branches are handled at the end of method-to-ir ().
8252 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8253 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8255 /* Avoid sequence points on empty IL like .volatile */
8256 // FIXME: Enable this
8257 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8258 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8259 if ((sp != stack_start) && !sym_seq_point)
8260 ins->flags |= MONO_INST_NONEMPTY_STACK;
8261 MONO_ADD_INS (cfg->cbb, ins);
8264 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8267 cfg->cbb->real_offset = cfg->real_offset;
8269 if ((cfg->method == method) && cfg->coverage_info) {
8270 guint32 cil_offset = ip - header->code;
8271 cfg->coverage_info->data [cil_offset].cil_code = ip;
8273 /* TODO: Use an increment here */
8274 #if defined(TARGET_X86)
8275 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8276 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8278 MONO_ADD_INS (cfg->cbb, ins);
8280 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8281 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8285 if (cfg->verbose_level > 3)
8286 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8290 if (seq_points && !sym_seq_points && sp != stack_start) {
8292 * The C# compiler uses these nops to notify the JIT that it should
8293 * insert seq points.
8295 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8296 MONO_ADD_INS (cfg->cbb, ins);
8298 if (cfg->keep_cil_nops)
8299 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8301 MONO_INST_NEW (cfg, ins, OP_NOP);
8303 MONO_ADD_INS (cfg->cbb, ins);
8306 if (should_insert_brekpoint (cfg->method)) {
8307 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8309 MONO_INST_NEW (cfg, ins, OP_NOP);
8312 MONO_ADD_INS (cfg->cbb, ins);
8318 CHECK_STACK_OVF (1);
8319 n = (*ip)-CEE_LDARG_0;
8321 EMIT_NEW_ARGLOAD (cfg, ins, n);
8329 CHECK_STACK_OVF (1);
8330 n = (*ip)-CEE_LDLOC_0;
8332 EMIT_NEW_LOCLOAD (cfg, ins, n);
8341 n = (*ip)-CEE_STLOC_0;
8344 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8346 emit_stloc_ir (cfg, sp, header, n);
8353 CHECK_STACK_OVF (1);
8356 EMIT_NEW_ARGLOAD (cfg, ins, n);
8362 CHECK_STACK_OVF (1);
8365 NEW_ARGLOADA (cfg, ins, n);
8366 MONO_ADD_INS (cfg->cbb, ins);
8376 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8378 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8383 CHECK_STACK_OVF (1);
8386 EMIT_NEW_LOCLOAD (cfg, ins, n);
8390 case CEE_LDLOCA_S: {
8391 unsigned char *tmp_ip;
8393 CHECK_STACK_OVF (1);
8394 CHECK_LOCAL (ip [1]);
8396 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8402 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8411 CHECK_LOCAL (ip [1]);
8412 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8414 emit_stloc_ir (cfg, sp, header, ip [1]);
8419 CHECK_STACK_OVF (1);
8420 EMIT_NEW_PCONST (cfg, ins, NULL);
8421 ins->type = STACK_OBJ;
8426 CHECK_STACK_OVF (1);
8427 EMIT_NEW_ICONST (cfg, ins, -1);
8440 CHECK_STACK_OVF (1);
8441 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8447 CHECK_STACK_OVF (1);
8449 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8455 CHECK_STACK_OVF (1);
8456 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8462 CHECK_STACK_OVF (1);
8463 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8464 ins->type = STACK_I8;
8465 ins->dreg = alloc_dreg (cfg, STACK_I8);
8467 ins->inst_l = (gint64)read64 (ip);
8468 MONO_ADD_INS (cfg->cbb, ins);
8474 gboolean use_aotconst = FALSE;
8476 #ifdef TARGET_POWERPC
8477 /* FIXME: Clean this up */
8478 if (cfg->compile_aot)
8479 use_aotconst = TRUE;
8482 /* FIXME: we should really allocate this only late in the compilation process */
8483 f = mono_domain_alloc (cfg->domain, sizeof (float));
8485 CHECK_STACK_OVF (1);
8491 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8493 dreg = alloc_freg (cfg);
8494 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8495 ins->type = cfg->r4_stack_type;
8497 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8498 ins->type = cfg->r4_stack_type;
8499 ins->dreg = alloc_dreg (cfg, STACK_R8);
8501 MONO_ADD_INS (cfg->cbb, ins);
8511 gboolean use_aotconst = FALSE;
8513 #ifdef TARGET_POWERPC
8514 /* FIXME: Clean this up */
8515 if (cfg->compile_aot)
8516 use_aotconst = TRUE;
8519 /* FIXME: we should really allocate this only late in the compilation process */
8520 d = mono_domain_alloc (cfg->domain, sizeof (double));
8522 CHECK_STACK_OVF (1);
8528 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8530 dreg = alloc_freg (cfg);
8531 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8532 ins->type = STACK_R8;
8534 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8535 ins->type = STACK_R8;
8536 ins->dreg = alloc_dreg (cfg, STACK_R8);
8538 MONO_ADD_INS (cfg->cbb, ins);
8547 MonoInst *temp, *store;
8549 CHECK_STACK_OVF (1);
8553 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8554 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8556 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8559 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8572 if (sp [0]->type == STACK_R8)
8573 /* we need to pop the value from the x86 FP stack */
8574 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8580 INLINE_FAILURE ("jmp");
8581 GSHAREDVT_FAILURE (*ip);
8584 if (stack_start != sp)
8586 token = read32 (ip + 1);
8587 /* FIXME: check the signature matches */
8588 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8590 if (!cmethod || mono_loader_get_last_error ())
8593 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8594 GENERIC_SHARING_FAILURE (CEE_JMP);
8596 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8598 if (ARCH_HAVE_OP_TAIL_CALL) {
8599 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8602 /* Handle tail calls similarly to calls */
8603 n = fsig->param_count + fsig->hasthis;
8607 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8608 call->method = cmethod;
8609 call->tail_call = TRUE;
8610 call->signature = mono_method_signature (cmethod);
8611 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8612 call->inst.inst_p0 = cmethod;
8613 for (i = 0; i < n; ++i)
8614 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8616 mono_arch_emit_call (cfg, call);
8617 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8618 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8620 for (i = 0; i < num_args; ++i)
8621 /* Prevent arguments from being optimized away */
8622 arg_array [i]->flags |= MONO_INST_VOLATILE;
8624 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8625 ins = (MonoInst*)call;
8626 ins->inst_p0 = cmethod;
8627 MONO_ADD_INS (cfg->cbb, ins);
8631 start_new_bblock = 1;
8636 MonoMethodSignature *fsig;
8639 token = read32 (ip + 1);
8643 //GSHAREDVT_FAILURE (*ip);
8648 fsig = mini_get_signature (method, token, generic_context);
8650 if (method->dynamic && fsig->pinvoke) {
8654 * This is a call through a function pointer using a pinvoke
8655 * signature. Have to create a wrapper and call that instead.
8656 * FIXME: This is very slow, need to create a wrapper at JIT time
8657 * instead based on the signature.
8659 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8660 EMIT_NEW_PCONST (cfg, args [1], fsig);
8662 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8665 n = fsig->param_count + fsig->hasthis;
8669 //g_assert (!virtual || fsig->hasthis);
8673 inline_costs += 10 * num_calls++;
8676 * Making generic calls out of gsharedvt methods.
8677 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8678 * patching gshared method addresses into a gsharedvt method.
8680 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8682 * We pass the address to the gsharedvt trampoline in the rgctx reg
8684 MonoInst *callee = addr;
8686 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8688 GSHAREDVT_FAILURE (*ip);
8690 addr = emit_get_rgctx_sig (cfg, context_used,
8691 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8692 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8696 /* Prevent inlining of methods with indirect calls */
8697 INLINE_FAILURE ("indirect call");
8699 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8704 * Instead of emitting an indirect call, emit a direct call
8705 * with the contents of the aotconst as the patch info.
8707 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8708 info_type = addr->inst_c1;
8709 info_data = addr->inst_p0;
8711 info_type = addr->inst_right->inst_c1;
8712 info_data = addr->inst_right->inst_left;
8715 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8716 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8721 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8725 /* End of call, INS should contain the result of the call, if any */
8727 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8729 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8732 CHECK_CFG_EXCEPTION;
8736 constrained_class = NULL;
8740 case CEE_CALLVIRT: {
8741 MonoInst *addr = NULL;
8742 MonoMethodSignature *fsig = NULL;
8744 int virtual = *ip == CEE_CALLVIRT;
8745 gboolean pass_imt_from_rgctx = FALSE;
8746 MonoInst *imt_arg = NULL;
8747 MonoInst *keep_this_alive = NULL;
8748 gboolean pass_vtable = FALSE;
8749 gboolean pass_mrgctx = FALSE;
8750 MonoInst *vtable_arg = NULL;
8751 gboolean check_this = FALSE;
8752 gboolean supported_tail_call = FALSE;
8753 gboolean tail_call = FALSE;
8754 gboolean need_seq_point = FALSE;
8755 guint32 call_opcode = *ip;
8756 gboolean emit_widen = TRUE;
8757 gboolean push_res = TRUE;
8758 gboolean skip_ret = FALSE;
8759 gboolean delegate_invoke = FALSE;
8760 gboolean direct_icall = FALSE;
8761 gboolean constrained_partial_call = FALSE;
8762 MonoMethod *cil_method;
8765 token = read32 (ip + 1);
8769 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8770 cil_method = cmethod;
8772 if (constrained_class) {
8773 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8774 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8775 g_assert (!cmethod->klass->valuetype);
8776 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg))
8777 constrained_partial_call = TRUE;
8781 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8782 if (cfg->verbose_level > 2)
8783 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8784 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8785 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8786 cfg->generic_sharing_context)) {
8787 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8791 if (cfg->verbose_level > 2)
8792 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8794 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8796 * This is needed since get_method_constrained can't find
8797 * the method in klass representing a type var.
8798 * The type var is guaranteed to be a reference type in this
8801 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8802 g_assert (!cmethod->klass->valuetype);
8804 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8810 if (!cmethod || mono_loader_get_last_error ())
8812 if (!dont_verify && !cfg->skip_visibility) {
8813 MonoMethod *target_method = cil_method;
8814 if (method->is_inflated) {
8815 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8817 if (!mono_method_can_access_method (method_definition, target_method) &&
8818 !mono_method_can_access_method (method, cil_method))
8819 METHOD_ACCESS_FAILURE (method, cil_method);
8822 if (mono_security_core_clr_enabled ())
8823 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8825 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8826 /* MS.NET seems to silently convert this to a callvirt */
8831 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8832 * converts to a callvirt.
8834 * tests/bug-515884.il is an example of this behavior
8836 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8837 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8838 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8842 if (!cmethod->klass->inited)
8843 if (!mono_class_init (cmethod->klass))
8844 TYPE_LOAD_ERROR (cmethod->klass);
8846 fsig = mono_method_signature (cmethod);
8849 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8850 mini_class_is_system_array (cmethod->klass)) {
8851 array_rank = cmethod->klass->rank;
8852 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8853 direct_icall = TRUE;
8854 } else if (fsig->pinvoke) {
8855 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8856 check_for_pending_exc, cfg->compile_aot);
8857 fsig = mono_method_signature (wrapper);
8858 } else if (constrained_class) {
8860 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8864 mono_save_token_info (cfg, image, token, cil_method);
8866 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8867 need_seq_point = TRUE;
8869 /* Don't support calls made using type arguments for now */
8871 if (cfg->gsharedvt) {
8872 if (mini_is_gsharedvt_signature (cfg, fsig))
8873 GSHAREDVT_FAILURE (*ip);
8877 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8878 g_assert_not_reached ();
8880 n = fsig->param_count + fsig->hasthis;
8882 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8885 if (!cfg->generic_sharing_context)
8886 g_assert (!mono_method_check_context_used (cmethod));
8890 //g_assert (!virtual || fsig->hasthis);
8894 if (constrained_class) {
8895 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8896 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8897 /* The 'Own method' case below */
8898 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8899 /* 'The type parameter is instantiated as a reference type' case below. */
8901 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8902 CHECK_CFG_EXCEPTION;
8909 * We have the `constrained.' prefix opcode.
8911 if (constrained_partial_call) {
8912 gboolean need_box = TRUE;
8915 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8916 * called method is not known at compile time either. The called method could end up being
8917 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8918 * to box the receiver.
8919 * A simple solution would be to box always and make a normal virtual call, but that would
8920 * be bad performance wise.
8922 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8924					 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8931 MonoBasicBlock *is_ref_bb, *end_bb;
8932 MonoInst *nonbox_call;
8935				 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8937 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8938 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8940 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8942 NEW_BBLOCK (cfg, is_ref_bb);
8943 NEW_BBLOCK (cfg, end_bb);
8945 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8946 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8950 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8955 MONO_START_BB (cfg, is_ref_bb);
8956 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8957 ins->klass = constrained_class;
8958 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8959 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8963 MONO_START_BB (cfg, end_bb);
8966 nonbox_call->dreg = ins->dreg;
8968 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8969 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8970 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8973 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8975 * The type parameter is instantiated as a valuetype,
8976 * but that type doesn't override the method we're
8977 * calling, so we need to box `this'.
8979 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8980 ins->klass = constrained_class;
8981 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8982 CHECK_CFG_EXCEPTION;
8983 } else if (!constrained_class->valuetype) {
8984 int dreg = alloc_ireg_ref (cfg);
8987 * The type parameter is instantiated as a reference
8988 * type. We have a managed pointer on the stack, so
8989 * we need to dereference it here.
8991 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8992 ins->type = STACK_OBJ;
8995 if (cmethod->klass->valuetype) {
8998 /* Interface method */
9001 mono_class_setup_vtable (constrained_class);
9002 CHECK_TYPELOAD (constrained_class);
9003 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9005 TYPE_LOAD_ERROR (constrained_class);
9006 slot = mono_method_get_vtable_slot (cmethod);
9008 TYPE_LOAD_ERROR (cmethod->klass);
9009 cmethod = constrained_class->vtable [ioffset + slot];
9011 if (cmethod->klass == mono_defaults.enum_class) {
9012 /* Enum implements some interfaces, so treat this as the first case */
9013 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9014 ins->klass = constrained_class;
9015 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9016 CHECK_CFG_EXCEPTION;
9021 constrained_class = NULL;
9024 if (check_call_signature (cfg, fsig, sp))
9027 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9028 delegate_invoke = TRUE;
9030 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9031 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9032 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9040 * If the callee is a shared method, then its static cctor
9041 * might not get called after the call was patched.
9043 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9044 emit_generic_class_init (cfg, cmethod->klass);
9045 CHECK_TYPELOAD (cmethod->klass);
9048 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9050 if (cfg->generic_sharing_context) {
9051 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9053 context_used = mini_method_check_context_used (cfg, cmethod);
9055 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9056 /* Generic method interface
9057 calls are resolved via a
9058 helper function and don't
9060 if (!cmethod_context || !cmethod_context->method_inst)
9061 pass_imt_from_rgctx = TRUE;
9065 * If a shared method calls another
9066 * shared method then the caller must
9067 * have a generic sharing context
9068 * because the magic trampoline
9069 * requires it. FIXME: We shouldn't
9070 * have to force the vtable/mrgctx
9071 * variable here. Instead there
9072 * should be a flag in the cfg to
9073 * request a generic sharing context.
9076 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9077 mono_get_vtable_var (cfg);
9082 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9084 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9086 CHECK_TYPELOAD (cmethod->klass);
9087 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9092 g_assert (!vtable_arg);
9094 if (!cfg->compile_aot) {
9096 * emit_get_rgctx_method () calls mono_class_vtable () so check
9097 * for type load errors before.
9099 mono_class_setup_vtable (cmethod->klass);
9100 CHECK_TYPELOAD (cmethod->klass);
9103 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9105 /* !marshalbyref is needed to properly handle generic methods + remoting */
9106 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9107 MONO_METHOD_IS_FINAL (cmethod)) &&
9108 !mono_class_is_marshalbyref (cmethod->klass)) {
9115 if (pass_imt_from_rgctx) {
9116 g_assert (!pass_vtable);
9118 imt_arg = emit_get_rgctx_method (cfg, context_used,
9119 cmethod, MONO_RGCTX_INFO_METHOD);
9123 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9125 /* Calling virtual generic methods */
9126 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9127 !(MONO_METHOD_IS_FINAL (cmethod) &&
9128 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9129 fsig->generic_param_count &&
9130 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9131 MonoInst *this_temp, *this_arg_temp, *store;
9132 MonoInst *iargs [4];
9133 gboolean use_imt = FALSE;
9135 g_assert (fsig->is_inflated);
9137 /* Prevent inlining of methods that contain indirect calls */
9138 INLINE_FAILURE ("virtual generic call");
9140 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9141 GSHAREDVT_FAILURE (*ip);
9143 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9144 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9149 g_assert (!imt_arg);
9151 g_assert (cmethod->is_inflated);
9152 imt_arg = emit_get_rgctx_method (cfg, context_used,
9153 cmethod, MONO_RGCTX_INFO_METHOD);
9154 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9156 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9157 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9158 MONO_ADD_INS (cfg->cbb, store);
9160 /* FIXME: This should be a managed pointer */
9161 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9163 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9164 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9165 cmethod, MONO_RGCTX_INFO_METHOD);
9166 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9167 addr = mono_emit_jit_icall (cfg,
9168 mono_helper_compile_generic_method, iargs);
9170 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9172 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9179 * Implement a workaround for the inherent races involved in locking:
9185 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9186 * try block, the Exit () won't be executed, see:
9187 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9188 * To work around this, we extend such try blocks to include the last x bytes
9189 * of the Monitor.Enter () call.
9191 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9192 MonoBasicBlock *tbb;
9194 GET_BBLOCK (cfg, tbb, ip + 5);
9196 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9197 * from Monitor.Enter like ArgumentNullException.
9199 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9200 /* Mark this bblock as needing to be extended */
9201 tbb->extend_try_block = TRUE;
9205 /* Conversion to a JIT intrinsic */
9206 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9207 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9208 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9215 if ((cfg->opt & MONO_OPT_INLINE) &&
9216 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9217 mono_method_check_inlining (cfg, cmethod)) {
9219 gboolean always = FALSE;
9221 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9222 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9223 /* Prevent inlining of methods that call wrappers */
9224 INLINE_FAILURE ("wrapper call");
9225 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9229 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9231 cfg->real_offset += 5;
9233 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9234 /* *sp is already set by inline_method */
9239 inline_costs += costs;
9245 /* Tail recursion elimination */
9246 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9247 gboolean has_vtargs = FALSE;
9250 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9251 INLINE_FAILURE ("tail call");
9253 /* keep it simple */
9254 for (i = fsig->param_count - 1; i >= 0; i--) {
9255 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9260 for (i = 0; i < n; ++i)
9261 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9262 MONO_INST_NEW (cfg, ins, OP_BR);
9263 MONO_ADD_INS (cfg->cbb, ins);
9264 tblock = start_bblock->out_bb [0];
9265 link_bblock (cfg, cfg->cbb, tblock);
9266 ins->inst_target_bb = tblock;
9267 start_new_bblock = 1;
9269 /* skip the CEE_RET, too */
9270 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9277 inline_costs += 10 * num_calls++;
9280 * Making generic calls out of gsharedvt methods.
9281 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9282 * patching gshared method addresses into a gsharedvt method.
9284 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9285 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9286 MonoRgctxInfoType info_type;
9289 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9290 //GSHAREDVT_FAILURE (*ip);
9291 // disable for possible remoting calls
9292 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9293 GSHAREDVT_FAILURE (*ip);
9294 if (fsig->generic_param_count) {
9295 /* virtual generic call */
9296 g_assert (!imt_arg);
9297 /* Same as the virtual generic case above */
9298 imt_arg = emit_get_rgctx_method (cfg, context_used,
9299 cmethod, MONO_RGCTX_INFO_METHOD);
9300 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9302 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9303 /* This can happen when we call a fully instantiated iface method */
9304 imt_arg = emit_get_rgctx_method (cfg, context_used,
9305 cmethod, MONO_RGCTX_INFO_METHOD);
9310 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9311 keep_this_alive = sp [0];
9313 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9314 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9316 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9317 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9319 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9323 /* Generic sharing */
9326 * Use this if the callee is gsharedvt sharable too, since
9327 * at runtime we might find an instantiation so the call cannot
9328 * be patched (the 'no_patch' code path in mini-trampolines.c).
9330 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9331 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9332 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9333 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9334 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9335 INLINE_FAILURE ("gshared");
9337 g_assert (cfg->generic_sharing_context && cmethod);
9341 * We are compiling a call to a
9342 * generic method from shared code,
9343 * which means that we have to look up
9344 * the method in the rgctx and do an
9348 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9350 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9351 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9355 /* Direct calls to icalls */
9357 MonoMethod *wrapper;
9360 /* Inline the wrapper */
9361 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9363 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9364 g_assert (costs > 0);
9365 cfg->real_offset += 5;
9367 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9368 /* *sp is already set by inline_method */
9373 inline_costs += costs;
9382 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9383 MonoInst *val = sp [fsig->param_count];
9385 if (val->type == STACK_OBJ) {
9386 MonoInst *iargs [2];
9391 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9394 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9395 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9396 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9397 emit_write_barrier (cfg, addr, val);
9398 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9399 GSHAREDVT_FAILURE (*ip);
9400 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9401 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9403 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9404 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9405 if (!cmethod->klass->element_class->valuetype && !readonly)
9406 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9407 CHECK_TYPELOAD (cmethod->klass);
9410 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9413 g_assert_not_reached ();
9420 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9424 /* Tail prefix / tail call optimization */
9426 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9427 /* FIXME: runtime generic context pointer for jumps? */
9428 /* FIXME: handle this for generic sharing eventually */
9429 if ((ins_flag & MONO_INST_TAILCALL) &&
9430 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9431 supported_tail_call = TRUE;
9433 if (supported_tail_call) {
9436 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9437 INLINE_FAILURE ("tail call");
9439 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9441 if (ARCH_HAVE_OP_TAIL_CALL) {
9442 /* Handle tail calls similarly to normal calls */
9445 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9447 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9448 call->tail_call = TRUE;
9449 call->method = cmethod;
9450 call->signature = mono_method_signature (cmethod);
9453 * We implement tail calls by storing the actual arguments into the
9454 * argument variables, then emitting a CEE_JMP.
9456 for (i = 0; i < n; ++i) {
9457 /* Prevent argument from being register allocated */
9458 arg_array [i]->flags |= MONO_INST_VOLATILE;
9459 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9461 ins = (MonoInst*)call;
9462 ins->inst_p0 = cmethod;
9463 ins->inst_p1 = arg_array [0];
9464 MONO_ADD_INS (cfg->cbb, ins);
9465 link_bblock (cfg, cfg->cbb, end_bblock);
9466 start_new_bblock = 1;
9468 // FIXME: Eliminate unreachable epilogs
9471 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9472 * only reachable from this call.
9474 GET_BBLOCK (cfg, tblock, ip + 5);
9475 if (tblock == cfg->cbb || tblock->in_count == 0)
9484 * Synchronized wrappers.
9485			 * It's hard to determine where to replace a method with its synchronized
9486 * wrapper without causing an infinite recursion. The current solution is
9487 * to add the synchronized wrapper in the trampolines, and to
9488 * change the called method to a dummy wrapper, and resolve that wrapper
9489 * to the real method in mono_jit_compile_method ().
9491 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9492 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9493 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9494 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9498 INLINE_FAILURE ("call");
9499 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9500 imt_arg, vtable_arg);
9503 link_bblock (cfg, cfg->cbb, end_bblock);
9504 start_new_bblock = 1;
9506 // FIXME: Eliminate unreachable epilogs
9509 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9510 * only reachable from this call.
9512 GET_BBLOCK (cfg, tblock, ip + 5);
9513 if (tblock == cfg->cbb || tblock->in_count == 0)
9520 /* End of call, INS should contain the result of the call, if any */
9522 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9525 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9530 if (keep_this_alive) {
9531 MonoInst *dummy_use;
9533 /* See mono_emit_method_call_full () */
9534 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9537 CHECK_CFG_EXCEPTION;
9541 g_assert (*ip == CEE_RET);
9545 constrained_class = NULL;
9547 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9551 if (cfg->method != method) {
9552 /* return from inlined method */
9554 * If in_count == 0, that means the ret is unreachable due to
9555				 * being preceded by a throw. In that case, inline_method () will
9556 * handle setting the return value
9557 * (test case: test_0_inline_throw ()).
9559 if (return_var && cfg->cbb->in_count) {
9560 MonoType *ret_type = mono_method_signature (method)->ret;
9566 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9569 //g_assert (returnvar != -1);
9570 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9571 cfg->ret_var_set = TRUE;
9574 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9576 if (cfg->lmf_var && cfg->cbb->in_count)
9580 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9582 if (seq_points && !sym_seq_points) {
9584					 * Place a seq point here too even though the IL stack is not
9585 * empty, so a step over on
9588 * will work correctly.
9590 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9591 MONO_ADD_INS (cfg->cbb, ins);
9594 g_assert (!return_var);
9598 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9601 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9604 if (!cfg->vret_addr) {
9607 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9609 EMIT_NEW_RETLOADA (cfg, ret_addr);
9611 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9612 ins->klass = mono_class_from_mono_type (ret_type);
9615 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9616 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9617 MonoInst *iargs [1];
9621 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9622 mono_arch_emit_setret (cfg, method, conv);
9624 mono_arch_emit_setret (cfg, method, *sp);
9627 mono_arch_emit_setret (cfg, method, *sp);
9632 if (sp != stack_start)
9634 MONO_INST_NEW (cfg, ins, OP_BR);
9636 ins->inst_target_bb = end_bblock;
9637 MONO_ADD_INS (cfg->cbb, ins);
9638 link_bblock (cfg, cfg->cbb, end_bblock);
9639 start_new_bblock = 1;
9643 MONO_INST_NEW (cfg, ins, OP_BR);
9645 target = ip + 1 + (signed char)(*ip);
9647 GET_BBLOCK (cfg, tblock, target);
9648 link_bblock (cfg, cfg->cbb, tblock);
9649 ins->inst_target_bb = tblock;
9650 if (sp != stack_start) {
9651 handle_stack_args (cfg, stack_start, sp - stack_start);
9653 CHECK_UNVERIFIABLE (cfg);
9655 MONO_ADD_INS (cfg->cbb, ins);
9656 start_new_bblock = 1;
9657 inline_costs += BRANCH_COST;
9671 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9673 target = ip + 1 + *(signed char*)ip;
9679 inline_costs += BRANCH_COST;
9683 MONO_INST_NEW (cfg, ins, OP_BR);
9686 target = ip + 4 + (gint32)read32(ip);
9688 GET_BBLOCK (cfg, tblock, target);
9689 link_bblock (cfg, cfg->cbb, tblock);
9690 ins->inst_target_bb = tblock;
9691 if (sp != stack_start) {
9692 handle_stack_args (cfg, stack_start, sp - stack_start);
9694 CHECK_UNVERIFIABLE (cfg);
9697 MONO_ADD_INS (cfg->cbb, ins);
9699 start_new_bblock = 1;
9700 inline_costs += BRANCH_COST;
9707 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9708 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9709 guint32 opsize = is_short ? 1 : 4;
9711 CHECK_OPSIZE (opsize);
9713 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9716 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9721 GET_BBLOCK (cfg, tblock, target);
9722 link_bblock (cfg, cfg->cbb, tblock);
9723 GET_BBLOCK (cfg, tblock, ip);
9724 link_bblock (cfg, cfg->cbb, tblock);
9726 if (sp != stack_start) {
9727 handle_stack_args (cfg, stack_start, sp - stack_start);
9728 CHECK_UNVERIFIABLE (cfg);
9731 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9732 cmp->sreg1 = sp [0]->dreg;
9733 type_from_op (cfg, cmp, sp [0], NULL);
9736 #if SIZEOF_REGISTER == 4
9737 if (cmp->opcode == OP_LCOMPARE_IMM) {
9738 /* Convert it to OP_LCOMPARE */
9739 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9740 ins->type = STACK_I8;
9741 ins->dreg = alloc_dreg (cfg, STACK_I8);
9743 MONO_ADD_INS (cfg->cbb, ins);
9744 cmp->opcode = OP_LCOMPARE;
9745 cmp->sreg2 = ins->dreg;
9748 MONO_ADD_INS (cfg->cbb, cmp);
9750 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9751 type_from_op (cfg, ins, sp [0], NULL);
9752 MONO_ADD_INS (cfg->cbb, ins);
9753 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9754 GET_BBLOCK (cfg, tblock, target);
9755 ins->inst_true_bb = tblock;
9756 GET_BBLOCK (cfg, tblock, ip);
9757 ins->inst_false_bb = tblock;
9758 start_new_bblock = 2;
9761 inline_costs += BRANCH_COST;
9776 MONO_INST_NEW (cfg, ins, *ip);
9778 target = ip + 4 + (gint32)read32(ip);
9784 inline_costs += BRANCH_COST;
9788 MonoBasicBlock **targets;
9789 MonoBasicBlock *default_bblock;
9790 MonoJumpInfoBBTable *table;
9791 int offset_reg = alloc_preg (cfg);
9792 int target_reg = alloc_preg (cfg);
9793 int table_reg = alloc_preg (cfg);
9794 int sum_reg = alloc_preg (cfg);
9795 gboolean use_op_switch;
9799 n = read32 (ip + 1);
9802 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9806 CHECK_OPSIZE (n * sizeof (guint32));
9807 target = ip + n * sizeof (guint32);
9809 GET_BBLOCK (cfg, default_bblock, target);
9810 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9812 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9813 for (i = 0; i < n; ++i) {
9814 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9815 targets [i] = tblock;
9816 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9820 if (sp != stack_start) {
9822 * Link the current bb with the targets as well, so handle_stack_args
9823 * will set their in_stack correctly.
9825 link_bblock (cfg, cfg->cbb, default_bblock);
9826 for (i = 0; i < n; ++i)
9827 link_bblock (cfg, cfg->cbb, targets [i]);
9829 handle_stack_args (cfg, stack_start, sp - stack_start);
9831 CHECK_UNVERIFIABLE (cfg);
9834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9835 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9837 for (i = 0; i < n; ++i)
9838 link_bblock (cfg, cfg->cbb, targets [i]);
9840 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9841 table->table = targets;
9842 table->table_size = n;
9844 use_op_switch = FALSE;
9846 /* ARM implements SWITCH statements differently */
9847 /* FIXME: Make it use the generic implementation */
9848 if (!cfg->compile_aot)
9849 use_op_switch = TRUE;
9852 if (COMPILE_LLVM (cfg))
9853 use_op_switch = TRUE;
9855 cfg->cbb->has_jump_table = 1;
9857 if (use_op_switch) {
9858 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9859 ins->sreg1 = src1->dreg;
9860 ins->inst_p0 = table;
9861 ins->inst_many_bb = targets;
9862 ins->klass = GUINT_TO_POINTER (n);
9863 MONO_ADD_INS (cfg->cbb, ins);
9865 if (sizeof (gpointer) == 8)
9866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9870 #if SIZEOF_REGISTER == 8
9871 /* The upper word might not be zero, and we add it to a 64 bit address later */
9872 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9875 if (cfg->compile_aot) {
9876 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9878 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9879 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9880 ins->inst_p0 = table;
9881 ins->dreg = table_reg;
9882 MONO_ADD_INS (cfg->cbb, ins);
9885 /* FIXME: Use load_memindex */
9886 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9888 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9890 start_new_bblock = 1;
9891 inline_costs += (BRANCH_COST * 2);
9911 dreg = alloc_freg (cfg);
9914 dreg = alloc_lreg (cfg);
9917 dreg = alloc_ireg_ref (cfg);
9920 dreg = alloc_preg (cfg);
9923 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9924 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9925 if (*ip == CEE_LDIND_R4)
9926 ins->type = cfg->r4_stack_type;
9927 ins->flags |= ins_flag;
9928 MONO_ADD_INS (cfg->cbb, ins);
9930 if (ins_flag & MONO_INST_VOLATILE) {
9931 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9932 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9948 if (ins_flag & MONO_INST_VOLATILE) {
9949 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9950 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9953 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9954 ins->flags |= ins_flag;
9957 MONO_ADD_INS (cfg->cbb, ins);
9959 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9960 emit_write_barrier (cfg, sp [0], sp [1]);
9969 MONO_INST_NEW (cfg, ins, (*ip));
9971 ins->sreg1 = sp [0]->dreg;
9972 ins->sreg2 = sp [1]->dreg;
9973 type_from_op (cfg, ins, sp [0], sp [1]);
9975 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9977 /* Use the immediate opcodes if possible */
9978 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9979 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9980 if (imm_opcode != -1) {
9981 ins->opcode = imm_opcode;
9982 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9985 NULLIFY_INS (sp [1]);
9989 MONO_ADD_INS ((cfg)->cbb, (ins));
9991 *sp++ = mono_decompose_opcode (cfg, ins);
10008 MONO_INST_NEW (cfg, ins, (*ip));
10010 ins->sreg1 = sp [0]->dreg;
10011 ins->sreg2 = sp [1]->dreg;
10012 type_from_op (cfg, ins, sp [0], sp [1]);
10014 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10015 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10017 /* FIXME: Pass opcode to is_inst_imm */
10019 /* Use the immediate opcodes if possible */
10020 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10023 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10024 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10025 /* Keep emulated opcodes which are optimized away later */
10026 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10027 imm_opcode = mono_op_to_op_imm (ins->opcode);
10030 if (imm_opcode != -1) {
10031 ins->opcode = imm_opcode;
10032 if (sp [1]->opcode == OP_I8CONST) {
10033 #if SIZEOF_REGISTER == 8
10034 ins->inst_imm = sp [1]->inst_l;
10036 ins->inst_ls_word = sp [1]->inst_ls_word;
10037 ins->inst_ms_word = sp [1]->inst_ms_word;
10041 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10044 /* Might be followed by an instruction added by add_widen_op */
10045 if (sp [1]->next == NULL)
10046 NULLIFY_INS (sp [1]);
10049 MONO_ADD_INS ((cfg)->cbb, (ins));
10051 *sp++ = mono_decompose_opcode (cfg, ins);
10064 case CEE_CONV_OVF_I8:
10065 case CEE_CONV_OVF_U8:
10066 case CEE_CONV_R_UN:
10069 /* Special case this earlier so we have long constants in the IR */
10070 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10071 int data = sp [-1]->inst_c0;
10072 sp [-1]->opcode = OP_I8CONST;
10073 sp [-1]->type = STACK_I8;
10074 #if SIZEOF_REGISTER == 8
10075 if ((*ip) == CEE_CONV_U8)
10076 sp [-1]->inst_c0 = (guint32)data;
10078 sp [-1]->inst_c0 = data;
10080 sp [-1]->inst_ls_word = data;
10081 if ((*ip) == CEE_CONV_U8)
10082 sp [-1]->inst_ms_word = 0;
10084 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10086 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10093 case CEE_CONV_OVF_I4:
10094 case CEE_CONV_OVF_I1:
10095 case CEE_CONV_OVF_I2:
10096 case CEE_CONV_OVF_I:
10097 case CEE_CONV_OVF_U:
10100 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10101 ADD_UNOP (CEE_CONV_OVF_I8);
10108 case CEE_CONV_OVF_U1:
10109 case CEE_CONV_OVF_U2:
10110 case CEE_CONV_OVF_U4:
10113 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10114 ADD_UNOP (CEE_CONV_OVF_U8);
10121 case CEE_CONV_OVF_I1_UN:
10122 case CEE_CONV_OVF_I2_UN:
10123 case CEE_CONV_OVF_I4_UN:
10124 case CEE_CONV_OVF_I8_UN:
10125 case CEE_CONV_OVF_U1_UN:
10126 case CEE_CONV_OVF_U2_UN:
10127 case CEE_CONV_OVF_U4_UN:
10128 case CEE_CONV_OVF_U8_UN:
10129 case CEE_CONV_OVF_I_UN:
10130 case CEE_CONV_OVF_U_UN:
10137 CHECK_CFG_EXCEPTION;
10141 case CEE_ADD_OVF_UN:
10143 case CEE_MUL_OVF_UN:
10145 case CEE_SUB_OVF_UN:
10151 GSHAREDVT_FAILURE (*ip);
10154 token = read32 (ip + 1);
10155 klass = mini_get_class (method, token, generic_context);
10156 CHECK_TYPELOAD (klass);
10158 if (generic_class_is_reference_type (cfg, klass)) {
10159 MonoInst *store, *load;
10160 int dreg = alloc_ireg_ref (cfg);
10162 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10163 load->flags |= ins_flag;
10164 MONO_ADD_INS (cfg->cbb, load);
10166 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10167 store->flags |= ins_flag;
10168 MONO_ADD_INS (cfg->cbb, store);
10170 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10171 emit_write_barrier (cfg, sp [0], sp [1]);
10173 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10179 int loc_index = -1;
10185 token = read32 (ip + 1);
10186 klass = mini_get_class (method, token, generic_context);
10187 CHECK_TYPELOAD (klass);
10189 /* Optimize the common ldobj+stloc combination */
10192 loc_index = ip [6];
10199 loc_index = ip [5] - CEE_STLOC_0;
10206 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10207 CHECK_LOCAL (loc_index);
10209 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10210 ins->dreg = cfg->locals [loc_index]->dreg;
10211 ins->flags |= ins_flag;
10214 if (ins_flag & MONO_INST_VOLATILE) {
10215 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10216 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10222 /* Optimize the ldobj+stobj combination */
10223 /* The reference case ends up being a load+store anyway */
10224 /* Skip this if the operation is volatile. */
10225 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10230 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10237 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10238 ins->flags |= ins_flag;
10241 if (ins_flag & MONO_INST_VOLATILE) {
10242 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10243 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10252 CHECK_STACK_OVF (1);
10254 n = read32 (ip + 1);
10256 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10257 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10258 ins->type = STACK_OBJ;
10261 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10262 MonoInst *iargs [1];
10263 char *str = mono_method_get_wrapper_data (method, n);
10265 if (cfg->compile_aot)
10266 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10268 EMIT_NEW_PCONST (cfg, iargs [0], str);
10269 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10271 if (cfg->opt & MONO_OPT_SHARED) {
10272 MonoInst *iargs [3];
10274 if (cfg->compile_aot) {
10275 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10277 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10278 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10279 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10280 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10281 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10283 if (cfg->cbb->out_of_line) {
10284 MonoInst *iargs [2];
10286 if (image == mono_defaults.corlib) {
10288 * Avoid relocations in AOT and save some space by using a
10289 * version of helper_ldstr specialized to mscorlib.
10291 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10292 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10294 /* Avoid creating the string object */
10295 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10296 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10297 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10301 if (cfg->compile_aot) {
10302 NEW_LDSTRCONST (cfg, ins, image, n);
10304 MONO_ADD_INS (cfg->cbb, ins);
10307 NEW_PCONST (cfg, ins, NULL);
10308 ins->type = STACK_OBJ;
10309 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10311 OUT_OF_MEMORY_FAILURE;
10314 MONO_ADD_INS (cfg->cbb, ins);
10323 MonoInst *iargs [2];
10324 MonoMethodSignature *fsig;
10327 MonoInst *vtable_arg = NULL;
10330 token = read32 (ip + 1);
10331 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10332 if (!cmethod || mono_loader_get_last_error ())
10334 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10337 mono_save_token_info (cfg, image, token, cmethod);
10339 if (!mono_class_init (cmethod->klass))
10340 TYPE_LOAD_ERROR (cmethod->klass);
10342 context_used = mini_method_check_context_used (cfg, cmethod);
10344 if (mono_security_core_clr_enabled ())
10345 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10347 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10348 emit_generic_class_init (cfg, cmethod->klass);
10349 CHECK_TYPELOAD (cmethod->klass);
10353 if (cfg->gsharedvt) {
10354 if (mini_is_gsharedvt_variable_signature (sig))
10355 GSHAREDVT_FAILURE (*ip);
10359 n = fsig->param_count;
10363 * Generate smaller code for the common newobj <exception> instruction in
10364 * argument checking code.
10366 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10367 is_exception_class (cmethod->klass) && n <= 2 &&
10368 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10369 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10370 MonoInst *iargs [3];
10374 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10377 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10380 iargs [1] = sp [0];
10381 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10384 iargs [1] = sp [0];
10385 iargs [2] = sp [1];
10386 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10389 g_assert_not_reached ();
10397 /* move the args to allow room for 'this' in the first position */
10403 /* check_call_signature () requires sp[0] to be set */
10404 this_ins.type = STACK_OBJ;
10405 sp [0] = &this_ins;
10406 if (check_call_signature (cfg, fsig, sp))
10411 if (mini_class_is_system_array (cmethod->klass)) {
10412 *sp = emit_get_rgctx_method (cfg, context_used,
10413 cmethod, MONO_RGCTX_INFO_METHOD);
10415 /* Avoid varargs in the common case */
10416 if (fsig->param_count == 1)
10417 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10418 else if (fsig->param_count == 2)
10419 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10420 else if (fsig->param_count == 3)
10421 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10422 else if (fsig->param_count == 4)
10423 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10425 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10426 } else if (cmethod->string_ctor) {
10427 g_assert (!context_used);
10428 g_assert (!vtable_arg);
10429 /* we simply pass a null pointer */
10430 EMIT_NEW_PCONST (cfg, *sp, NULL);
10431 /* now call the string ctor */
10432 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10434 if (cmethod->klass->valuetype) {
10435 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10436 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10437 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10442 * The code generated by mini_emit_virtual_call () expects
10443 * iargs [0] to be a boxed instance, but luckily the vcall
10444 * will be transformed into a normal call there.
10446 } else if (context_used) {
10447 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10450 MonoVTable *vtable = NULL;
10452 if (!cfg->compile_aot)
10453 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10454 CHECK_TYPELOAD (cmethod->klass);
10457 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10458 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10459 * As a workaround, we call class cctors before allocating objects.
10461 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10462 emit_class_init (cfg, cmethod->klass);
10463 if (cfg->verbose_level > 2)
10464 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10465 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10468 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10471 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10474 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10476 /* Now call the actual ctor */
10477 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10478 CHECK_CFG_EXCEPTION;
10481 if (alloc == NULL) {
10483 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10484 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10492 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10493 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10496 case CEE_CASTCLASS:
10500 token = read32 (ip + 1);
10501 klass = mini_get_class (method, token, generic_context);
10502 CHECK_TYPELOAD (klass);
10503 if (sp [0]->type != STACK_OBJ)
10506 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10507 CHECK_CFG_EXCEPTION;
10516 token = read32 (ip + 1);
10517 klass = mini_get_class (method, token, generic_context);
10518 CHECK_TYPELOAD (klass);
10519 if (sp [0]->type != STACK_OBJ)
10522 context_used = mini_class_check_context_used (cfg, klass);
10524 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10525 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10526 MonoInst *args [3];
10533 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10536 if (cfg->compile_aot) {
10537 idx = get_castclass_cache_idx (cfg);
10538 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10540 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10543 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10546 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10547 MonoMethod *mono_isinst;
10548 MonoInst *iargs [1];
10551 mono_isinst = mono_marshal_get_isinst (klass);
10552 iargs [0] = sp [0];
10554 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10555 iargs, ip, cfg->real_offset, TRUE);
10556 CHECK_CFG_EXCEPTION;
10557 g_assert (costs > 0);
10560 cfg->real_offset += 5;
10564 inline_costs += costs;
10567 ins = handle_isinst (cfg, klass, *sp, context_used);
10568 CHECK_CFG_EXCEPTION;
10574 case CEE_UNBOX_ANY: {
10575 MonoInst *res, *addr;
10580 token = read32 (ip + 1);
10581 klass = mini_get_class (method, token, generic_context);
10582 CHECK_TYPELOAD (klass);
10584 mono_save_token_info (cfg, image, token, klass);
10586 context_used = mini_class_check_context_used (cfg, klass);
10588 if (mini_is_gsharedvt_klass (cfg, klass)) {
10589 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10591 } else if (generic_class_is_reference_type (cfg, klass)) {
10592 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10593 CHECK_CFG_EXCEPTION;
10594 } else if (mono_class_is_nullable (klass)) {
10595 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10597 addr = handle_unbox (cfg, klass, sp, context_used);
10599 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10610 MonoClass *enum_class;
10611 MonoMethod *has_flag;
10617 token = read32 (ip + 1);
10618 klass = mini_get_class (method, token, generic_context);
10619 CHECK_TYPELOAD (klass);
10621 mono_save_token_info (cfg, image, token, klass);
10623 context_used = mini_class_check_context_used (cfg, klass);
10625 if (generic_class_is_reference_type (cfg, klass)) {
10631 if (klass == mono_defaults.void_class)
10633 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10635 /* frequent check in generic code: box (struct), brtrue */
10640 * <push int/long ptr>
10643 * constrained. MyFlags
10644 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10646 * If we find this sequence and the operand types on box and constrained
10647 * are equal, we can emit a specialized instruction sequence instead of
10648 * the very slow HasFlag () call.
10650 if ((cfg->opt & MONO_OPT_INTRINS) &&
10651 /* Cheap checks first. */
10652 ip + 5 + 6 + 5 < end &&
10653 ip [5] == CEE_PREFIX1 &&
10654 ip [6] == CEE_CONSTRAINED_ &&
10655 ip [11] == CEE_CALLVIRT &&
10656 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10657 mono_class_is_enum (klass) &&
10658 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10659 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10660 has_flag->klass == mono_defaults.enum_class &&
10661 !strcmp (has_flag->name, "HasFlag") &&
10662 has_flag->signature->hasthis &&
10663 has_flag->signature->param_count == 1) {
10664 CHECK_TYPELOAD (enum_class);
10666 if (enum_class == klass) {
10667 MonoInst *enum_this, *enum_flag;
10672 enum_this = sp [0];
10673 enum_flag = sp [1];
10675 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10680 // FIXME: LLVM can't handle the inconsistent bb linking
10681 if (!mono_class_is_nullable (klass) &&
10682 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10683 (ip [5] == CEE_BRTRUE ||
10684 ip [5] == CEE_BRTRUE_S ||
10685 ip [5] == CEE_BRFALSE ||
10686 ip [5] == CEE_BRFALSE_S)) {
10687 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10689 MonoBasicBlock *true_bb, *false_bb;
10693 if (cfg->verbose_level > 3) {
10694 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10695 printf ("<box+brtrue opt>\n");
10700 case CEE_BRFALSE_S:
10703 target = ip + 1 + (signed char)(*ip);
10710 target = ip + 4 + (gint)(read32 (ip));
10714 g_assert_not_reached ();
10718 * We need to link both bblocks, since it is needed for handling stack
10719 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10720 * Branching to only one of them would lead to inconsistencies, so
10721 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10723 GET_BBLOCK (cfg, true_bb, target);
10724 GET_BBLOCK (cfg, false_bb, ip);
10726 mono_link_bblock (cfg, cfg->cbb, true_bb);
10727 mono_link_bblock (cfg, cfg->cbb, false_bb);
10729 if (sp != stack_start) {
10730 handle_stack_args (cfg, stack_start, sp - stack_start);
10732 CHECK_UNVERIFIABLE (cfg);
10735 if (COMPILE_LLVM (cfg)) {
10736 dreg = alloc_ireg (cfg);
10737 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10740 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10742 /* The JIT can't eliminate the iconst+compare */
10743 MONO_INST_NEW (cfg, ins, OP_BR);
10744 ins->inst_target_bb = is_true ? true_bb : false_bb;
10745 MONO_ADD_INS (cfg->cbb, ins);
10748 start_new_bblock = 1;
10752 *sp++ = handle_box (cfg, val, klass, context_used);
10754 CHECK_CFG_EXCEPTION;
10763 token = read32 (ip + 1);
10764 klass = mini_get_class (method, token, generic_context);
10765 CHECK_TYPELOAD (klass);
10767 mono_save_token_info (cfg, image, token, klass);
10769 context_used = mini_class_check_context_used (cfg, klass);
10771 if (mono_class_is_nullable (klass)) {
10774 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10775 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10779 ins = handle_unbox (cfg, klass, sp, context_used);
10792 MonoClassField *field;
10793 #ifndef DISABLE_REMOTING
10797 gboolean is_instance;
10799 gpointer addr = NULL;
10800 gboolean is_special_static;
10802 MonoInst *store_val = NULL;
10803 MonoInst *thread_ins;
10806 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10808 if (op == CEE_STFLD) {
10811 store_val = sp [1];
10816 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10818 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10821 if (op == CEE_STSFLD) {
10824 store_val = sp [0];
10829 token = read32 (ip + 1);
10830 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10831 field = mono_method_get_wrapper_data (method, token);
10832 klass = field->parent;
10835 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10838 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10839 FIELD_ACCESS_FAILURE (method, field);
10840 mono_class_init (klass);
10842 /* if the class is Critical then transparent code cannot access it's fields */
10843 if (!is_instance && mono_security_core_clr_enabled ())
10844 ensure_method_is_allowed_to_access_field (cfg, method, field);
10846 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10847 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10848 if (mono_security_core_clr_enabled ())
10849 ensure_method_is_allowed_to_access_field (cfg, method, field);
10853 * LDFLD etc. is usable on static fields as well, so convert those cases to
10856 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10868 g_assert_not_reached ();
10870 is_instance = FALSE;
10873 context_used = mini_class_check_context_used (cfg, klass);
10875 /* INSTANCE CASE */
10877 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10878 if (op == CEE_STFLD) {
10879 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10881 #ifndef DISABLE_REMOTING
10882 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10883 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10884 MonoInst *iargs [5];
10886 GSHAREDVT_FAILURE (op);
10888 iargs [0] = sp [0];
10889 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10890 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10891 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10893 iargs [4] = sp [1];
10895 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10896 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10897 iargs, ip, cfg->real_offset, TRUE);
10898 CHECK_CFG_EXCEPTION;
10899 g_assert (costs > 0);
10901 cfg->real_offset += 5;
10903 inline_costs += costs;
10905 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10912 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10914 if (mini_is_gsharedvt_klass (cfg, klass)) {
10915 MonoInst *offset_ins;
10917 context_used = mini_class_check_context_used (cfg, klass);
10919 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10920 dreg = alloc_ireg_mp (cfg);
10921 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10922 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10923 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10925 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10927 if (sp [0]->opcode != OP_LDADDR)
10928 store->flags |= MONO_INST_FAULT;
10930 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10931 /* insert call to write barrier */
10935 dreg = alloc_ireg_mp (cfg);
10936 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10937 emit_write_barrier (cfg, ptr, sp [1]);
10940 store->flags |= ins_flag;
10947 #ifndef DISABLE_REMOTING
10948 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10949 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10950 MonoInst *iargs [4];
10952 GSHAREDVT_FAILURE (op);
10954 iargs [0] = sp [0];
10955 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10956 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10957 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10958 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10959 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10960 iargs, ip, cfg->real_offset, TRUE);
10961 CHECK_CFG_EXCEPTION;
10962 g_assert (costs > 0);
10964 cfg->real_offset += 5;
10968 inline_costs += costs;
10970 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10976 if (sp [0]->type == STACK_VTYPE) {
10979 /* Have to compute the address of the variable */
10981 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10983 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10985 g_assert (var->klass == klass);
10987 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10991 if (op == CEE_LDFLDA) {
10992 if (sp [0]->type == STACK_OBJ) {
10993 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10994 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10997 dreg = alloc_ireg_mp (cfg);
10999 if (mini_is_gsharedvt_klass (cfg, klass)) {
11000 MonoInst *offset_ins;
11002 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11003 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11005 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11007 ins->klass = mono_class_from_mono_type (field->type);
11008 ins->type = STACK_MP;
11013 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11015 if (mini_is_gsharedvt_klass (cfg, klass)) {
11016 MonoInst *offset_ins;
11018 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11019 dreg = alloc_ireg_mp (cfg);
11020 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11021 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11023 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11025 load->flags |= ins_flag;
11026 if (sp [0]->opcode != OP_LDADDR)
11027 load->flags |= MONO_INST_FAULT;
11039 context_used = mini_class_check_context_used (cfg, klass);
11041 ftype = mono_field_get_type (field);
11043 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11046 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11047 * to be called here.
11049 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11050 mono_class_vtable (cfg->domain, klass);
11051 CHECK_TYPELOAD (klass);
11053 mono_domain_lock (cfg->domain);
11054 if (cfg->domain->special_static_fields)
11055 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11056 mono_domain_unlock (cfg->domain);
11058 is_special_static = mono_class_field_is_special_static (field);
11060 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11061 thread_ins = mono_get_thread_intrinsic (cfg);
11065 /* Generate IR to compute the field address */
11066 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11068 * Fast access to TLS data
11069 * Inline version of get_thread_static_data () in
11073 int idx, static_data_reg, array_reg, dreg;
11075 GSHAREDVT_FAILURE (op);
11077 MONO_ADD_INS (cfg->cbb, thread_ins);
11078 static_data_reg = alloc_ireg (cfg);
11079 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11081 if (cfg->compile_aot) {
11082 int offset_reg, offset2_reg, idx_reg;
11084 /* For TLS variables, this will return the TLS offset */
11085 EMIT_NEW_SFLDACONST (cfg, ins, field);
11086 offset_reg = ins->dreg;
11087 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11088 idx_reg = alloc_ireg (cfg);
11089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11091 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11092 array_reg = alloc_ireg (cfg);
11093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11094 offset2_reg = alloc_ireg (cfg);
11095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11097 dreg = alloc_ireg (cfg);
11098 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11100 offset = (gsize)addr & 0x7fffffff;
11101 idx = offset & 0x3f;
11103 array_reg = alloc_ireg (cfg);
11104 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11105 dreg = alloc_ireg (cfg);
11106 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11108 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11109 (cfg->compile_aot && is_special_static) ||
11110 (context_used && is_special_static)) {
11111 MonoInst *iargs [2];
11113 g_assert (field->parent);
11114 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11115 if (context_used) {
11116 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11117 field, MONO_RGCTX_INFO_CLASS_FIELD);
11119 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11121 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11122 } else if (context_used) {
11123 MonoInst *static_data;
11126 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11127 method->klass->name_space, method->klass->name, method->name,
11128 depth, field->offset);
11131 if (mono_class_needs_cctor_run (klass, method))
11132 emit_generic_class_init (cfg, klass);
11135 * The pointer we're computing here is
11137 * super_info.static_data + field->offset
11139 static_data = emit_get_rgctx_klass (cfg, context_used,
11140 klass, MONO_RGCTX_INFO_STATIC_DATA);
11142 if (mini_is_gsharedvt_klass (cfg, klass)) {
11143 MonoInst *offset_ins;
11145 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11146 dreg = alloc_ireg_mp (cfg);
11147 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11148 } else if (field->offset == 0) {
11151 int addr_reg = mono_alloc_preg (cfg);
11152 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11154 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11155 MonoInst *iargs [2];
11157 g_assert (field->parent);
11158 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11159 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11160 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11162 MonoVTable *vtable = NULL;
11164 if (!cfg->compile_aot)
11165 vtable = mono_class_vtable (cfg->domain, klass);
11166 CHECK_TYPELOAD (klass);
11169 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11170 if (!(g_slist_find (class_inits, klass))) {
11171 emit_class_init (cfg, klass);
11172 if (cfg->verbose_level > 2)
11173 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11174 class_inits = g_slist_prepend (class_inits, klass);
11177 if (cfg->run_cctors) {
11179 /* This makes so that inline cannot trigger */
11180 /* .cctors: too many apps depend on them */
11181 /* running with a specific order... */
11183 if (! vtable->initialized)
11184 INLINE_FAILURE ("class init");
11185 ex = mono_runtime_class_init_full (vtable, FALSE);
11187 set_exception_object (cfg, ex);
11188 goto exception_exit;
11192 if (cfg->compile_aot)
11193 EMIT_NEW_SFLDACONST (cfg, ins, field);
11196 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11198 EMIT_NEW_PCONST (cfg, ins, addr);
11201 MonoInst *iargs [1];
11202 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11203 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11207 /* Generate IR to do the actual load/store operation */
11209 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11210 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11211 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11214 if (op == CEE_LDSFLDA) {
11215 ins->klass = mono_class_from_mono_type (ftype);
11216 ins->type = STACK_PTR;
11218 } else if (op == CEE_STSFLD) {
11221 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11222 store->flags |= ins_flag;
11224 gboolean is_const = FALSE;
11225 MonoVTable *vtable = NULL;
11226 gpointer addr = NULL;
11228 if (!context_used) {
11229 vtable = mono_class_vtable (cfg->domain, klass);
11230 CHECK_TYPELOAD (klass);
11232 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11233 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11234 int ro_type = ftype->type;
11236 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11237 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11238 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11241 GSHAREDVT_FAILURE (op);
11243 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11246 case MONO_TYPE_BOOLEAN:
11248 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11252 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11255 case MONO_TYPE_CHAR:
11257 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11261 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11266 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11270 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11275 case MONO_TYPE_PTR:
11276 case MONO_TYPE_FNPTR:
11277 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11278 type_to_eval_stack_type ((cfg), field->type, *sp);
11281 case MONO_TYPE_STRING:
11282 case MONO_TYPE_OBJECT:
11283 case MONO_TYPE_CLASS:
11284 case MONO_TYPE_SZARRAY:
11285 case MONO_TYPE_ARRAY:
11286 if (!mono_gc_is_moving ()) {
11287 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11288 type_to_eval_stack_type ((cfg), field->type, *sp);
11296 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11301 case MONO_TYPE_VALUETYPE:
11311 CHECK_STACK_OVF (1);
11313 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11314 load->flags |= ins_flag;
11320 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11321 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11322 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11333 token = read32 (ip + 1);
11334 klass = mini_get_class (method, token, generic_context);
11335 CHECK_TYPELOAD (klass);
11336 if (ins_flag & MONO_INST_VOLATILE) {
11337 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11338 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11340 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11341 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11342 ins->flags |= ins_flag;
11343 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11344 generic_class_is_reference_type (cfg, klass)) {
11345 /* insert call to write barrier */
11346 emit_write_barrier (cfg, sp [0], sp [1]);
11358 const char *data_ptr;
11360 guint32 field_token;
11366 token = read32 (ip + 1);
11368 klass = mini_get_class (method, token, generic_context);
11369 CHECK_TYPELOAD (klass);
11371 context_used = mini_class_check_context_used (cfg, klass);
11373 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11374 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11375 ins->sreg1 = sp [0]->dreg;
11376 ins->type = STACK_I4;
11377 ins->dreg = alloc_ireg (cfg);
11378 MONO_ADD_INS (cfg->cbb, ins);
11379 *sp = mono_decompose_opcode (cfg, ins);
11382 if (context_used) {
11383 MonoInst *args [3];
11384 MonoClass *array_class = mono_array_class_get (klass, 1);
11385 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11387 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11390 args [0] = emit_get_rgctx_klass (cfg, context_used,
11391 array_class, MONO_RGCTX_INFO_VTABLE);
11396 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11398 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11400 if (cfg->opt & MONO_OPT_SHARED) {
11401 /* Decompose now to avoid problems with references to the domainvar */
11402 MonoInst *iargs [3];
11404 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11405 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11406 iargs [2] = sp [0];
11408 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11410 /* Decompose later since it is needed by abcrem */
11411 MonoClass *array_type = mono_array_class_get (klass, 1);
11412 mono_class_vtable (cfg->domain, array_type);
11413 CHECK_TYPELOAD (array_type);
11415 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11416 ins->dreg = alloc_ireg_ref (cfg);
11417 ins->sreg1 = sp [0]->dreg;
11418 ins->inst_newa_class = klass;
11419 ins->type = STACK_OBJ;
11420 ins->klass = array_type;
11421 MONO_ADD_INS (cfg->cbb, ins);
11422 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11423 cfg->cbb->has_array_access = TRUE;
11425 /* Needed so mono_emit_load_get_addr () gets called */
11426 mono_get_got_var (cfg);
11436 * we inline/optimize the initialization sequence if possible.
11437 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11438 * for small sizes open code the memcpy
11439 * ensure the rva field is big enough
11441 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11442 MonoMethod *memcpy_method = get_memcpy_method ();
11443 MonoInst *iargs [3];
11444 int add_reg = alloc_ireg_mp (cfg);
11446 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11447 if (cfg->compile_aot) {
11448 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11450 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11452 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11453 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11462 if (sp [0]->type != STACK_OBJ)
11465 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11466 ins->dreg = alloc_preg (cfg);
11467 ins->sreg1 = sp [0]->dreg;
11468 ins->type = STACK_I4;
11469 /* This flag will be inherited by the decomposition */
11470 ins->flags |= MONO_INST_FAULT;
11471 MONO_ADD_INS (cfg->cbb, ins);
11472 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11473 cfg->cbb->has_array_access = TRUE;
11481 if (sp [0]->type != STACK_OBJ)
11484 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11486 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11487 CHECK_TYPELOAD (klass);
11488 /* we need to make sure that this array is exactly the type it needs
11489 * to be for correctness. the wrappers are lax with their usage
11490 * so we need to ignore them here
11492 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11493 MonoClass *array_class = mono_array_class_get (klass, 1);
11494 mini_emit_check_array_type (cfg, sp [0], array_class);
11495 CHECK_TYPELOAD (array_class);
11499 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11504 case CEE_LDELEM_I1:
11505 case CEE_LDELEM_U1:
11506 case CEE_LDELEM_I2:
11507 case CEE_LDELEM_U2:
11508 case CEE_LDELEM_I4:
11509 case CEE_LDELEM_U4:
11510 case CEE_LDELEM_I8:
11512 case CEE_LDELEM_R4:
11513 case CEE_LDELEM_R8:
11514 case CEE_LDELEM_REF: {
11520 if (*ip == CEE_LDELEM) {
11522 token = read32 (ip + 1);
11523 klass = mini_get_class (method, token, generic_context);
11524 CHECK_TYPELOAD (klass);
11525 mono_class_init (klass);
11528 klass = array_access_to_klass (*ip);
11530 if (sp [0]->type != STACK_OBJ)
11533 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11535 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11536 // FIXME-VT: OP_ICONST optimization
11537 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11538 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11539 ins->opcode = OP_LOADV_MEMBASE;
11540 } else if (sp [1]->opcode == OP_ICONST) {
11541 int array_reg = sp [0]->dreg;
11542 int index_reg = sp [1]->dreg;
11543 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11545 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11546 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11548 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11549 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11552 if (*ip == CEE_LDELEM)
11559 case CEE_STELEM_I1:
11560 case CEE_STELEM_I2:
11561 case CEE_STELEM_I4:
11562 case CEE_STELEM_I8:
11563 case CEE_STELEM_R4:
11564 case CEE_STELEM_R8:
11565 case CEE_STELEM_REF:
11570 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11572 if (*ip == CEE_STELEM) {
11574 token = read32 (ip + 1);
11575 klass = mini_get_class (method, token, generic_context);
11576 CHECK_TYPELOAD (klass);
11577 mono_class_init (klass);
11580 klass = array_access_to_klass (*ip);
11582 if (sp [0]->type != STACK_OBJ)
11585 emit_array_store (cfg, klass, sp, TRUE);
11587 if (*ip == CEE_STELEM)
11594 case CEE_CKFINITE: {
11598 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11599 ins->sreg1 = sp [0]->dreg;
11600 ins->dreg = alloc_freg (cfg);
11601 ins->type = STACK_R8;
11602 MONO_ADD_INS (cfg->cbb, ins);
11604 *sp++ = mono_decompose_opcode (cfg, ins);
11609 case CEE_REFANYVAL: {
11610 MonoInst *src_var, *src;
11612 int klass_reg = alloc_preg (cfg);
11613 int dreg = alloc_preg (cfg);
11615 GSHAREDVT_FAILURE (*ip);
11618 MONO_INST_NEW (cfg, ins, *ip);
11621 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11622 CHECK_TYPELOAD (klass);
11624 context_used = mini_class_check_context_used (cfg, klass);
11627 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11629 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11630 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11633 if (context_used) {
11634 MonoInst *klass_ins;
11636 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11637 klass, MONO_RGCTX_INFO_KLASS);
11640 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11641 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11643 mini_emit_class_check (cfg, klass_reg, klass);
11645 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11646 ins->type = STACK_MP;
11647 ins->klass = klass;
11652 case CEE_MKREFANY: {
11653 MonoInst *loc, *addr;
11655 GSHAREDVT_FAILURE (*ip);
11658 MONO_INST_NEW (cfg, ins, *ip);
11661 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11662 CHECK_TYPELOAD (klass);
11664 context_used = mini_class_check_context_used (cfg, klass);
11666 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11667 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11669 if (context_used) {
11670 MonoInst *const_ins;
11671 int type_reg = alloc_preg (cfg);
11673 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11677 } else if (cfg->compile_aot) {
11678 int const_reg = alloc_preg (cfg);
11679 int type_reg = alloc_preg (cfg);
11681 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11686 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11687 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11691 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11692 ins->type = STACK_VTYPE;
11693 ins->klass = mono_defaults.typed_reference_class;
11698 case CEE_LDTOKEN: {
11700 MonoClass *handle_class;
11702 CHECK_STACK_OVF (1);
11705 n = read32 (ip + 1);
11707 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11708 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11709 handle = mono_method_get_wrapper_data (method, n);
11710 handle_class = mono_method_get_wrapper_data (method, n + 1);
11711 if (handle_class == mono_defaults.typehandle_class)
11712 handle = &((MonoClass*)handle)->byval_arg;
11715 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11720 mono_class_init (handle_class);
11721 if (cfg->generic_sharing_context) {
11722 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11723 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11724 /* This case handles ldtoken
11725 of an open type, like for
11728 } else if (handle_class == mono_defaults.typehandle_class) {
11729 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11730 } else if (handle_class == mono_defaults.fieldhandle_class)
11731 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11732 else if (handle_class == mono_defaults.methodhandle_class)
11733 context_used = mini_method_check_context_used (cfg, handle);
11735 g_assert_not_reached ();
11738 if ((cfg->opt & MONO_OPT_SHARED) &&
11739 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11740 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11741 MonoInst *addr, *vtvar, *iargs [3];
11742 int method_context_used;
11744 method_context_used = mini_method_check_context_used (cfg, method);
11746 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11748 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11749 EMIT_NEW_ICONST (cfg, iargs [1], n);
11750 if (method_context_used) {
11751 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11752 method, MONO_RGCTX_INFO_METHOD);
11753 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11755 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11756 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11758 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11760 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11762 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11764 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11765 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11766 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11767 (cmethod->klass == mono_defaults.systemtype_class) &&
11768 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11769 MonoClass *tclass = mono_class_from_mono_type (handle);
11771 mono_class_init (tclass);
11772 if (context_used) {
11773 ins = emit_get_rgctx_klass (cfg, context_used,
11774 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11775 } else if (cfg->compile_aot) {
11776 if (method->wrapper_type) {
					mono_error_init (&error); // have to initialize it since there are multiple conditionals below
11778 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11779 /* Special case for static synchronized wrappers */
11780 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11782 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11783 /* FIXME: n is not a normal token */
11785 EMIT_NEW_PCONST (cfg, ins, NULL);
11788 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11791 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11793 ins->type = STACK_OBJ;
11794 ins->klass = cmethod->klass;
11797 MonoInst *addr, *vtvar;
11799 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11801 if (context_used) {
11802 if (handle_class == mono_defaults.typehandle_class) {
11803 ins = emit_get_rgctx_klass (cfg, context_used,
11804 mono_class_from_mono_type (handle),
11805 MONO_RGCTX_INFO_TYPE);
11806 } else if (handle_class == mono_defaults.methodhandle_class) {
11807 ins = emit_get_rgctx_method (cfg, context_used,
11808 handle, MONO_RGCTX_INFO_METHOD);
11809 } else if (handle_class == mono_defaults.fieldhandle_class) {
11810 ins = emit_get_rgctx_field (cfg, context_used,
11811 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11813 g_assert_not_reached ();
11815 } else if (cfg->compile_aot) {
11816 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11818 EMIT_NEW_PCONST (cfg, ins, handle);
11820 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11822 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11832 MONO_INST_NEW (cfg, ins, OP_THROW);
11834 ins->sreg1 = sp [0]->dreg;
11836 cfg->cbb->out_of_line = TRUE;
11837 MONO_ADD_INS (cfg->cbb, ins);
11838 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11839 MONO_ADD_INS (cfg->cbb, ins);
11842 link_bblock (cfg, cfg->cbb, end_bblock);
11843 start_new_bblock = 1;
11845 case CEE_ENDFINALLY:
11846 /* mono_save_seq_point_info () depends on this */
11847 if (sp != stack_start)
11848 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11849 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11850 MONO_ADD_INS (cfg->cbb, ins);
11852 start_new_bblock = 1;
11855 * Control will leave the method so empty the stack, otherwise
11856 * the next basic block will start with a nonempty stack.
11858 while (sp != stack_start) {
11863 case CEE_LEAVE_S: {
11866 if (*ip == CEE_LEAVE) {
11868 target = ip + 5 + (gint32)read32(ip + 1);
11871 target = ip + 2 + (signed char)(ip [1]);
11874 /* empty the stack */
11875 while (sp != stack_start) {
11880 * If this leave statement is in a catch block, check for a
11881 * pending exception, and rethrow it if necessary.
11882 * We avoid doing this in runtime invoke wrappers, since those are called
			 * by native code which expects the wrapper to catch all exceptions.
11885 for (i = 0; i < header->num_clauses; ++i) {
11886 MonoExceptionClause *clause = &header->clauses [i];
11889 * Use <= in the final comparison to handle clauses with multiple
11890 * leave statements, like in bug #78024.
11891 * The ordering of the exception clauses guarantees that we find the
11892 * innermost clause.
11894 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11896 MonoBasicBlock *dont_throw;
11901 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11904 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11906 NEW_BBLOCK (cfg, dont_throw);
11909 * Currently, we always rethrow the abort exception, despite the
11910 * fact that this is not correct. See thread6.cs for an example.
11911 * But propagating the abort exception is more important than
					 * getting the semantics right.
11914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11916 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11918 MONO_START_BB (cfg, dont_throw);
11922 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11924 MonoExceptionClause *clause;
11926 for (tmp = handlers; tmp; tmp = tmp->next) {
11927 clause = tmp->data;
11928 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11930 link_bblock (cfg, cfg->cbb, tblock);
11931 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11932 ins->inst_target_bb = tblock;
11933 ins->inst_eh_block = clause;
11934 MONO_ADD_INS (cfg->cbb, ins);
11935 cfg->cbb->has_call_handler = 1;
11936 if (COMPILE_LLVM (cfg)) {
11937 MonoBasicBlock *target_bb;
11940 * Link the finally bblock with the target, since it will
11941 * conceptually branch there.
11942 * FIXME: Have to link the bblock containing the endfinally.
11944 GET_BBLOCK (cfg, target_bb, target);
11945 link_bblock (cfg, tblock, target_bb);
11948 g_list_free (handlers);
11951 MONO_INST_NEW (cfg, ins, OP_BR);
11952 MONO_ADD_INS (cfg->cbb, ins);
11953 GET_BBLOCK (cfg, tblock, target);
11954 link_bblock (cfg, cfg->cbb, tblock);
11955 ins->inst_target_bb = tblock;
11956 start_new_bblock = 1;
11958 if (*ip == CEE_LEAVE)
11967 * Mono specific opcodes
11969 case MONO_CUSTOM_PREFIX: {
11971 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11975 case CEE_MONO_ICALL: {
11977 MonoJitICallInfo *info;
11979 token = read32 (ip + 2);
11980 func = mono_method_get_wrapper_data (method, token);
11981 info = mono_find_jit_icall_by_addr (func);
11983 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11986 CHECK_STACK (info->sig->param_count);
11987 sp -= info->sig->param_count;
11989 ins = mono_emit_jit_icall (cfg, info->func, sp);
11990 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11994 inline_costs += 10 * num_calls++;
11998 case CEE_MONO_LDPTR_CARD_TABLE: {
12000 gpointer card_mask;
12001 CHECK_STACK_OVF (1);
12003 if (cfg->compile_aot)
12004 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12006 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12010 inline_costs += 10 * num_calls++;
12013 case CEE_MONO_LDPTR_NURSERY_START: {
12016 CHECK_STACK_OVF (1);
12018 if (cfg->compile_aot)
12019 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12021 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12025 inline_costs += 10 * num_calls++;
12028 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12029 CHECK_STACK_OVF (1);
12031 if (cfg->compile_aot)
12032 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12034 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12038 inline_costs += 10 * num_calls++;
12041 case CEE_MONO_LDPTR: {
12044 CHECK_STACK_OVF (1);
12046 token = read32 (ip + 2);
12048 ptr = mono_method_get_wrapper_data (method, token);
12049 EMIT_NEW_PCONST (cfg, ins, ptr);
12052 inline_costs += 10 * num_calls++;
12053 /* Can't embed random pointers into AOT code */
12057 case CEE_MONO_JIT_ICALL_ADDR: {
12058 MonoJitICallInfo *callinfo;
12061 CHECK_STACK_OVF (1);
12063 token = read32 (ip + 2);
12065 ptr = mono_method_get_wrapper_data (method, token);
12066 callinfo = mono_find_jit_icall_by_addr (ptr);
12067 g_assert (callinfo);
12068 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12071 inline_costs += 10 * num_calls++;
12074 case CEE_MONO_ICALL_ADDR: {
12075 MonoMethod *cmethod;
12078 CHECK_STACK_OVF (1);
12080 token = read32 (ip + 2);
12082 cmethod = mono_method_get_wrapper_data (method, token);
12084 if (cfg->compile_aot) {
12085 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12087 ptr = mono_lookup_internal_call (cmethod);
12089 EMIT_NEW_PCONST (cfg, ins, ptr);
12095 case CEE_MONO_VTADDR: {
12096 MonoInst *src_var, *src;
12102 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12103 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12108 case CEE_MONO_NEWOBJ: {
12109 MonoInst *iargs [2];
12111 CHECK_STACK_OVF (1);
12113 token = read32 (ip + 2);
12114 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12115 mono_class_init (klass);
12116 NEW_DOMAINCONST (cfg, iargs [0]);
12117 MONO_ADD_INS (cfg->cbb, iargs [0]);
12118 NEW_CLASSCONST (cfg, iargs [1], klass);
12119 MONO_ADD_INS (cfg->cbb, iargs [1]);
12120 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12122 inline_costs += 10 * num_calls++;
12125 case CEE_MONO_OBJADDR:
12128 MONO_INST_NEW (cfg, ins, OP_MOVE);
12129 ins->dreg = alloc_ireg_mp (cfg);
12130 ins->sreg1 = sp [0]->dreg;
12131 ins->type = STACK_MP;
12132 MONO_ADD_INS (cfg->cbb, ins);
12136 case CEE_MONO_LDNATIVEOBJ:
12138 * Similar to LDOBJ, but instead load the unmanaged
12139 * representation of the vtype to the stack.
12144 token = read32 (ip + 2);
12145 klass = mono_method_get_wrapper_data (method, token);
12146 g_assert (klass->valuetype);
12147 mono_class_init (klass);
12150 MonoInst *src, *dest, *temp;
12153 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12154 temp->backend.is_pinvoke = 1;
12155 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12156 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12158 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12159 dest->type = STACK_VTYPE;
12160 dest->klass = klass;
12166 case CEE_MONO_RETOBJ: {
12168 * Same as RET, but return the native representation of a vtype
12171 g_assert (cfg->ret);
12172 g_assert (mono_method_signature (method)->pinvoke);
12177 token = read32 (ip + 2);
12178 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12180 if (!cfg->vret_addr) {
12181 g_assert (cfg->ret_var_is_local);
12183 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12185 EMIT_NEW_RETLOADA (cfg, ins);
12187 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12189 if (sp != stack_start)
12192 MONO_INST_NEW (cfg, ins, OP_BR);
12193 ins->inst_target_bb = end_bblock;
12194 MONO_ADD_INS (cfg->cbb, ins);
12195 link_bblock (cfg, cfg->cbb, end_bblock);
12196 start_new_bblock = 1;
12200 case CEE_MONO_CISINST:
12201 case CEE_MONO_CCASTCLASS: {
12206 token = read32 (ip + 2);
12207 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12208 if (ip [1] == CEE_MONO_CISINST)
12209 ins = handle_cisinst (cfg, klass, sp [0]);
12211 ins = handle_ccastclass (cfg, klass, sp [0]);
12216 case CEE_MONO_SAVE_LMF:
12217 case CEE_MONO_RESTORE_LMF:
12218 #ifdef MONO_ARCH_HAVE_LMF_OPS
12219 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12220 MONO_ADD_INS (cfg->cbb, ins);
12221 cfg->need_lmf_area = TRUE;
12225 case CEE_MONO_CLASSCONST:
12226 CHECK_STACK_OVF (1);
12228 token = read32 (ip + 2);
12229 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12232 inline_costs += 10 * num_calls++;
12234 case CEE_MONO_NOT_TAKEN:
12235 cfg->cbb->out_of_line = TRUE;
12238 case CEE_MONO_TLS: {
12241 CHECK_STACK_OVF (1);
12243 key = (gint32)read32 (ip + 2);
12244 g_assert (key < TLS_KEY_NUM);
12246 ins = mono_create_tls_get (cfg, key);
12248 if (cfg->compile_aot) {
12250 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12251 ins->dreg = alloc_preg (cfg);
12252 ins->type = STACK_PTR;
12254 g_assert_not_reached ();
12257 ins->type = STACK_PTR;
12258 MONO_ADD_INS (cfg->cbb, ins);
12263 case CEE_MONO_DYN_CALL: {
12264 MonoCallInst *call;
12266 /* It would be easier to call a trampoline, but that would put an
12267 * extra frame on the stack, confusing exception handling. So
12268 * implement it inline using an opcode for now.
12271 if (!cfg->dyn_call_var) {
12272 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12273 /* prevent it from being register allocated */
12274 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
			/* Has to use a call inst since local regalloc expects it */
12278 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12279 ins = (MonoInst*)call;
12281 ins->sreg1 = sp [0]->dreg;
12282 ins->sreg2 = sp [1]->dreg;
12283 MONO_ADD_INS (cfg->cbb, ins);
12285 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12288 inline_costs += 10 * num_calls++;
12292 case CEE_MONO_MEMORY_BARRIER: {
12294 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12298 case CEE_MONO_JIT_ATTACH: {
12299 MonoInst *args [16], *domain_ins;
12300 MonoInst *ad_ins, *jit_tls_ins;
12301 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12303 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12305 EMIT_NEW_PCONST (cfg, ins, NULL);
12306 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12308 ad_ins = mono_get_domain_intrinsic (cfg);
12309 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12311 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12312 NEW_BBLOCK (cfg, next_bb);
12313 NEW_BBLOCK (cfg, call_bb);
12315 if (cfg->compile_aot) {
12316 /* AOT code is only used in the root domain */
12317 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12319 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12321 MONO_ADD_INS (cfg->cbb, ad_ins);
12322 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12323 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12325 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12329 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12330 MONO_START_BB (cfg, call_bb);
12333 if (cfg->compile_aot) {
12334 /* AOT code is only used in the root domain */
12335 EMIT_NEW_PCONST (cfg, args [0], NULL);
12337 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12339 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12340 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12343 MONO_START_BB (cfg, next_bb);
12347 case CEE_MONO_JIT_DETACH: {
12348 MonoInst *args [16];
12350 /* Restore the original domain */
12351 dreg = alloc_ireg (cfg);
12352 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12353 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12358 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12364 case CEE_PREFIX1: {
12367 case CEE_ARGLIST: {
12368 /* somewhat similar to LDTOKEN */
12369 MonoInst *addr, *vtvar;
12370 CHECK_STACK_OVF (1);
12371 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12373 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12374 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12376 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12377 ins->type = STACK_VTYPE;
12378 ins->klass = mono_defaults.argumenthandle_class;
12388 MonoInst *cmp, *arg1, *arg2;
12396 * The following transforms:
12397 * CEE_CEQ into OP_CEQ
12398 * CEE_CGT into OP_CGT
12399 * CEE_CGT_UN into OP_CGT_UN
12400 * CEE_CLT into OP_CLT
12401 * CEE_CLT_UN into OP_CLT_UN
12403 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12405 MONO_INST_NEW (cfg, ins, cmp->opcode);
12406 cmp->sreg1 = arg1->dreg;
12407 cmp->sreg2 = arg2->dreg;
12408 type_from_op (cfg, cmp, arg1, arg2);
12410 add_widen_op (cfg, cmp, &arg1, &arg2);
12411 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12412 cmp->opcode = OP_LCOMPARE;
12413 else if (arg1->type == STACK_R4)
12414 cmp->opcode = OP_RCOMPARE;
12415 else if (arg1->type == STACK_R8)
12416 cmp->opcode = OP_FCOMPARE;
12418 cmp->opcode = OP_ICOMPARE;
12419 MONO_ADD_INS (cfg->cbb, cmp);
12420 ins->type = STACK_I4;
12421 ins->dreg = alloc_dreg (cfg, ins->type);
12422 type_from_op (cfg, ins, arg1, arg2);
12424 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12426 * The backends expect the fceq opcodes to do the
12429 ins->sreg1 = cmp->sreg1;
12430 ins->sreg2 = cmp->sreg2;
12433 MONO_ADD_INS (cfg->cbb, ins);
12439 MonoInst *argconst;
12440 MonoMethod *cil_method;
12442 CHECK_STACK_OVF (1);
12444 n = read32 (ip + 2);
12445 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12446 if (!cmethod || mono_loader_get_last_error ())
12448 mono_class_init (cmethod->klass);
12450 mono_save_token_info (cfg, image, n, cmethod);
12452 context_used = mini_method_check_context_used (cfg, cmethod);
12454 cil_method = cmethod;
12455 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12456 METHOD_ACCESS_FAILURE (method, cil_method);
12458 if (mono_security_core_clr_enabled ())
12459 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12462 * Optimize the common case of ldftn+delegate creation
12464 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12465 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12466 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12467 MonoInst *target_ins, *handle_ins;
12468 MonoMethod *invoke;
12469 int invoke_context_used;
12471 invoke = mono_get_delegate_invoke (ctor_method->klass);
12472 if (!invoke || !mono_method_signature (invoke))
12475 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12477 target_ins = sp [-1];
12479 if (mono_security_core_clr_enabled ())
12480 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12482 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12483 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12484 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12485 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12486 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12490 /* FIXME: SGEN support */
12491 if (invoke_context_used == 0) {
12493 if (cfg->verbose_level > 3)
12494 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12495 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12498 CHECK_CFG_EXCEPTION;
12508 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12509 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12513 inline_costs += 10 * num_calls++;
12516 case CEE_LDVIRTFTN: {
12517 MonoInst *args [2];
12521 n = read32 (ip + 2);
12522 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12523 if (!cmethod || mono_loader_get_last_error ())
12525 mono_class_init (cmethod->klass);
12527 context_used = mini_method_check_context_used (cfg, cmethod);
12529 if (mono_security_core_clr_enabled ())
12530 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12533 * Optimize the common case of ldvirtftn+delegate creation
12535 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12536 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12537 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12538 MonoInst *target_ins, *handle_ins;
12539 MonoMethod *invoke;
12540 int invoke_context_used;
12541 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12543 invoke = mono_get_delegate_invoke (ctor_method->klass);
12544 if (!invoke || !mono_method_signature (invoke))
12547 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12549 target_ins = sp [-1];
12551 if (mono_security_core_clr_enabled ())
12552 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12554 /* FIXME: SGEN support */
12555 if (invoke_context_used == 0) {
12557 if (cfg->verbose_level > 3)
12558 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12559 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12562 CHECK_CFG_EXCEPTION;
12575 args [1] = emit_get_rgctx_method (cfg, context_used,
12576 cmethod, MONO_RGCTX_INFO_METHOD);
12579 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12581 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12584 inline_costs += 10 * num_calls++;
12588 CHECK_STACK_OVF (1);
12590 n = read16 (ip + 2);
12592 EMIT_NEW_ARGLOAD (cfg, ins, n);
12597 CHECK_STACK_OVF (1);
12599 n = read16 (ip + 2);
12601 NEW_ARGLOADA (cfg, ins, n);
12602 MONO_ADD_INS (cfg->cbb, ins);
12610 n = read16 (ip + 2);
12612 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12614 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12618 CHECK_STACK_OVF (1);
12620 n = read16 (ip + 2);
12622 EMIT_NEW_LOCLOAD (cfg, ins, n);
12627 unsigned char *tmp_ip;
12628 CHECK_STACK_OVF (1);
12630 n = read16 (ip + 2);
12633 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12639 EMIT_NEW_LOCLOADA (cfg, ins, n);
12648 n = read16 (ip + 2);
12650 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12652 emit_stloc_ir (cfg, sp, header, n);
12659 if (sp != stack_start)
12661 if (cfg->method != method)
12663 * Inlining this into a loop in a parent could lead to
12664 * stack overflows which is different behavior than the
12665 * non-inlined case, thus disable inlining in this case.
12667 INLINE_FAILURE("localloc");
12669 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12670 ins->dreg = alloc_preg (cfg);
12671 ins->sreg1 = sp [0]->dreg;
12672 ins->type = STACK_PTR;
12673 MONO_ADD_INS (cfg->cbb, ins);
12675 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12677 ins->flags |= MONO_INST_INIT;
12682 case CEE_ENDFILTER: {
12683 MonoExceptionClause *clause, *nearest;
12688 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12690 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12691 ins->sreg1 = (*sp)->dreg;
12692 MONO_ADD_INS (cfg->cbb, ins);
12693 start_new_bblock = 1;
12697 for (cc = 0; cc < header->num_clauses; ++cc) {
12698 clause = &header->clauses [cc];
12699 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12700 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12701 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12704 g_assert (nearest);
12705 if ((ip - header->code) != nearest->handler_offset)
12710 case CEE_UNALIGNED_:
12711 ins_flag |= MONO_INST_UNALIGNED;
12712 /* FIXME: record alignment? we can assume 1 for now */
12716 case CEE_VOLATILE_:
12717 ins_flag |= MONO_INST_VOLATILE;
12721 ins_flag |= MONO_INST_TAILCALL;
12722 cfg->flags |= MONO_CFG_HAS_TAIL;
12723 /* Can't inline tail calls at this time */
12724 inline_costs += 100000;
12731 token = read32 (ip + 2);
12732 klass = mini_get_class (method, token, generic_context);
12733 CHECK_TYPELOAD (klass);
12734 if (generic_class_is_reference_type (cfg, klass))
12735 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12737 mini_emit_initobj (cfg, *sp, NULL, klass);
12741 case CEE_CONSTRAINED_:
12743 token = read32 (ip + 2);
12744 constrained_class = mini_get_class (method, token, generic_context);
12745 CHECK_TYPELOAD (constrained_class);
12749 case CEE_INITBLK: {
12750 MonoInst *iargs [3];
12754 /* Skip optimized paths for volatile operations. */
12755 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12756 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12757 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12758 /* emit_memset only works when val == 0 */
12759 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12762 iargs [0] = sp [0];
12763 iargs [1] = sp [1];
12764 iargs [2] = sp [2];
12765 if (ip [1] == CEE_CPBLK) {
12767 * FIXME: It's unclear whether we should be emitting both the acquire
12768 * and release barriers for cpblk. It is technically both a load and
12769 * store operation, so it seems like that's the sensible thing to do.
12771 * FIXME: We emit full barriers on both sides of the operation for
12772 * simplicity. We should have a separate atomic memcpy method instead.
12774 MonoMethod *memcpy_method = get_memcpy_method ();
12776 if (ins_flag & MONO_INST_VOLATILE)
12777 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12779 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12780 call->flags |= ins_flag;
12782 if (ins_flag & MONO_INST_VOLATILE)
12783 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12785 MonoMethod *memset_method = get_memset_method ();
12786 if (ins_flag & MONO_INST_VOLATILE) {
12787 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12788 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12790 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12791 call->flags |= ins_flag;
12802 ins_flag |= MONO_INST_NOTYPECHECK;
12804 ins_flag |= MONO_INST_NORANGECHECK;
12805 /* we ignore the no-nullcheck for now since we
12806 * really do it explicitly only when doing callvirt->call
12810 case CEE_RETHROW: {
12812 int handler_offset = -1;
12814 for (i = 0; i < header->num_clauses; ++i) {
12815 MonoExceptionClause *clause = &header->clauses [i];
12816 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12817 handler_offset = clause->handler_offset;
12822 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12824 if (handler_offset == -1)
12827 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12828 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12829 ins->sreg1 = load->dreg;
12830 MONO_ADD_INS (cfg->cbb, ins);
12832 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12833 MONO_ADD_INS (cfg->cbb, ins);
12836 link_bblock (cfg, cfg->cbb, end_bblock);
12837 start_new_bblock = 1;
12845 CHECK_STACK_OVF (1);
12847 token = read32 (ip + 2);
12848 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12849 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12852 val = mono_type_size (type, &ialign);
12854 MonoClass *klass = mini_get_class (method, token, generic_context);
12855 CHECK_TYPELOAD (klass);
12857 val = mono_type_size (&klass->byval_arg, &ialign);
12859 if (mini_is_gsharedvt_klass (cfg, klass))
12860 GSHAREDVT_FAILURE (*ip);
12862 EMIT_NEW_ICONST (cfg, ins, val);
12867 case CEE_REFANYTYPE: {
12868 MonoInst *src_var, *src;
12870 GSHAREDVT_FAILURE (*ip);
12876 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12878 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12879 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12880 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12885 case CEE_READONLY_:
12898 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12908 g_warning ("opcode 0x%02x not handled", *ip);
12912 if (start_new_bblock != 1)
12915 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12916 if (cfg->cbb->next_bb) {
12917 /* This could already be set because of inlining, #693905 */
12918 MonoBasicBlock *bb = cfg->cbb;
12920 while (bb->next_bb)
12922 bb->next_bb = end_bblock;
12924 cfg->cbb->next_bb = end_bblock;
12927 if (cfg->method == method && cfg->domainvar) {
12929 MonoInst *get_domain;
12931 cfg->cbb = init_localsbb;
12933 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12934 MONO_ADD_INS (cfg->cbb, get_domain);
12936 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12938 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12939 MONO_ADD_INS (cfg->cbb, store);
12942 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12943 if (cfg->compile_aot)
12944 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12945 mono_get_got_var (cfg);
12948 if (cfg->method == method && cfg->got_var)
12949 mono_emit_load_got_addr (cfg);
12951 if (init_localsbb) {
12952 cfg->cbb = init_localsbb;
12954 for (i = 0; i < header->num_locals; ++i) {
12955 emit_init_local (cfg, i, header->locals [i], init_locals);
12959 if (cfg->init_ref_vars && cfg->method == method) {
12960 /* Emit initialization for ref vars */
12961 // FIXME: Avoid duplication initialization for IL locals.
12962 for (i = 0; i < cfg->num_varinfo; ++i) {
12963 MonoInst *ins = cfg->varinfo [i];
12965 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12966 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12970 if (cfg->lmf_var && cfg->method == method) {
12971 cfg->cbb = init_localsbb;
12972 emit_push_lmf (cfg);
12975 cfg->cbb = init_localsbb;
12976 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12979 MonoBasicBlock *bb;
12982 * Make seq points at backward branch targets interruptable.
12984 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12985 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12986 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12989 /* Add a sequence point for method entry/exit events */
12990 if (seq_points && cfg->gen_sdb_seq_points) {
12991 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12992 MONO_ADD_INS (init_localsbb, ins);
12993 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12994 MONO_ADD_INS (cfg->bb_exit, ins);
12998 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12999 * the code they refer to was dead (#11880).
13001 if (sym_seq_points) {
13002 for (i = 0; i < header->code_size; ++i) {
13003 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13006 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13007 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13014 if (cfg->method == method) {
13015 MonoBasicBlock *bb;
13016 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13017 bb->region = mono_find_block_region (cfg, bb->real_offset);
13019 mono_create_spvar_for_region (cfg, bb->region);
13020 if (cfg->verbose_level > 2)
13021 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13025 if (inline_costs < 0) {
13028 /* Method is too large */
13029 mname = mono_method_full_name (method, TRUE);
13030 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13031 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13035 if ((cfg->verbose_level > 2) && (cfg->method == method))
13036 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13041 g_assert (!mono_error_ok (&cfg->error));
13045 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13049 set_exception_type_from_invalid_il (cfg, method, ip);
13053 g_slist_free (class_inits);
13054 mono_basic_block_free (original_bb);
13055 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13056 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13057 if (cfg->exception_type)
13060 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart,
 * for use when the stored value turns out to be a constant.
 * Aborts (g_assert_not_reached) for opcodes with no immediate variant.
 * NOTE(review): listing is decimated — switch scaffolding between the
 * visible case labels is not shown here.
 */
13064 store_membase_reg_to_store_membase_imm (int opcode)
13067 case OP_STORE_MEMBASE_REG:
13068 return OP_STORE_MEMBASE_IMM;
13069 case OP_STOREI1_MEMBASE_REG:
13070 return OP_STOREI1_MEMBASE_IMM;
13071 case OP_STOREI2_MEMBASE_REG:
13072 return OP_STOREI2_MEMBASE_IMM;
13073 case OP_STOREI4_MEMBASE_REG:
13074 return OP_STOREI4_MEMBASE_IMM;
13075 case OP_STOREI8_MEMBASE_REG:
13076 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes */
13078 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to the variant taking an
 * immediate second operand (e.g. the int ops below map to OP_I*_IMM, the
 * long ops to OP_L*_IMM). Used when one source is a known constant.
 * NOTE(review): the case labels between the visible return statements are
 * elided in this decimated listing.
 */
13085 mono_op_to_op_imm (int opcode)
/* 32 bit integer ops */
13089 return OP_IADD_IMM;
13091 return OP_ISUB_IMM;
13093 return OP_IDIV_IMM;
13095 return OP_IDIV_UN_IMM;
13097 return OP_IREM_IMM;
13099 return OP_IREM_UN_IMM;
13101 return OP_IMUL_IMM;
13103 return OP_IAND_IMM;
13107 return OP_IXOR_IMM;
13109 return OP_ISHL_IMM;
13111 return OP_ISHR_IMM;
13113 return OP_ISHR_UN_IMM;
/* 64 bit ops */
13116 return OP_LADD_IMM;
13118 return OP_LSUB_IMM;
13120 return OP_LAND_IMM;
13124 return OP_LXOR_IMM;
13126 return OP_LSHL_IMM;
13128 return OP_LSHR_IMM;
13130 return OP_LSHR_UN_IMM;
/* Long rem only has an immediate form on 64 bit targets */
13131 #if SIZEOF_REGISTER == 8
13133 return OP_LREM_IMM;
/* Compares */
13137 return OP_COMPARE_IMM;
13139 return OP_ICOMPARE_IMM;
13141 return OP_LCOMPARE_IMM;
/* Stores of a constant value */
13143 case OP_STORE_MEMBASE_REG:
13144 return OP_STORE_MEMBASE_IMM;
13145 case OP_STOREI1_MEMBASE_REG:
13146 return OP_STOREI1_MEMBASE_IMM;
13147 case OP_STOREI2_MEMBASE_REG:
13148 return OP_STOREI2_MEMBASE_IMM;
13149 case OP_STOREI4_MEMBASE_REG:
13150 return OP_STOREI4_MEMBASE_IMM;
/* Arch specific immediate forms */
13152 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13154 return OP_X86_PUSH_IMM;
13155 case OP_X86_COMPARE_MEMBASE_REG:
13156 return OP_X86_COMPARE_MEMBASE_IMM;
13158 #if defined(TARGET_AMD64)
13159 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13160 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13162 case OP_VOIDCALL_REG:
13163 return OP_VOIDCALL;
13171 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode (sign/zero-extending byte and short loads,
 * pointer-sized load for I/REF, and R4/R8 float loads).
 * Aborts for opcodes outside the LDIND family.
 */
13178 ldind_to_load_membase (int opcode)
13182 return OP_LOADI1_MEMBASE;
13184 return OP_LOADU1_MEMBASE;
13186 return OP_LOADI2_MEMBASE;
13188 return OP_LOADU2_MEMBASE;
13190 return OP_LOADI4_MEMBASE;
13192 return OP_LOADU4_MEMBASE;
/* LDIND_I and LDIND_REF both become a pointer-sized load */
13194 return OP_LOAD_MEMBASE;
13195 case CEE_LDIND_REF:
13196 return OP_LOAD_MEMBASE;
13198 return OP_LOADI8_MEMBASE;
13200 return OP_LOADR4_MEMBASE;
13202 return OP_LOADR8_MEMBASE;
13204 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode; STIND_REF becomes a pointer-sized
 * store. Aborts for opcodes outside the STIND family.
 */
13211 stind_to_store_membase (int opcode)
13215 return OP_STOREI1_MEMBASE_REG;
13217 return OP_STOREI2_MEMBASE_REG;
13219 return OP_STOREI4_MEMBASE_REG;
13221 case CEE_STIND_REF:
13222 return OP_STORE_MEMBASE_REG;
13224 return OP_STOREI8_MEMBASE_REG;
13226 return OP_STORER4_MEMBASE_REG;
13228 return OP_STORER8_MEMBASE_REG;
13230 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, map a base+offset load (OP_LOAD*_MEMBASE) to the
 * absolute-address form (OP_LOAD*_MEM), usable when the effective address
 * is a known constant. The I8 form is only available on 64 bit registers.
 */
13237 mono_load_membase_to_load_mem (int opcode)
13239 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13240 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13242 case OP_LOAD_MEMBASE:
13243 return OP_LOAD_MEM;
13244 case OP_LOADU1_MEMBASE:
13245 return OP_LOADU1_MEM;
13246 case OP_LOADU2_MEMBASE:
13247 return OP_LOADU2_MEM;
13248 case OP_LOADI4_MEMBASE:
13249 return OP_LOADI4_MEM;
13250 case OP_LOADU4_MEMBASE:
13251 return OP_LOADU4_MEM;
13252 #if SIZEOF_REGISTER == 8
13253 case OP_LOADI8_MEMBASE:
13254 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU opcode whose result is immediately stored to memory into
 * a single x86/amd64 read-modify-write *_MEMBASE opcode, keyed on the
 * store opcode used. Only pointer/int32 (and on amd64, int64) stores
 * qualify; otherwise the caller's pattern is left alone.
 * NOTE(review): the case labels and the fall-through return are elided in
 * this decimated listing — only the return statements are visible.
 */
13263 op_to_op_dest_membase (int store_opcode, int opcode)
13265 #if defined(TARGET_X86)
/* RMW folding only valid for pointer-sized / int32 stores on x86 */
13266 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13271 return OP_X86_ADD_MEMBASE_REG;
13273 return OP_X86_SUB_MEMBASE_REG;
13275 return OP_X86_AND_MEMBASE_REG;
13277 return OP_X86_OR_MEMBASE_REG;
13279 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-operand forms */
13282 return OP_X86_ADD_MEMBASE_IMM;
13285 return OP_X86_SUB_MEMBASE_IMM;
13288 return OP_X86_AND_MEMBASE_IMM;
13291 return OP_X86_OR_MEMBASE_IMM;
13294 return OP_X86_XOR_MEMBASE_IMM;
13300 #if defined(TARGET_AMD64)
/* amd64 additionally folds into int64 stores */
13301 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* opcodes */
13306 return OP_X86_ADD_MEMBASE_REG;
13308 return OP_X86_SUB_MEMBASE_REG;
13310 return OP_X86_AND_MEMBASE_REG;
13312 return OP_X86_OR_MEMBASE_REG;
13314 return OP_X86_XOR_MEMBASE_REG;
13316 return OP_X86_ADD_MEMBASE_IMM;
13318 return OP_X86_SUB_MEMBASE_IMM;
13320 return OP_X86_AND_MEMBASE_IMM;
13322 return OP_X86_OR_MEMBASE_IMM;
13324 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops use the AMD64_* opcodes */
13326 return OP_AMD64_ADD_MEMBASE_REG;
13328 return OP_AMD64_SUB_MEMBASE_REG;
13330 return OP_AMD64_AND_MEMBASE_REG;
13332 return OP_AMD64_OR_MEMBASE_REG;
13334 return OP_AMD64_XOR_MEMBASE_REG;
13337 return OP_AMD64_ADD_MEMBASE_IMM;
13340 return OP_AMD64_SUB_MEMBASE_IMM;
13343 return OP_AMD64_AND_MEMBASE_IMM;
13346 return OP_AMD64_OR_MEMBASE_IMM;
13349 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode followed by a byte store into a single
 * x86/amd64 SETcc-to-memory opcode (SETEQ/SETNE _MEMBASE) when the store
 * is a one-byte store.
 * NOTE(review): the case labels are elided here — presumably keyed on
 * OP_CEQ/OP_CNE-style opcodes; confirm against the full source.
 */
13359 op_to_op_store_membase (int store_opcode, int opcode)
13361 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13364 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13365 return OP_X86_SETEQ_MEMBASE;
13367 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13368 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load feeding the FIRST source operand of 'opcode' into
 * a combined x86/amd64 *_MEMBASE opcode, keyed on the load opcode, so the
 * separate load instruction can be eliminated.
 * NOTE(review): listing is decimated — intervening case labels and
 * fall-through returns are not visible.
 */
13376 op_to_op_src1_membase (int load_opcode, int opcode)
13379 /* FIXME: This has sign extension issues */
/* Special case: byte compare against an immediate */
13381 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13382 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer/int32-sized loads can be folded on x86 */
13385 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13390 return OP_X86_PUSH_MEMBASE;
13391 case OP_COMPARE_IMM:
13392 case OP_ICOMPARE_IMM:
13393 return OP_X86_COMPARE_MEMBASE_IMM;
13396 return OP_X86_COMPARE_MEMBASE_REG;
13400 #ifdef TARGET_AMD64
13401 /* FIXME: This has sign extension issues */
13403 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13404 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32), OP_LOAD_MEMBASE is 32 bit wide, I8 loads are not foldable here */
13409 #ifdef __mono_ilp32__
13410 if (load_opcode == OP_LOADI8_MEMBASE)
13412 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13414 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the original source */
13416 /* FIXME: This only works for 32 bit immediates
13417 case OP_COMPARE_IMM:
13418 case OP_LCOMPARE_IMM:
13419 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13420 return OP_AMD64_COMPARE_MEMBASE_IMM;
13422 case OP_ICOMPARE_IMM:
13423 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13424 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13428 #ifdef __mono_ilp32__
13429 if (load_opcode == OP_LOAD_MEMBASE)
13430 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13431 if (load_opcode == OP_LOADI8_MEMBASE)
13433 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13435 return OP_AMD64_COMPARE_MEMBASE_REG;
13438 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13439 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load feeding the SECOND source operand of 'opcode' into
 * a combined x86/amd64 *_REG_MEMBASE opcode, keyed on the load opcode.
 * Mirrors op_to_op_src1_membase but for the reg-op-mem operand order.
 * NOTE(review): listing is decimated — intervening case labels and
 * fall-through returns are not visible.
 */
13448 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int32-sized loads are foldable */
13451 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13457 return OP_X86_COMPARE_REG_MEMBASE;
13459 return OP_X86_ADD_REG_MEMBASE;
13461 return OP_X86_SUB_REG_MEMBASE;
13463 return OP_X86_AND_REG_MEMBASE;
13465 return OP_X86_OR_REG_MEMBASE;
13467 return OP_X86_XOR_REG_MEMBASE;
13471 #ifdef TARGET_AMD64
/* Under ILP32 (x32), OP_LOAD_MEMBASE counts as a 32 bit load */
13472 #ifdef __mono_ilp32__
13473 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13475 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit ops reuse the X86_* opcodes */
13479 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13481 return OP_X86_ADD_REG_MEMBASE;
13483 return OP_X86_SUB_REG_MEMBASE;
13485 return OP_X86_AND_REG_MEMBASE;
13487 return OP_X86_OR_REG_MEMBASE;
13489 return OP_X86_XOR_REG_MEMBASE;
13491 #ifdef __mono_ilp32__
13492 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13494 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit ops use the AMD64_* opcodes */
13499 return OP_AMD64_COMPARE_REG_MEMBASE;
13501 return OP_AMD64_ADD_REG_MEMBASE;
13503 return OP_AMD64_SUB_REG_MEMBASE;
13505 return OP_AMD64_AND_REG_MEMBASE;
13507 return OP_AMD64_OR_REG_MEMBASE;
13509 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes that
 * are software-emulated on the target — long shifts on 32 bit registers,
 * and mul/div/rem when MONO_ARCH_EMULATE_* is defined — since the
 * emulation helpers cannot take an immediate operand. Falls through to
 * mono_op_to_op_imm () for everything else.
 * NOTE(review): the case labels guarded by the #ifs are elided in this
 * decimated listing.
 */
13518 mono_op_to_op_imm_noemul (int opcode)
13521 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13527 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13534 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13539 return mono_op_to_op_imm (opcode);
13544 * mono_handle_global_vregs:
13546 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13550 mono_handle_global_vregs (MonoCompile *cfg)
13552 gint32 *vreg_to_bb;
13553 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] encoding: 0 = not seen yet, block_num + 1 = seen in
 * exactly one bblock, -1 = seen in more than one bblock (made global).
 * NOTE(review): the allocation uses sizeof (gint32*) though the element
 * type is gint32 — harmless over-allocation on 64 bit, but presumably
 * sizeof (gint32) was intended; confirm against upstream.
 */
13556 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13558 #ifdef MONO_ARCH_SIMD_INTRINSICS
13559 if (cfg->uses_simd_intrinsics)
13560 mono_simd_simplify_indirection (cfg);
13563 /* Find local vregs used in more than one bb */
13564 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13565 MonoInst *ins = bb->code;
13566 int block_num = bb->block_num;
13568 if (cfg->verbose_level > 2)
13569 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13572 for (; ins; ins = ins->next) {
13573 const char *spec = INS_INFO (ins->opcode);
13574 int regtype = 0, regindex;
13577 if (G_UNLIKELY (cfg->verbose_level > 2))
13578 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
13580 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn */
13582 for (regindex = 0; regindex < 4; regindex ++) {
13585 if (regindex == 0) {
13586 regtype = spec [MONO_INST_DEST];
13587 if (regtype == ' ')
13590 } else if (regindex == 1) {
13591 regtype = spec [MONO_INST_SRC1];
13592 if (regtype == ' ')
13595 } else if (regindex == 2) {
13596 regtype = spec [MONO_INST_SRC2];
13597 if (regtype == ' ')
13600 } else if (regindex == 3) {
13601 regtype = spec [MONO_INST_SRC3];
13602 if (regtype == ' ')
13607 #if SIZEOF_REGISTER == 4
13608 /* In the LLVM case, the long opcodes are not decomposed */
13609 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13611 * Since some instructions reference the original long vreg,
13612 * and some reference the two component vregs, it is quite hard
13613 * to determine when it needs to be global. So be conservative.
13615 if (!get_vreg_to_inst (cfg, vreg)) {
13616 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13618 if (cfg->verbose_level > 2)
13619 printf ("LONG VREG R%d made global.\n", vreg);
13623 * Make the component vregs volatile since the optimizations can
13624 * get confused otherwise.
13626 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13627 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13631 g_assert (vreg != -1);
13633 prev_bb = vreg_to_bb [vreg];
13634 if (prev_bb == 0) {
13635 /* 0 is a valid block num */
13636 vreg_to_bb [vreg] = block_num + 1;
13637 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are handled by the register allocator, skip them */
13638 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Used in a second bblock: promote to a global variable of the right type */
13641 if (!get_vreg_to_inst (cfg, vreg)) {
13642 if (G_UNLIKELY (cfg->verbose_level > 2))
13643 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13647 if (vreg_is_ref (cfg, vreg))
13648 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13650 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13653 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13656 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13659 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13662 g_assert_not_reached ();
13666 /* Flag as having been used in more than one bb */
13667 vreg_to_bb [vreg] = -1;
13673 /* If a variable is used in only one bblock, convert it into a local vreg */
13674 for (i = 0; i < cfg->num_varinfo; i++) {
13675 MonoInst *var = cfg->varinfo [i];
13676 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13678 switch (var->type) {
13684 #if SIZEOF_REGISTER == 8
13687 #if !defined(TARGET_X86)
13688 /* Enabling this screws up the fp stack on x86 */
13691 if (mono_arch_is_soft_float ())
13694 /* Arguments are implicitly global */
13695 /* Putting R4 vars into registers doesn't work currently */
13696 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13697 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13699 * Make that the variable's liveness interval doesn't contain a call, since
13700 * that would cause the lvreg to be spilled, making the whole optimization
13703 /* This is too slow for JIT compilation */
/*
 * NOTE(review): the loop below treats vreg_to_bb [var->dreg] as a
 * MonoBasicBlock* although the array holds gint32 values — this path
 * appears to be compiled out in the full source ("too slow for JIT
 * compilation" above); confirm it is inside a disabled #if region.
 */
13705 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13707 int def_index, call_index, ins_index;
13708 gboolean spilled = FALSE;
13713 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13714 const char *spec = INS_INFO (ins->opcode);
13716 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13717 def_index = ins_index;
/*
 * NOTE(review): both halves of this || test SRC1/sreg1 — the second
 * presumably should test SRC2/sreg2; verify against upstream.
 */
13719 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13720 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13721 if (call_index > def_index) {
13727 if (MONO_IS_CALL (ins))
13728 call_index = ins_index;
/* Kill the global variable: the vreg stays purely block-local */
13738 if (G_UNLIKELY (cfg->verbose_level > 2))
13739 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13740 var->flags |= MONO_INST_IS_DEAD;
13741 cfg->vreg_to_inst [var->dreg] = NULL;
13748 * Compress the varinfo and vars tables so the liveness computation is faster and
13749 * takes up less space.
13752 for (i = 0; i < cfg->num_varinfo; ++i) {
13753 MonoInst *var = cfg->varinfo [i];
13754 if (pos < i && cfg->locals_start == i)
13755 cfg->locals_start = pos;
13756 if (!(var->flags & MONO_INST_IS_DEAD)) {
13758 cfg->varinfo [pos] = cfg->varinfo [i];
13759 cfg->varinfo [pos]->inst_c0 = pos;
13760 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13761 cfg->vars [pos].idx = pos;
13762 #if SIZEOF_REGISTER == 4
13763 if (cfg->varinfo [pos]->type == STACK_I8) {
13764 /* Modify the two component vars too */
13767 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13768 var1->inst_c0 = pos;
13769 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13770 var1->inst_c0 = pos;
13777 cfg->num_varinfo = pos;
13778 if (cfg->locals_start > cfg->num_varinfo)
13779 cfg->locals_start = cfg->num_varinfo;
13783 * mono_spill_global_vars:
13785 * Generate spill code for variables which are not allocated to registers,
13786 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13787 * code is generated which could be optimized by the local optimization passes.
13790 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13792 MonoBasicBlock *bb;
13794 int orig_next_vreg;
13795 guint32 *vreg_to_lvreg;
13797 guint32 i, lvregs_len;
13798 gboolean dest_has_lvreg = FALSE;
13799 guint32 stacktypes [128];
13800 MonoInst **live_range_start, **live_range_end;
13801 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13802 int *gsharedvt_vreg_to_idx = NULL;
13804 *need_local_opts = FALSE;
13806 memset (spec2, 0, sizeof (spec2));
/* stacktypes maps an ins-spec regtype character ('i'/'l'/'f'/'x') to the
 * stack type used when allocating a fresh lvreg for that register class. */
13808 /* FIXME: Move this function to mini.c */
13809 stacktypes ['i'] = STACK_PTR;
13810 stacktypes ['l'] = STACK_I8;
13811 stacktypes ['f'] = STACK_R8;
13812 #ifdef MONO_ARCH_SIMD_INTRINSICS
13813 stacktypes ['x'] = STACK_VTYPE;
/* On 32-bit targets a 64-bit variable occupies two stack words; the loop
 * below materializes OP_REGOFFSET insts for the two halves
 * (vreg+1 -> LS word, vreg+2 -> MS word, see MINI_LS/MS_WORD_OFFSET). */
13816 #if SIZEOF_REGISTER == 4
13817 /* Create MonoInsts for longs */
13818 for (i = 0; i < cfg->num_varinfo; i++) {
13819 MonoInst *ins = cfg->varinfo [i];
13821 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13822 switch (ins->type) {
13827 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13830 g_assert (ins->opcode == OP_REGOFFSET);
13832 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13834 tree->opcode = OP_REGOFFSET;
13835 tree->inst_basereg = ins->inst_basereg;
13836 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13838 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13840 tree->opcode = OP_REGOFFSET;
13841 tree->inst_basereg = ins->inst_basereg;
13842 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13852 if (cfg->compute_gc_maps) {
13853 /* registers need liveness info even for !non refs */
13854 for (i = 0; i < cfg->num_varinfo; i++) {
13855 MonoInst *ins = cfg->varinfo [i];
13857 if (ins->opcode == OP_REGVAR)
13858 ins->flags |= MONO_INST_GC_TRACK;
/*
 * Build a per-vreg map describing how each gsharedvt variable's address is
 * obtained: idx+1 for locals (offset looked up in the runtime info table at
 * that slot index), -1 for args passed by ref, 0 (unset) otherwise.
 * The map is consulted by the OP_LDADDR lowering further down.
 */
13862 if (cfg->gsharedvt) {
13863 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13865 for (i = 0; i < cfg->num_varinfo; ++i) {
13866 MonoInst *ins = cfg->varinfo [i];
13869 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13870 if (i >= cfg->locals_start) {
13872 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13873 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13874 ins->opcode = OP_GSHAREDVT_LOCAL;
13875 ins->inst_imm = idx;
13878 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13879 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13885 /* FIXME: widening and truncation */
13888 * As an optimization, when a variable allocated to the stack is first loaded into
13889 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13890 * the variable again.
/* Snapshot next_vreg before this pass allocates any new lvregs; it is used
 * below to distinguish pre-existing vregs (tracked for live ranges / GC)
 * from vregs created by this pass itself. */
13892 orig_next_vreg = cfg->next_vreg;
13893 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13894 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13898 * These arrays contain the first and last instructions accessing a given
13900 * Since we emit bblocks in the same order we process them here, and we
13901 * don't split live ranges, these will precisely describe the live range of
13902 * the variable, i.e. the instruction range where a valid value can be found
13903 * in the variables location.
13904 * The live range is computed using the liveness info computed by the liveness pass.
13905 * We can't use vmv->range, since that is an abstract live range, and we need
13906 * one which is instruction precise.
13907 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13909 /* FIXME: Only do this if debugging info is requested */
13910 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13911 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13912 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13913 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13915 /* Add spill loads/stores */
13916 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13919 if (cfg->verbose_level > 2)
13920 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: values cached in a previous bblock are
 * not valid here, so reset all entries recorded in lvregs []. */
13922 /* Clear vreg_to_lvreg array */
13923 for (i = 0; i < lvregs_len; i++)
13924 vreg_to_lvreg [lvregs [i]] = 0;
13928 MONO_BB_FOR_EACH_INS (bb, ins) {
13929 const char *spec = INS_INFO (ins->opcode);
13930 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13931 gboolean store, no_lvreg;
13932 int sregs [MONO_MAX_SRC_REGS];
13934 if (G_UNLIKELY (cfg->verbose_level > 2))
13935 mono_print_ins (ins);
13937 if (ins->opcode == OP_NOP)
13941 * We handle LDADDR here as well, since it can only be decomposed
13942 * when variable addresses are known.
13944 if (ins->opcode == OP_LDADDR) {
13945 MonoInst *var = ins->inst_p0;
13947 if (var->opcode == OP_VTARG_ADDR) {
13948 /* Happens on SPARC/S390 where vtypes are passed by reference */
13949 MonoInst *vtaddr = var->inst_left;
13950 if (vtaddr->opcode == OP_REGVAR) {
13951 ins->opcode = OP_MOVE;
13952 ins->sreg1 = vtaddr->dreg;
13954 else if (var->inst_left->opcode == OP_REGOFFSET) {
13955 ins->opcode = OP_LOAD_MEMBASE;
13956 ins->inst_basereg = vtaddr->inst_basereg;
13957 ins->inst_offset = vtaddr->inst_offset;
13960 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13961 /* gsharedvt arg passed by ref */
13962 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13964 ins->opcode = OP_LOAD_MEMBASE;
13965 ins->inst_basereg = var->inst_basereg;
13966 ins->inst_offset = var->inst_offset;
13967 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13968 MonoInst *load, *load2, *load3;
13969 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13970 int reg1, reg2, reg3;
13971 MonoInst *info_var = cfg->gsharedvt_info_var;
13972 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13976 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13979 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13981 g_assert (info_var);
13982 g_assert (locals_var);
13984 /* Mark the instruction used to compute the locals var as used */
13985 cfg->gsharedvt_locals_var_ins = NULL;
13987 /* Load the offset */
13988 if (info_var->opcode == OP_REGOFFSET) {
13989 reg1 = alloc_ireg (cfg);
13990 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13991 } else if (info_var->opcode == OP_REGVAR) {
13993 reg1 = info_var->dreg;
13995 g_assert_not_reached ();
13997 reg2 = alloc_ireg (cfg);
13998 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13999 /* Load the locals area address */
14000 reg3 = alloc_ireg (cfg);
14001 if (locals_var->opcode == OP_REGOFFSET) {
14002 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14003 } else if (locals_var->opcode == OP_REGVAR) {
14004 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14006 g_assert_not_reached ();
14008 /* Compute the address */
14009 ins->opcode = OP_PADD;
/* Insert the three loads so they execute in order: load, load2, load3,
 * then the OP_PADD itself. */
14013 mono_bblock_insert_before_ins (bb, ins, load3);
14014 mono_bblock_insert_before_ins (bb, load3, load2);
14016 mono_bblock_insert_before_ins (bb, load2, load);
14018 g_assert (var->opcode == OP_REGOFFSET);
14020 ins->opcode = OP_ADD_IMM;
14021 ins->sreg1 = var->inst_basereg;
14022 ins->inst_imm = var->inst_offset;
/* The LDADDR lowering above produced instructions the local optimizer can
 * improve, so flag that for the caller; refresh spec after the opcode change. */
14025 *need_local_opts = TRUE;
14026 spec = INS_INFO (ins->opcode);
/* Only low-level opcodes may remain at this point; any surviving CIL-level
 * opcode is a decomposition bug. */
14029 if (ins->opcode < MONO_CEE_LAST) {
14030 mono_print_ins (ins);
14031 g_assert_not_reached ();
14035 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg and sreg2 so the store's base register is processed
 * by the sreg logic below; spec2 describes the instruction in swapped form
 * (no dest, original dest treated as SRC2). The swap is undone further down. */
14039 if (MONO_IS_STORE_MEMBASE (ins)) {
14040 tmp_reg = ins->dreg;
14041 ins->dreg = ins->sreg2;
14042 ins->sreg2 = tmp_reg;
14045 spec2 [MONO_INST_DEST] = ' ';
14046 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14047 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14048 spec2 [MONO_INST_SRC3] = ' ';
14050 } else if (MONO_IS_STORE_MEMINDEX (ins))
14051 g_assert_not_reached ();
14056 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14057 printf ("\t %.3s %d", spec, ins->dreg);
14058 num_sregs = mono_inst_get_src_registers (ins, sregs);
14059 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14060 printf (" %d", sregs [srcindex]);
/*
 * DREG handling: if the destination vreg maps to a variable, either use the
 * variable's hreg directly, fuse the store into the instruction (_membase /
 * _imm forms), or allocate a fresh lvreg and emit an explicit spill store
 * after the instruction.
 */
14067 regtype = spec [MONO_INST_DEST];
14068 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14071 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14072 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14073 MonoInst *store_ins;
14075 MonoInst *def_ins = ins;
14076 int dreg = ins->dreg; /* The original vreg */
14078 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14080 if (var->opcode == OP_REGVAR) {
14081 ins->dreg = var->dreg;
14082 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14084 * Instead of emitting a load+store, use a _membase opcode.
14086 g_assert (var->opcode == OP_REGOFFSET);
14087 if (ins->opcode == OP_MOVE) {
14091 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14092 ins->inst_basereg = var->inst_basereg;
14093 ins->inst_offset = var->inst_offset;
14096 spec = INS_INFO (ins->opcode);
14100 g_assert (var->opcode == OP_REGOFFSET);
14102 prev_dreg = ins->dreg;
14104 /* Invalidate any previous lvreg for this vreg */
14105 vreg_to_lvreg [ins->dreg] = 0;
/* Under soft-float, R8 values are stored via integer stores. */
14109 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14111 store_opcode = OP_STOREI8_MEMBASE_REG;
14114 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14116 #if SIZEOF_REGISTER != 8
14117 if (regtype == 'l') {
14118 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14119 mono_bblock_insert_after_ins (bb, ins, store_ins);
14120 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14121 mono_bblock_insert_after_ins (bb, ins, store_ins);
14122 def_ins = store_ins;
14127 g_assert (store_opcode != OP_STOREV_MEMBASE);
14129 /* Try to fuse the store into the instruction itself */
14130 /* FIXME: Add more instructions */
14131 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14132 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14133 ins->inst_imm = ins->inst_c0;
14134 ins->inst_destbasereg = var->inst_basereg;
14135 ins->inst_offset = var->inst_offset;
14136 spec = INS_INFO (ins->opcode);
14137 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A plain move into the variable becomes the store itself; the store form
 * needs the dreg/sreg2 swap + spec2 treatment like other store opcodes. */
14138 ins->opcode = store_opcode;
14139 ins->inst_destbasereg = var->inst_basereg;
14140 ins->inst_offset = var->inst_offset;
14144 tmp_reg = ins->dreg;
14145 ins->dreg = ins->sreg2;
14146 ins->sreg2 = tmp_reg;
14149 spec2 [MONO_INST_DEST] = ' ';
14150 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14151 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14152 spec2 [MONO_INST_SRC3] = ' ';
14154 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14155 // FIXME: The backends expect the base reg to be in inst_basereg
14156 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14158 ins->inst_basereg = var->inst_basereg;
14159 ins->inst_offset = var->inst_offset;
14160 spec = INS_INFO (ins->opcode);
14162 /* printf ("INS: "); mono_print_ins (ins); */
14163 /* Create a store instruction */
14164 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14166 /* Insert it after the instruction */
14167 mono_bblock_insert_after_ins (bb, ins, store_ins);
14169 def_ins = store_ins;
14172 * We can't assign ins->dreg to var->dreg here, since the
14173 * sregs could use it. So set a flag, and do it after
/* FP-stack targets can't cache fp values in lvregs; volatile/indirect
 * variables must always be reloaded from memory. */
14176 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14177 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for live-range tracking. */
14182 if (def_ins && !live_range_start [dreg]) {
14183 live_range_start [dreg] = def_ins;
14184 live_range_start_bb [dreg] = bb;
14187 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14190 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14191 tmp->inst_c1 = dreg;
14192 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/*
 * SREG handling: replace each source vreg that maps to a variable with the
 * variable's hreg, a cached lvreg, a fused _membase form, or a fresh lvreg
 * filled by an explicit load inserted before the instruction.
 */
14199 num_sregs = mono_inst_get_src_registers (ins, sregs);
14200 for (srcindex = 0; srcindex < 3; ++srcindex) {
14201 regtype = spec [MONO_INST_SRC1 + srcindex];
14202 sreg = sregs [srcindex];
14204 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14205 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14206 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14207 MonoInst *use_ins = ins;
14208 MonoInst *load_ins;
14209 guint32 load_opcode;
14211 if (var->opcode == OP_REGVAR) {
14212 sregs [srcindex] = var->dreg;
14213 //mono_inst_set_src_registers (ins, sregs);
14214 live_range_end [sreg] = use_ins;
14215 live_range_end_bb [sreg] = bb;
14217 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14220 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14221 /* var->dreg is a hreg */
14222 tmp->inst_c1 = sreg;
14223 mono_bblock_insert_after_ins (bb, ins, tmp);
14229 g_assert (var->opcode == OP_REGOFFSET);
14231 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14233 g_assert (load_opcode != OP_LOADV_MEMBASE);
14235 if (vreg_to_lvreg [sreg]) {
14236 g_assert (vreg_to_lvreg [sreg] != -1);
14238 /* The variable is already loaded to an lvreg */
14239 if (G_UNLIKELY (cfg->verbose_level > 2))
14240 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14241 sregs [srcindex] = vreg_to_lvreg [sreg];
14242 //mono_inst_set_src_registers (ins, sregs);
14246 /* Try to fuse the load into the instruction */
14247 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14248 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14249 sregs [0] = var->inst_basereg;
14250 //mono_inst_set_src_registers (ins, sregs);
14251 ins->inst_offset = var->inst_offset;
14252 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14253 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14254 sregs [1] = var->inst_basereg;
14255 //mono_inst_set_src_registers (ins, sregs);
14256 ins->inst_offset = var->inst_offset;
14258 if (MONO_IS_REAL_MOVE (ins)) {
14259 ins->opcode = OP_NOP;
14262 //printf ("%d ", srcindex); mono_print_ins (ins);
14264 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded value unless caching is unsafe (fp-stack
 * targets for fp loads, volatile/indirect vars, or no_lvreg set). */
14266 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14267 if (var->dreg == prev_dreg) {
14269 * sreg refers to the value loaded by the load
14270 * emitted below, but we need to use ins->dreg
14271 * since it refers to the store emitted earlier.
14275 g_assert (sreg != -1);
14276 vreg_to_lvreg [var->dreg] = sreg;
14277 g_assert (lvregs_len < 1024);
14278 lvregs [lvregs_len ++] = var->dreg;
14282 sregs [srcindex] = sreg;
14283 //mono_inst_set_src_registers (ins, sregs);
14285 #if SIZEOF_REGISTER != 8
14286 if (regtype == 'l') {
14287 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14288 mono_bblock_insert_before_ins (bb, ins, load_ins);
14289 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14290 mono_bblock_insert_before_ins (bb, ins, load_ins);
14291 use_ins = load_ins;
14296 #if SIZEOF_REGISTER == 4
14297 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14299 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14300 mono_bblock_insert_before_ins (bb, ins, load_ins);
14301 use_ins = load_ins;
/* Only track live ranges / GC uses for vregs that existed before this
 * pass started allocating lvregs. */
14305 if (var->dreg < orig_next_vreg) {
14306 live_range_end [var->dreg] = use_ins;
14307 live_range_end_bb [var->dreg] = bb;
14310 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14313 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14314 tmp->inst_c1 = var->dreg;
14315 mono_bblock_insert_after_ins (bb, ins, tmp);
14319 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass above (see its comment): now that the sregs
 * no longer need the old value, publish ins->dreg as the cached lvreg. */
14321 if (dest_has_lvreg) {
14322 g_assert (ins->dreg != -1);
14323 vreg_to_lvreg [prev_dreg] = ins->dreg;
14324 g_assert (lvregs_len < 1024);
14325 lvregs [lvregs_len ++] = prev_dreg;
14326 dest_has_lvreg = FALSE;
/* Swap dreg/sreg2 back (store opcodes were swapped before processing). */
14330 tmp_reg = ins->dreg;
14331 ins->dreg = ins->sreg2;
14332 ins->sreg2 = tmp_reg;
/* Calls invalidate every cached lvreg. */
14335 if (MONO_IS_CALL (ins)) {
14336 /* Clear vreg_to_lvreg array */
14337 for (i = 0; i < lvregs_len; i++)
14338 vreg_to_lvreg [lvregs [i]] = 0;
14340 } else if (ins->opcode == OP_NOP) {
14342 MONO_INST_NULLIFY_SREGS (ins);
14345 if (cfg->verbose_level > 2)
14346 mono_print_ins_index (1, ins);
14349 /* Extend the live range based on the liveness info */
14350 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14351 for (i = 0; i < cfg->num_varinfo; i ++) {
14352 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14354 if (vreg_is_volatile (cfg, vi->vreg))
14355 /* The liveness info is incomplete */
14358 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14359 /* Live from at least the first ins of this bb */
14360 live_range_start [vi->vreg] = bb->code;
14361 live_range_start_bb [vi->vreg] = bb;
14364 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14365 /* Live at least until the last ins of this bb */
14366 live_range_end [vi->vreg] = bb->last_ins;
14367 live_range_end_bb [vi->vreg] = bb;
14373 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14375 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14376 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14378 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14379 for (i = 0; i < cfg->num_varinfo; ++i) {
14380 int vreg = MONO_VARINFO (cfg, i)->vreg;
14383 if (live_range_start [vreg]) {
14384 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14386 ins->inst_c1 = vreg;
14387 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14389 if (live_range_end [vreg]) {
14390 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14392 ins->inst_c1 = vreg;
14393 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14394 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14396 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* If no LDADDR lowering consumed the gsharedvt locals var (it would have
 * cleared gsharedvt_locals_var_ins above), neutralize its computation. */
14402 if (cfg->gsharedvt_locals_var_ins) {
14403 /* Nullify if unused */
14404 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14405 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* The live-range arrays were g_new0/g_new allocated above; the mempool
 * allocations (vreg_to_lvreg, lvregs, gsharedvt_vreg_to_idx) are released
 * with cfg->mempool. */
14408 g_free (live_range_start);
14409 g_free (live_range_end);
14410 g_free (live_range_start_bb);
14411 g_free (live_range_end_bb);
14416 * - use 'iadd' instead of 'int_add'
14417 * - handling ovf opcodes: decompose in method_to_ir.
14418 * - unify iregs/fregs
14419 * -> partly done, the missing parts are:
14420 * - a more complete unification would involve unifying the hregs as well, so
14421 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14422 * would no longer map to the machine hregs, so the code generators would need to
14423 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14424 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14425 * fp/non-fp branches speeds it up by about 15%.
14426 * - use sext/zext opcodes instead of shifts
14428 * - get rid of TEMPLOADs if possible and use vregs instead
14429 * - clean up usage of OP_P/OP_ opcodes
14430 * - cleanup usage of DUMMY_USE
14431 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14433 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14434 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14435 * - make sure handle_stack_args () is called before the branch is emitted
14436 * - when the new IR is done, get rid of all unused stuff
14437 * - COMPARE/BEQ as separate instructions or unify them ?
14438 * - keeping them separate allows specialized compare instructions like
14439 * compare_imm, compare_membase
14440 * - most back ends unify fp compare+branch, fp compare+ceq
14441 * - integrate mono_save_args into inline_method
14442 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14443 * - handle long shift opts on 32 bit platforms somehow: they require
14444 * 3 sregs (2 for arg1 and 1 for arg2)
14445 * - make byref a 'normal' type.
14446 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14447 * variable if needed.
14448 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14449 * like inline_method.
14450 * - remove inlining restrictions
14451 * - fix LNEG and enable cfold of INEG
14452 * - generalize x86 optimizations like ldelema as a peephole optimization
14453 * - add store_mem_imm for amd64
14454 * - optimize the loading of the interruption flag in the managed->native wrappers
14455 * - avoid special handling of OP_NOP in passes
14456 * - move code inserting instructions into one function/macro.
14457 * - try a coalescing phase after liveness analysis
14458 * - add float -> vreg conversion + local optimizations on !x86
14459 * - figure out how to handle decomposed branches during optimizations, ie.
14460 * compare+branch, op_jump_table+op_br etc.
14461 * - promote RuntimeXHandles to vregs
14462 * - vtype cleanups:
14463 * - add a NEW_VARLOADA_VREG macro
14464 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14465 * accessing vtype fields.
14466 * - get rid of I8CONST on 64 bit platforms
14467 * - dealing with the increase in code size due to branches created during opcode
14469 * - use extended basic blocks
14470 * - all parts of the JIT
14471 * - handle_global_vregs () && local regalloc
14472 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14473 * - sources of increase in code size:
14476 * - isinst and castclass
14477 * - lvregs not allocated to global registers even if used multiple times
14478 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14480 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14481 * - add all micro optimizations from the old JIT
14482 * - put tree optimizations into the deadce pass
14483 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14484 * specific function.
14485 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14486 * fcompare + branchCC.
14487 * - create a helper function for allocating a stack slot, taking into account
14488 * MONO_CFG_HAS_SPILLUP.
14490 * - merge the ia64 switch changes.
14491 * - optimize mono_regstate2_alloc_int/float.
14492 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14493 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14494 * parts of the tree could be separated by other instructions, killing the tree
14495 * arguments, or stores killing loads etc. Also, should we fold loads into other
14496 * instructions if the result of the load is used multiple times ?
14497 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14498 * - LAST MERGE: 108395.
14499 * - when returning vtypes in registers, generate IR and append it to the end of the
14500 * last bb instead of doing it in the epilog.
14501 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14509 - When to decompose opcodes:
14510 - earlier: this makes some optimizations hard to implement, since the low level IR
14511 no longer contains the necessary information. But it is easier to do.
14512 - later: harder to implement, enables more optimizations.
14513 - Branches inside bblocks:
14514 - created when decomposing complex opcodes.
14515 - branches to another bblock: harmless, but not tracked by the branch
14516 optimizations, so need to branch to a label at the start of the bblock.
14517 - branches to inside the same bblock: very problematic, trips up the local
14518 reg allocator. Can be fixed by splitting the current bblock, but that is a
14519 complex operation, since some local vregs can become global vregs etc.
14520 - Local/global vregs:
14521 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14522 local register allocator.
14523 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14524 structure, created by mono_create_var (). Assigned to hregs or the stack by
14525 the global register allocator.
14526 - When to do optimizations like alu->alu_imm:
14527 - earlier -> saves work later on since the IR will be smaller/simpler
14528 - later -> can work on more instructions
14529 - Handling of valuetypes:
14530 - When a vtype is pushed on the stack, a new temporary is created, an
14531 instruction computing its address (LDADDR) is emitted and pushed on
14532 the stack. Need to optimize cases when the vtype is used immediately as in
14533 argument passing, stloc etc.
14534 - Instead of the to_end stuff in the old JIT, simply call the function handling
14535 the values on the stack before emitting the last instruction of the bb.
14538 #endif /* DISABLE_JIT */