 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
72 #include "aot-compiler.h"
73 #include "mini-llvm.h"
75 #define BRANCH_COST 10
76 #define INLINE_LENGTH_LIMIT 20
78 /* These have 'cfg' as an implicit argument */
79 #define INLINE_FAILURE(msg) do { \
80 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
81 inline_failure (cfg, msg); \
82 goto exception_exit; \
85 #define CHECK_CFG_EXCEPTION do {\
86 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
87 goto exception_exit; \
89 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
90 method_access_failure ((cfg), (method), (cmethod)); \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
111 goto exception_exit; \
113 #define DISABLE_AOT(cfg) do { \
114 if ((cfg)->verbose_level >= 2) \
115 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
116 (cfg)->disable_aot = TRUE; \
118 #define LOAD_ERROR do { \
119 break_on_unverified (); \
120 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
121 goto exception_exit; \
124 #define TYPE_LOAD_ERROR(klass) do { \
125 cfg->exception_ptr = klass; \
129 #define CHECK_CFG_ERROR do {\
130 if (!mono_error_ok (&cfg->error)) { \
131 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
132 goto mono_error_exit; \
136 /* Determine whenever 'ins' represents a load of the 'this' argument */
137 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
139 static int ldind_to_load_membase (int opcode);
140 static int stind_to_store_membase (int opcode);
142 int mono_op_to_op_imm (int opcode);
143 int mono_op_to_op_imm_noemul (int opcode);
145 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
147 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
148 guchar *ip, guint real_offset, gboolean inline_always);
150 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp, MonoInst *imt_arg);
152 /* helper methods signatures */
153 static MonoMethodSignature *helper_sig_domain_get;
154 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
155 static MonoMethodSignature *helper_sig_llvmonly_imt_thunk;
158 * Instruction metadata
166 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
167 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
173 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
178 /* keep in sync with the enum in mini.h */
181 #include "mini-ops.h"
186 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
187 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
189 * This should contain the index of the last sreg + 1. This is not the same
190 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
192 const gint8 ins_sreg_counts[] = {
193 #include "mini-ops.h"
198 #define MONO_INIT_VARINFO(vi,id) do { \
199 (vi)->range.first_use.pos.bid = 0xffff; \
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 type = &type->data.generic_class->container_class->byval_arg;
327 g_assert (cfg->gshared);
328 if (mini_type_var_is_vt (type))
331 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
333 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
339 mono_print_bb (MonoBasicBlock *bb, const char *msg)
344 printf ("\n%s %d: [IN: ", msg, bb->block_num);
345 for (i = 0; i < bb->in_count; ++i)
346 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
348 for (i = 0; i < bb->out_count; ++i)
349 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
351 for (tree = bb->code; tree; tree = tree->next)
352 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *   Build the cached MonoMethodSignature objects used when emitting calls
 *   to runtime helper functions: a "ptr" signature for the domain getter,
 *   "ptr ptr" for the rgctx lazy-fetch trampoline and "ptr ptr ptr" for
 *   the llvmonly IMT thunk (see the statics declared above).
 *   NOTE(review): the return-type/linkage line (original line 355) and the
 *   braces fall outside this chunk — confirm against the full file.
 */
356 mono_create_helper_signatures (void)
358 helper_sig_domain_get = mono_create_icall_signature ("ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_llvmonly_imt_thunk = mono_create_icall_signature ("ptr ptr ptr");
/*
 * break_on_unverified:
 *   Debugging hook invoked when IL fails verification (used by the
 *   UNVERIFIED and LOAD_ERROR macros) so a developer can trap the exact
 *   failure point when the 'break_on_unverified' debug option is set.
 *   NOTE(review): the conditional's body (original line 367) is outside
 *   this chunk — presumably it triggers a breakpoint; verify against the
 *   full file.
 */
363 static MONO_NEVER_INLINE void
364 break_on_unverified (void)
366 if (mini_get_debug_options ()->break_on_unverified)
370 static MONO_NEVER_INLINE void
371 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
373 char *method_fname = mono_method_full_name (method, TRUE);
374 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
375 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
376 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
377 g_free (method_fname);
378 g_free (cil_method_fname);
381 static MONO_NEVER_INLINE void
382 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
384 char *method_fname = mono_method_full_name (method, TRUE);
385 char *field_fname = mono_field_full_name (field);
386 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
387 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
388 g_free (method_fname);
389 g_free (field_fname);
392 static MONO_NEVER_INLINE void
393 inline_failure (MonoCompile *cfg, const char *msg)
395 if (cfg->verbose_level >= 2)
396 printf ("inline failed: %s\n", msg);
397 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
400 static MONO_NEVER_INLINE void
401 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
403 if (cfg->verbose_level > 2) \
404 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
408 static MONO_NEVER_INLINE void
409 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
412 if (cfg->verbose_level >= 2)
413 printf ("%s\n", cfg->exception_message);
414 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
418 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
419 * foo<T> (int i) { ldarg.0; box T; }
421 #define UNVERIFIED do { \
422 if (cfg->gsharedvt) { \
423 if (cfg->verbose_level > 2) \
424 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
425 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
426 goto exception_exit; \
428 break_on_unverified (); \
432 #define GET_BBLOCK(cfg,tblock,ip) do { \
433 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
435 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
436 NEW_BBLOCK (cfg, (tblock)); \
437 (tblock)->cil_code = (ip); \
438 ADD_BBLOCK (cfg, (tblock)); \
442 #if defined(TARGET_X86) || defined(TARGET_AMD64)
443 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
444 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
445 (dest)->dreg = alloc_ireg_mp ((cfg)); \
446 (dest)->sreg1 = (sr1); \
447 (dest)->sreg2 = (sr2); \
448 (dest)->inst_imm = (imm); \
449 (dest)->backend.shift_amount = (shift); \
450 MONO_ADD_INS ((cfg)->cbb, (dest)); \
454 /* Emit conversions so both operands of a binary opcode are of the same type */
456 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
458 MonoInst *arg1 = *arg1_ref;
459 MonoInst *arg2 = *arg2_ref;
462 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
463 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
466 /* Mixing r4/r8 is allowed by the spec */
467 if (arg1->type == STACK_R4) {
468 int dreg = alloc_freg (cfg);
470 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
471 conv->type = STACK_R8;
475 if (arg2->type == STACK_R4) {
476 int dreg = alloc_freg (cfg);
478 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
479 conv->type = STACK_R8;
485 #if SIZEOF_REGISTER == 8
486 /* FIXME: Need to add many more cases */
487 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
490 int dr = alloc_preg (cfg);
491 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
492 (ins)->sreg2 = widen->dreg;
497 #define ADD_BINOP(op) do { \
498 MONO_INST_NEW (cfg, ins, (op)); \
500 ins->sreg1 = sp [0]->dreg; \
501 ins->sreg2 = sp [1]->dreg; \
502 type_from_op (cfg, ins, sp [0], sp [1]); \
504 /* Have to insert a widening op */ \
505 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
506 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
507 MONO_ADD_INS ((cfg)->cbb, (ins)); \
508 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
511 #define ADD_UNOP(op) do { \
512 MONO_INST_NEW (cfg, ins, (op)); \
514 ins->sreg1 = sp [0]->dreg; \
515 type_from_op (cfg, ins, sp [0], NULL); \
517 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
518 MONO_ADD_INS ((cfg)->cbb, (ins)); \
519 *sp++ = mono_decompose_opcode (cfg, ins); \
522 #define ADD_BINCOND(next_block) do { \
525 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
526 cmp->sreg1 = sp [0]->dreg; \
527 cmp->sreg2 = sp [1]->dreg; \
528 type_from_op (cfg, cmp, sp [0], sp [1]); \
530 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
531 type_from_op (cfg, ins, sp [0], sp [1]); \
532 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
533 GET_BBLOCK (cfg, tblock, target); \
534 link_bblock (cfg, cfg->cbb, tblock); \
535 ins->inst_true_bb = tblock; \
536 if ((next_block)) { \
537 link_bblock (cfg, cfg->cbb, (next_block)); \
538 ins->inst_false_bb = (next_block); \
539 start_new_bblock = 1; \
541 GET_BBLOCK (cfg, tblock, ip); \
542 link_bblock (cfg, cfg->cbb, tblock); \
543 ins->inst_false_bb = tblock; \
544 start_new_bblock = 2; \
546 if (sp != stack_start) { \
547 handle_stack_args (cfg, stack_start, sp - stack_start); \
548 CHECK_UNVERIFIABLE (cfg); \
550 MONO_ADD_INS (cfg->cbb, cmp); \
551 MONO_ADD_INS (cfg->cbb, ins); \
555 * link_bblock: Links two basic blocks
557 * links two basic blocks in the control flow graph, the 'from'
558 * argument is the starting block and the 'to' argument is the block
559 * the control flow ends to after 'from'.
562 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
564 MonoBasicBlock **newa;
568 if (from->cil_code) {
570 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
572 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
575 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
577 printf ("edge from entry to exit\n");
582 for (i = 0; i < from->out_count; ++i) {
583 if (to == from->out_bb [i]) {
589 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
590 for (i = 0; i < from->out_count; ++i) {
591 newa [i] = from->out_bb [i];
599 for (i = 0; i < to->in_count; ++i) {
600 if (from == to->in_bb [i]) {
606 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
607 for (i = 0; i < to->in_count; ++i) {
608 newa [i] = to->in_bb [i];
/*
 * mono_link_bblock:
 *   Public wrapper around the static link_bblock () helper above; adds an
 *   edge FROM -> TO in the control-flow graph.
 *   NOTE(review): the return-type line and braces fall outside this chunk.
 */
617 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
619 link_bblock (cfg, from, to);
623 * mono_find_block_region:
625 * We mark each basic block with a region ID. We use that to avoid BB
626 * optimizations when blocks are in different regions.
629 * A region token that encodes where this region is, and information
630 * about the clause owner for this block.
632 * The region encodes the try/catch/filter clause that owns this block
633 * as well as the type. -1 is a special value that represents a block
634 * that is in none of try/catch/filter.
637 mono_find_block_region (MonoCompile *cfg, int offset)
639 MonoMethodHeader *header = cfg->header;
640 MonoExceptionClause *clause;
643 for (i = 0; i < header->num_clauses; ++i) {
644 clause = &header->clauses [i];
645 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
646 (offset < (clause->handler_offset)))
647 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
649 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
650 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
651 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
652 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
653 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
655 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
658 for (i = 0; i < header->num_clauses; ++i) {
659 clause = &header->clauses [i];
661 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
662 return ((i + 1) << 8) | clause->flags;
669 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
671 MonoMethodHeader *header = cfg->header;
672 MonoExceptionClause *clause;
676 for (i = 0; i < header->num_clauses; ++i) {
677 clause = &header->clauses [i];
678 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
679 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
680 if (clause->flags == type)
681 res = g_list_append (res, clause);
688 mono_create_spvar_for_region (MonoCompile *cfg, int region)
692 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
696 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
697 /* prevent it from being register allocated */
698 var->flags |= MONO_INST_VOLATILE;
700 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/*
 * mono_find_exvar_for_offset:
 *   Look up — without creating — the exception variable registered for the
 *   IL OFFSET of a handler; g_hash_table_lookup () yields NULL when none
 *   was created yet (see mono_create_exvar_for_offset () below).
 *   NOTE(review): the return-type line and braces fall outside this chunk.
 */
704 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
706 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
710 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
714 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
718 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
719 /* prevent it from being register allocated */
720 var->flags |= MONO_INST_VOLATILE;
722 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
728 * Returns the type used in the eval stack when @type is loaded.
729 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
732 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
736 type = mini_get_underlying_type (type);
737 inst->klass = klass = mono_class_from_mono_type (type);
739 inst->type = STACK_MP;
744 switch (type->type) {
746 inst->type = STACK_INV;
754 inst->type = STACK_I4;
759 case MONO_TYPE_FNPTR:
760 inst->type = STACK_PTR;
762 case MONO_TYPE_CLASS:
763 case MONO_TYPE_STRING:
764 case MONO_TYPE_OBJECT:
765 case MONO_TYPE_SZARRAY:
766 case MONO_TYPE_ARRAY:
767 inst->type = STACK_OBJ;
771 inst->type = STACK_I8;
774 inst->type = cfg->r4_stack_type;
777 inst->type = STACK_R8;
779 case MONO_TYPE_VALUETYPE:
780 if (type->data.klass->enumtype) {
781 type = mono_class_enum_basetype (type->data.klass);
785 inst->type = STACK_VTYPE;
788 case MONO_TYPE_TYPEDBYREF:
789 inst->klass = mono_defaults.typed_reference_class;
790 inst->type = STACK_VTYPE;
792 case MONO_TYPE_GENERICINST:
793 type = &type->data.generic_class->container_class->byval_arg;
797 g_assert (cfg->gshared);
798 if (mini_is_gsharedvt_type (type)) {
799 g_assert (cfg->gsharedvt);
800 inst->type = STACK_VTYPE;
802 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
806 g_error ("unknown type 0x%02x in eval stack type", type->type);
811 * The following tables are used to quickly validate the IL code in type_from_op ().
814 bin_num_table [STACK_MAX] [STACK_MAX] = {
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
820 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
821 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
828 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
831 /* reduce the size of this table */
833 bin_int_table [STACK_MAX] [STACK_MAX] = {
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
845 bin_comp_table [STACK_MAX] [STACK_MAX] = {
846 /* Inv i L p F & O vt r4 */
848 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
849 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
850 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
851 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
852 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
853 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
854 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
855 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
858 /* reduce the size of this table */
860 shift_table [STACK_MAX] [STACK_MAX] = {
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
867 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
872 * Tables to map from the non-specific opcode to the matching
873 * type-specific opcode.
875 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
877 binops_op_map [STACK_MAX] = {
878 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
881 /* handles from CEE_NEG to CEE_CONV_U8 */
883 unops_op_map [STACK_MAX] = {
884 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
887 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
889 ovfops_op_map [STACK_MAX] = {
890 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
893 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
895 ovf2ops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
899 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
901 ovf3ops_op_map [STACK_MAX] = {
902 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
905 /* handles from CEE_BEQ to CEE_BLT_UN */
907 beqops_op_map [STACK_MAX] = {
908 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
911 /* handles from CEE_CEQ to CEE_CLT_UN */
913 ceqops_op_map [STACK_MAX] = {
914 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
918 * Sets ins->type (the type on the eval stack) according to the
919 * type of the opcode and the arguments to it.
920 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
922 * FIXME: this function sets ins->type unconditionally in some cases, but
923 * it should set it to invalid for some types (a conv.x on an object)
926 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
928 switch (ins->opcode) {
935 /* FIXME: check unverifiable args for STACK_MP */
936 ins->type = bin_num_table [src1->type] [src2->type];
937 ins->opcode += binops_op_map [ins->type];
944 ins->type = bin_int_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
950 ins->type = shift_table [src1->type] [src2->type];
951 ins->opcode += binops_op_map [ins->type];
956 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
957 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
958 ins->opcode = OP_LCOMPARE;
959 else if (src1->type == STACK_R4)
960 ins->opcode = OP_RCOMPARE;
961 else if (src1->type == STACK_R8)
962 ins->opcode = OP_FCOMPARE;
964 ins->opcode = OP_ICOMPARE;
966 case OP_ICOMPARE_IMM:
967 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
968 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
969 ins->opcode = OP_LCOMPARE_IMM;
981 ins->opcode += beqops_op_map [src1->type];
984 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
985 ins->opcode += ceqops_op_map [src1->type];
991 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
992 ins->opcode += ceqops_op_map [src1->type];
996 ins->type = neg_table [src1->type];
997 ins->opcode += unops_op_map [ins->type];
1000 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1001 ins->type = src1->type;
1003 ins->type = STACK_INV;
1004 ins->opcode += unops_op_map [ins->type];
1010 ins->type = STACK_I4;
1011 ins->opcode += unops_op_map [src1->type];
1014 ins->type = STACK_R8;
1015 switch (src1->type) {
1018 ins->opcode = OP_ICONV_TO_R_UN;
1021 ins->opcode = OP_LCONV_TO_R_UN;
1025 case CEE_CONV_OVF_I1:
1026 case CEE_CONV_OVF_U1:
1027 case CEE_CONV_OVF_I2:
1028 case CEE_CONV_OVF_U2:
1029 case CEE_CONV_OVF_I4:
1030 case CEE_CONV_OVF_U4:
1031 ins->type = STACK_I4;
1032 ins->opcode += ovf3ops_op_map [src1->type];
1034 case CEE_CONV_OVF_I_UN:
1035 case CEE_CONV_OVF_U_UN:
1036 ins->type = STACK_PTR;
1037 ins->opcode += ovf2ops_op_map [src1->type];
1039 case CEE_CONV_OVF_I1_UN:
1040 case CEE_CONV_OVF_I2_UN:
1041 case CEE_CONV_OVF_I4_UN:
1042 case CEE_CONV_OVF_U1_UN:
1043 case CEE_CONV_OVF_U2_UN:
1044 case CEE_CONV_OVF_U4_UN:
1045 ins->type = STACK_I4;
1046 ins->opcode += ovf2ops_op_map [src1->type];
1049 ins->type = STACK_PTR;
1050 switch (src1->type) {
1052 ins->opcode = OP_ICONV_TO_U;
1056 #if SIZEOF_VOID_P == 8
1057 ins->opcode = OP_LCONV_TO_U;
1059 ins->opcode = OP_MOVE;
1063 ins->opcode = OP_LCONV_TO_U;
1066 ins->opcode = OP_FCONV_TO_U;
1072 ins->type = STACK_I8;
1073 ins->opcode += unops_op_map [src1->type];
1075 case CEE_CONV_OVF_I8:
1076 case CEE_CONV_OVF_U8:
1077 ins->type = STACK_I8;
1078 ins->opcode += ovf3ops_op_map [src1->type];
1080 case CEE_CONV_OVF_U8_UN:
1081 case CEE_CONV_OVF_I8_UN:
1082 ins->type = STACK_I8;
1083 ins->opcode += ovf2ops_op_map [src1->type];
1086 ins->type = cfg->r4_stack_type;
1087 ins->opcode += unops_op_map [src1->type];
1090 ins->type = STACK_R8;
1091 ins->opcode += unops_op_map [src1->type];
1094 ins->type = STACK_R8;
1098 ins->type = STACK_I4;
1099 ins->opcode += ovfops_op_map [src1->type];
1102 case CEE_CONV_OVF_I:
1103 case CEE_CONV_OVF_U:
1104 ins->type = STACK_PTR;
1105 ins->opcode += ovfops_op_map [src1->type];
1108 case CEE_ADD_OVF_UN:
1110 case CEE_MUL_OVF_UN:
1112 case CEE_SUB_OVF_UN:
1113 ins->type = bin_num_table [src1->type] [src2->type];
1114 ins->opcode += ovfops_op_map [src1->type];
1115 if (ins->type == STACK_R8)
1116 ins->type = STACK_INV;
1118 case OP_LOAD_MEMBASE:
1119 ins->type = STACK_PTR;
1121 case OP_LOADI1_MEMBASE:
1122 case OP_LOADU1_MEMBASE:
1123 case OP_LOADI2_MEMBASE:
1124 case OP_LOADU2_MEMBASE:
1125 case OP_LOADI4_MEMBASE:
1126 case OP_LOADU4_MEMBASE:
1127 ins->type = STACK_PTR;
1129 case OP_LOADI8_MEMBASE:
1130 ins->type = STACK_I8;
1132 case OP_LOADR4_MEMBASE:
1133 ins->type = cfg->r4_stack_type;
1135 case OP_LOADR8_MEMBASE:
1136 ins->type = STACK_R8;
1139 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1143 if (ins->type == STACK_MP)
1144 ins->klass = mono_defaults.object_class;
1149 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1155 param_table [STACK_MAX] [STACK_MAX] = {
1160 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1165 switch (args->type) {
1175 for (i = 0; i < sig->param_count; ++i) {
1176 switch (args [i].type) {
1180 if (!sig->params [i]->byref)
1184 if (sig->params [i]->byref)
1186 switch (sig->params [i]->type) {
1187 case MONO_TYPE_CLASS:
1188 case MONO_TYPE_STRING:
1189 case MONO_TYPE_OBJECT:
1190 case MONO_TYPE_SZARRAY:
1191 case MONO_TYPE_ARRAY:
1198 if (sig->params [i]->byref)
1200 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1209 /*if (!param_table [args [i].type] [sig->params [i]->type])
1217 * When we need a pointer to the current domain many times in a method, we
1218 * call mono_domain_get() once and we store the result in a local variable.
1219 * This function returns the variable that represents the MonoDomain*.
1221 inline static MonoInst *
1222 mono_get_domainvar (MonoCompile *cfg)
1224 if (!cfg->domainvar)
1225 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1226 return cfg->domainvar;
1230 * The got_var contains the address of the Global Offset Table when AOT
1234 mono_get_got_var (MonoCompile *cfg)
1236 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1238 if (!cfg->got_var) {
1239 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1241 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *   Lazily create and return the local variable holding the runtime generic
 *   context (cfg->rgctx_var) of a shared method; asserts cfg->gshared.  The
 *   variable is marked MONO_INST_VOLATILE so it stays stack-allocated and is
 *   never promoted to a register.
 *   NOTE(review): the return-type line and the braces fall outside this
 *   chunk (see the gaps in the embedded line numbering).
 */
1245 mono_get_vtable_var (MonoCompile *cfg)
1247 g_assert (cfg->gshared);
1249 if (!cfg->rgctx_var) {
1250 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1251 /* force the var to be stack allocated */
1252 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1255 return cfg->rgctx_var;
1259 type_from_stack_type (MonoInst *ins) {
1260 switch (ins->type) {
1261 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1262 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1263 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1264 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1265 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1267 return &ins->klass->this_arg;
1268 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1269 case STACK_VTYPE: return &ins->klass->byval_arg;
1271 g_error ("stack type %d to monotype not handled\n", ins->type);
1276 static G_GNUC_UNUSED int
1277 type_to_stack_type (MonoCompile *cfg, MonoType *t)
/* Maps a MonoType to its STACK_* evaluation-stack category.
 * NOTE(review): several case labels and return statements of this switch are
 * elided in this view (the primitive-int, I/U, R8 and VAR arms) -- confirm
 * against the full file before editing. */
1279 t = mono_type_get_underlying_type (t);
1291 case MONO_TYPE_FNPTR:
1293 case MONO_TYPE_CLASS:
1294 case MONO_TYPE_STRING:
1295 case MONO_TYPE_OBJECT:
1296 case MONO_TYPE_SZARRAY:
1297 case MONO_TYPE_ARRAY:
/* R4 may be widened to R8 depending on the backend, hence the cfg field. */
1303 return cfg->r4_stack_type;
1306 case MONO_TYPE_VALUETYPE:
1307 case MONO_TYPE_TYPEDBYREF:
1309 case MONO_TYPE_GENERICINST:
1310 if (mono_type_generic_inst_is_valuetype (t))
1316 g_assert_not_reached ();
1323 array_access_to_klass (int opcode)
/* Returns the element MonoClass accessed by an array ldelem/stelem CIL
 * opcode.
 * NOTE(review): the CEE_* case labels preceding most returns are elided in
 * this view; only the LDELEM_REF/STELEM_REF labels are visible. */
1327 return mono_defaults.byte_class;
1329 return mono_defaults.uint16_class;
1332 return mono_defaults.int_class;
1335 return mono_defaults.sbyte_class;
1338 return mono_defaults.int16_class;
1341 return mono_defaults.int32_class;
1343 return mono_defaults.uint32_class;
1346 return mono_defaults.int64_class;
1349 return mono_defaults.single_class;
1352 return mono_defaults.double_class;
1353 case CEE_LDELEM_REF:
1354 case CEE_STELEM_REF:
1355 return mono_defaults.object_class;
1357 g_assert_not_reached ();
1363 * We try to share variables when possible
/* Returns a local variable to hold a stack value of type ins->type at stack
 * depth 'slot'.  Integer-like values are cached in cfg->intvars keyed by
 * (type, slot) so join points reuse the same local; other types always get
 * a fresh variable. */
1366 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1371 /* inlining can result in deeper stacks */
1372 if (slot >= cfg->header->max_stack)
1373 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Flattened index into the (stack slot x stack type) cache. */
1375 pos = ins->type - 1 + slot * STACK_MAX;
1377 switch (ins->type) {
1384 if ((vnum = cfg->intvars [pos]))
1385 return cfg->varinfo [vnum];
1386 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1387 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to a fresh variable. */
1390 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1396 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1399 * Don't use this if a generic_context is set, since that means AOT can't
1400 * look up the method using just the image+token.
1401 * table == 0 means this is a reference made from a wrapper.
1403 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1404 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1405 jump_info_token->image = image;
1406 jump_info_token->token = token;
1407 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1412 * This function is called to handle items that are left on the evaluation stack
1413 * at basic block boundaries. What happens is that we save the values to local variables
1414 * and we reload them later when first entering the target basic block (with the
1415 * handle_loaded_temps () function).
1416 * A single joint point will use the same variables (stored in the array bb->out_stack or
1417 * bb->in_stack, if the basic block is before or after the joint point).
1419 * This function needs to be called _before_ emitting the last instruction of
1420 * the bb (i.e. before emitting a branch).
1421 * If the stack merge fails at a join point, cfg->unverifiable is set.
1424 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1427 MonoBasicBlock *bb = cfg->cbb;
1428 MonoBasicBlock *outb;
1429 MonoInst *inst, **locals;
1434 if (cfg->verbose_level > 3)
1435 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick/create the out_stack variables. */
1436 if (!bb->out_scount) {
1437 bb->out_scount = count;
1438 //printf ("bblock %d has out:", bb->block_num);
/* Reuse a successor's in_stack when one already exists. */
1440 for (i = 0; i < bb->out_count; ++i) {
1441 outb = bb->out_bb [i];
1442 /* exception handlers are linked, but they should not be considered for stack args */
1443 if (outb->flags & BB_EXCEPTION_HANDLER)
1445 //printf (" %d", outb->block_num);
1446 if (outb->in_stack) {
1448 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack variables. */
1454 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1455 for (i = 0; i < count; ++i) {
1457 * try to reuse temps already allocated for this purpouse, if they occupy the same
1458 * stack slot and if they are of the same type.
1459 * This won't cause conflicts since if 'local' is used to
1460 * store one of the values in the in_stack of a bblock, then
1461 * the same variable will be used for the same outgoing stack
1463 * This doesn't work when inlining methods, since the bblocks
1464 * in the inlined methods do not inherit their in_stack from
1465 * the bblock they are inlined to. See bug #58863 for an
1468 if (cfg->inlined_method)
1469 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1471 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every (non-EH) successor. */
1476 for (i = 0; i < bb->out_count; ++i) {
1477 outb = bb->out_bb [i];
1478 /* exception handlers are linked, but they should not be considered for stack args */
1479 if (outb->flags & BB_EXCEPTION_HANDLER)
1481 if (outb->in_scount) {
1482 if (outb->in_scount != bb->out_scount) {
/* Mismatched stack depth at a join point: the IL is not verifiable. */
1483 cfg->unverifiable = TRUE;
1486 continue; /* check they are the same locals */
1488 outb->in_scount = count;
1489 outb->in_stack = bb->out_stack;
1492 locals = bb->out_stack;
/* Spill the current stack values into the shared locals. */
1494 for (i = 0; i < count; ++i) {
1495 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1496 inst->cil_code = sp [i]->cil_code;
1497 sp [i] = locals [i];
1498 if (cfg->verbose_level > 3)
1499 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1503 * It is possible that the out bblocks already have in_stack assigned, and
1504 * the in_stacks differ. In this case, we will store to all the different
1511 /* Find a bblock which has a different in_stack */
1513 while (bindex < bb->out_count) {
1514 outb = bb->out_bb [bindex];
1515 /* exception handlers are linked, but they should not be considered for stack args */
1516 if (outb->flags & BB_EXCEPTION_HANDLER) {
1520 if (outb->in_stack != locals) {
/* Store the same values into this successor's distinct in_stack too. */
1521 for (i = 0; i < count; ++i) {
1522 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1523 inst->cil_code = sp [i]->cil_code;
1524 sp [i] = locals [i];
1525 if (cfg->verbose_level > 3)
1526 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1528 locals = outb->in_stack;
1538 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1542 if (cfg->compile_aot) {
1543 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1548 ji.type = patch_type;
1549 ji.data.target = data;
1550 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
1552 EMIT_NEW_PCONST (cfg, ins, target);
/* Emit code that sets intf_bit_reg to nonzero iff the interface bitmap at
 * [base_reg + offset] has the bit for klass's interface id set.
 * NOTE(review): the #else / #endif lines and a few declarations of this
 * function are elided in this view; the two visible paths are the
 * COMPRESSED_INTERFACE_BITMAP icall path and the open-coded bitmap path. */
1558 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1560 int ibitmap_reg = alloc_preg (cfg);
1561 #ifdef COMPRESSED_INTERFACE_BITMAP
1563 MonoInst *res, *ins;
1564 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1565 MONO_ADD_INS (cfg->cbb, ins);
/* Compressed bitmaps are decoded by a runtime helper. */
1567 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1568 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1569 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1571 int ibitmap_byte_reg = alloc_preg (cfg);
1573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* Under AOT the interface id is unknown, so compute byte index and bit
 * mask at runtime instead of folding them into immediates. */
1575 if (cfg->compile_aot) {
1576 int iid_reg = alloc_preg (cfg);
1577 int shifted_iid_reg = alloc_preg (cfg);
1578 int ibitmap_byte_address_reg = alloc_preg (cfg);
1579 int masked_iid_reg = alloc_preg (cfg);
1580 int iid_one_bit_reg = alloc_preg (cfg);
1581 int iid_bit_reg = alloc_preg (cfg);
1582 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1584 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1585 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1587 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1588 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1589 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: interface_id is known, fold byte offset and mask statically. */
1591 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1598 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1599 * stored in "klass_reg" implements the interface "klass".
1602 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1604 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1608 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1609 * stored in "vtable_reg" implements the interface "klass".
1612 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1614 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1618 * Emit code which checks whenever the interface id of @klass is smaller than
1619 * than the value given by max_iid_reg.
1622 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1623 MonoBasicBlock *false_target)
1625 if (cfg->compile_aot) {
1626 int iid_reg = alloc_preg (cfg);
1627 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1628 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1633 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1635 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1638 /* Same as above, but obtains max_iid from a vtable */
1640 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1641 MonoBasicBlock *false_target)
1643 int max_iid_reg = alloc_preg (cfg);
1645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1646 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1649 /* Same as above, but obtains max_iid from a klass */
1651 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1652 MonoBasicBlock *false_target)
1654 int max_iid_reg = alloc_preg (cfg);
1656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1657 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1661 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1663 int idepth_reg = alloc_preg (cfg);
1664 int stypes_reg = alloc_preg (cfg);
1665 int stype = alloc_preg (cfg);
1667 mono_class_setup_supertypes (klass);
1669 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1670 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1671 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1674 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1677 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1678 } else if (cfg->compile_aot) {
1679 int const_reg = alloc_preg (cfg);
1680 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1681 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1689 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1691 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1695 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1697 int intf_reg = alloc_preg (cfg);
1699 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1700 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1703 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1705 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1709 * Variant of the above that takes a register to the class, not the vtable.
1712 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1714 int intf_bit_reg = alloc_preg (cfg);
1716 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1717 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1720 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1722 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1726 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1729 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1731 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1732 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1734 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1738 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1740 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1744 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1746 if (cfg->compile_aot) {
1747 int const_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1749 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1757 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emits a castclass check for arrays (element-type/rank checks, with special
 * cases for enums and object elements) and, for non-array classes, an
 * idepth + supertypes check.  Recurses through mini_emit_castclass () for
 * array-of-array element types.
 * NOTE(review): the surrounding if/else lines selecting the array vs.
 * non-array paths are elided in this view -- confirm the exact branch
 * structure against the full file. */
1760 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1763 int rank_reg = alloc_preg (cfg);
1764 int eclass_reg = alloc_preg (cfg);
1766 g_assert (!klass_inst);
/* The rank of the object must match the rank of the target class. */
1767 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1769 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1770 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1772 if (klass->cast_class == mono_defaults.object_class) {
1773 int parent_reg = alloc_preg (cfg);
1774 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1775 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1776 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1777 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1778 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1779 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1780 } else if (klass->cast_class == mono_defaults.enum_class) {
1781 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1782 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1783 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1785 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1786 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1789 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1790 /* Check that the object is a vector too */
1791 int bounds_reg = alloc_preg (cfg);
1792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1793 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1794 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: idepth + supertypes-table subclass check. */
1797 int idepth_reg = alloc_preg (cfg);
1798 int stypes_reg = alloc_preg (cfg);
1799 int stype = alloc_preg (cfg);
1801 mono_class_setup_supertypes (klass);
1803 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1804 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1805 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1806 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1810 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1815 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1817 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emits inline code to set SIZE bytes at [destreg + offset] to VAL (only 0 is
 * supported, asserted below).  Small aligned sizes use a single immediate
 * store; otherwise a value register is filled and stores of decreasing width
 * are emitted.
 * NOTE(review): the switch/while scaffolding around the stores is elided in
 * this view; the visible stores suggest a widest-first store loop. */
1821 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1825 g_assert (val == 0);
/* Fast path: one immediate store when the size fits the alignment. */
1830 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1833 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1836 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1841 #if SIZEOF_REGISTER == 8
1843 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value once, then store it repeatedly. */
1849 val_reg = alloc_preg (cfg);
1851 if (SIZEOF_REGISTER == 8)
1852 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1854 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1857 /* This could be optimized further if neccesary */
1859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1866 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1868 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emits inline code copying SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset] via load/store pairs of decreasing width.
 * NOTE(review): the while-loop scaffolding and the offset/size bookkeeping
 * between the load/store pairs are elided in this view. */
1897 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1904 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1905 g_assert (size < 10000);
1908 /* This could be optimized further if neccesary */
/* Byte copies for the unaligned prefix. */
1910 cur_reg = alloc_preg (cfg);
1911 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1912 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte copies where the backend tolerates unaligned access. */
1919 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1921 cur_reg = alloc_preg (cfg);
1922 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1923 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1931 cur_reg = alloc_preg (cfg);
1932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1939 cur_reg = alloc_preg (cfg);
1940 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1941 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1947 cur_reg = alloc_preg (cfg);
1948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1949 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Emits an instruction storing sreg1 into the TLS slot TLS_KEY.  Under AOT
 * the TLS offset is loaded via a patchable constant (OP_TLS_SET_REG);
 * otherwise it is resolved now (OP_TLS_SET).
 * NOTE(review): the ins->sreg1 assignments and the else line between the
 * two paths are elided in this view. */
1957 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1961 if (cfg->compile_aot) {
1962 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1963 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1965 ins->sreg2 = c->dreg;
1966 MONO_ADD_INS (cfg->cbb, ins);
1968 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1970 ins->inst_offset = mini_get_tls_offset (tls_key);
1971 MONO_ADD_INS (cfg->cbb, ins);
1978 * Emit IR to push the current LMF onto the LMF stack.
1981 emit_push_lmf (MonoCompile *cfg)
1984 * Emit IR to push the LMF:
1985 * lmf_addr = <lmf_addr from tls>
1986 * lmf->lmf_addr = lmf_addr
1987 * lmf->prev_lmf = *lmf_addr
1990 int lmf_reg, prev_lmf_reg;
1991 MonoInst *ins, *lmf_ins;
/* NOTE(review): the early-return guard, #ifdef branches and else lines of
 * this function are elided in this view; the visible code shows the
 * TLS-intrinsic fast path followed by several lmf_addr acquisition
 * strategies. */
1996 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1997 /* Load current lmf */
1998 lmf_ins = mono_get_lmf_intrinsic (cfg);
2000 MONO_ADD_INS (cfg->cbb, lmf_ins);
2001 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2002 lmf_reg = ins->dreg;
2003 /* Save previous_lmf */
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make this lmf the current one. */
2006 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2009 * Store lmf_addr in a variable, so it can be allocated to a global register.
2011 if (!cfg->lmf_addr_var)
2012 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2015 ins = mono_get_jit_tls_intrinsic (cfg);
2017 int jit_tls_dreg = ins->dreg;
2019 MONO_ADD_INS (cfg->cbb, ins);
2020 lmf_reg = alloc_preg (cfg);
2021 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Fallback: resolve the lmf address through an icall. */
2023 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2028 MONO_ADD_INS (cfg->cbb, lmf_ins);
2031 MonoInst *args [16], *jit_tls_ins, *ins;
2033 /* Inline mono_get_lmf_addr () */
2034 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2036 /* Load mono_jit_tls_id */
2037 if (cfg->compile_aot)
2038 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2040 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2041 /* call pthread_getspecific () */
2042 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2043 /* lmf_addr = &jit_tls->lmf */
2044 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2047 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2051 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2053 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2054 lmf_reg = ins->dreg;
2056 prev_lmf_reg = alloc_preg (cfg);
2057 /* Save previous_lmf */
2058 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2059 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link this LMF at the head of the LMF stack: *lmf_addr = lmf. */
2061 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2068 * Emit IR to pop the current LMF from the LMF stack.
2071 emit_pop_lmf (MonoCompile *cfg)
2073 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
/* NOTE(review): the early-return guard and the MonoInst *ins declaration of
 * this function are elided in this view. */
2079 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2080 lmf_reg = ins->dreg;
/* TLS fast path: restore the previous LMF directly into the TLS slot. */
2082 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2083 /* Load previous_lmf */
2084 prev_lmf_reg = alloc_preg (cfg);
2085 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2087 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2090 * Emit IR to pop the LMF:
2091 * *(lmf->lmf_addr) = lmf->prev_lmf
2093 /* This could be called before emit_push_lmf () */
2094 if (!cfg->lmf_addr_var)
2095 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2096 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2098 prev_lmf_reg = alloc_preg (cfg);
2099 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2100 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2105 emit_instrumentation_call (MonoCompile *cfg, void *func)
2107 MonoInst *iargs [1];
2110 * Avoid instrumenting inlined methods since it can
2111 * distort profiling results.
2113 if (cfg->method != cfg->current_method)
2116 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2117 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2118 mono_emit_jit_icall (cfg, func, iargs);
/* Selects the call IR opcode for a call returning TYPE: the *_REG variant
 * for calli, *_MEMBASE for virtual calls, plain otherwise.  Byref returns
 * and enums/generic insts are normalized before dispatch.
 * NOTE(review): several case labels and the byref handling of this switch
 * are elided in this view. */
2123 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2126 type = mini_get_underlying_type (type);
2127 switch (type->type) {
2128 case MONO_TYPE_VOID:
2129 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2136 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2140 case MONO_TYPE_FNPTR:
2141 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2142 case MONO_TYPE_CLASS:
2143 case MONO_TYPE_STRING:
2144 case MONO_TYPE_OBJECT:
2145 case MONO_TYPE_SZARRAY:
2146 case MONO_TYPE_ARRAY:
2147 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2150 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2153 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2155 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2157 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2158 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying integral type. */
2159 if (type->data.klass->enumtype) {
2160 type = mono_class_enum_basetype (type->data.klass);
2163 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2164 case MONO_TYPE_TYPEDBYREF:
2165 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2166 case MONO_TYPE_GENERICINST:
2167 type = &type->data.generic_class->container_class->byval_arg;
2170 case MONO_TYPE_MVAR:
2172 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2174 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2180 * target_type_is_incompatible:
2181 * @cfg: MonoCompile context
2183 * Check that the item @arg on the evaluation stack can be stored
2184 * in the target type (can be a local, or field, etc).
2185 * The cfg arg can be used to check if we need verification or just
2188 * Returns: non-0 value if arg can't be stored on a target.
2191 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2193 MonoType *simple_type;
/* NOTE(review): numerous case labels and the return-0/return-1 lines of
 * this switch are elided in this view; only the condition lines remain. */
2196 if (target->byref) {
2197 /* FIXME: check that the pointed to types match */
2198 if (arg->type == STACK_MP) {
2199 MonoClass *base_class = mono_class_from_mono_type (target);
2200 /* This is needed to handle gshared types + ldaddr */
2201 simple_type = mini_get_underlying_type (&base_class->byval_arg);
2202 return target->type != MONO_TYPE_I && arg->klass != base_class && arg->klass != mono_class_from_mono_type (simple_type);
2204 if (arg->type == STACK_PTR)
2209 simple_type = mini_get_underlying_type (target);
2210 switch (simple_type->type) {
2211 case MONO_TYPE_VOID:
2219 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2223 /* STACK_MP is needed when setting pinned locals */
2224 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2229 case MONO_TYPE_FNPTR:
2231 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2232 * in native int. (#688008).
2234 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2237 case MONO_TYPE_CLASS:
2238 case MONO_TYPE_STRING:
2239 case MONO_TYPE_OBJECT:
2240 case MONO_TYPE_SZARRAY:
2241 case MONO_TYPE_ARRAY:
2242 if (arg->type != STACK_OBJ)
2244 /* FIXME: check type compatibility */
2248 if (arg->type != STACK_I8)
2252 if (arg->type != cfg->r4_stack_type)
2256 if (arg->type != STACK_R8)
2259 case MONO_TYPE_VALUETYPE:
2260 if (arg->type != STACK_VTYPE)
2262 klass = mono_class_from_mono_type (simple_type);
2263 if (klass != arg->klass)
2266 case MONO_TYPE_TYPEDBYREF:
2267 if (arg->type != STACK_VTYPE)
2269 klass = mono_class_from_mono_type (simple_type);
2270 if (klass != arg->klass)
2273 case MONO_TYPE_GENERICINST:
2274 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2275 MonoClass *target_class;
2276 if (arg->type != STACK_VTYPE)
2278 klass = mono_class_from_mono_type (simple_type);
2279 target_class = mono_class_from_mono_type (target);
2280 /* The second cases is needed when doing partial sharing */
2281 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2285 if (arg->type != STACK_OBJ)
2287 /* FIXME: check type compatibility */
2291 case MONO_TYPE_MVAR:
/* Unresolved type variables only occur under generic sharing. */
2292 g_assert (cfg->gshared);
2293 if (mini_type_var_is_vt (simple_type)) {
2294 if (arg->type != STACK_VTYPE)
2297 if (arg->type != STACK_OBJ)
2302 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2308 * Prepare arguments for passing to a function call.
2309 * Return a non-zero value if the arguments can't be passed to the given
2311 * The type checks are not yet complete and some conversions may need
2312 * casts on 32 or 64 bit architectures.
2314 * FIXME: implement this using target_type_is_incompatible ()
2317 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2319 MonoType *simple_type;
/* NOTE(review): the hasthis guard, many case labels and the return lines of
 * this function are elided in this view. */
2323 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2327 for (i = 0; i < sig->param_count; ++i) {
2328 if (sig->params [i]->byref) {
2329 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2333 simple_type = mini_get_underlying_type (sig->params [i]);
2335 switch (simple_type->type) {
2336 case MONO_TYPE_VOID:
2345 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2351 case MONO_TYPE_FNPTR:
2352 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2355 case MONO_TYPE_CLASS:
2356 case MONO_TYPE_STRING:
2357 case MONO_TYPE_OBJECT:
2358 case MONO_TYPE_SZARRAY:
2359 case MONO_TYPE_ARRAY:
2360 if (args [i]->type != STACK_OBJ)
2365 if (args [i]->type != STACK_I8)
2369 if (args [i]->type != cfg->r4_stack_type)
2373 if (args [i]->type != STACK_R8)
2376 case MONO_TYPE_VALUETYPE:
/* Enums are checked against their underlying type. */
2377 if (simple_type->data.klass->enumtype) {
2378 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2381 if (args [i]->type != STACK_VTYPE)
2384 case MONO_TYPE_TYPEDBYREF:
2385 if (args [i]->type != STACK_VTYPE)
2388 case MONO_TYPE_GENERICINST:
2389 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2392 case MONO_TYPE_MVAR:
2394 if (args [i]->type != STACK_VTYPE)
2398 g_error ("unknown type 0x%02x in check_call_signature",
2406 callvirt_to_call (int opcode)
2409 case OP_CALL_MEMBASE:
2411 case OP_VOIDCALL_MEMBASE:
2413 case OP_FCALL_MEMBASE:
2415 case OP_RCALL_MEMBASE:
2417 case OP_VCALL_MEMBASE:
2419 case OP_LCALL_MEMBASE:
2422 g_assert_not_reached ();
2429 callvirt_to_call_reg (int opcode)
2432 case OP_CALL_MEMBASE:
2434 case OP_VOIDCALL_MEMBASE:
2435 return OP_VOIDCALL_REG;
2436 case OP_FCALL_MEMBASE:
2437 return OP_FCALL_REG;
2438 case OP_RCALL_MEMBASE:
2439 return OP_RCALL_REG;
2440 case OP_VCALL_MEMBASE:
2441 return OP_VCALL_REG;
2442 case OP_LCALL_MEMBASE:
2443 return OP_LCALL_REG;
2445 g_assert_not_reached ();
2451 /* Either METHOD or IMT_ARG needs to be set */
/* Emits code loading the IMT argument (an explicit imt_arg value, or the
 * method as a runtime constant) into the IMT register for CALL.
 * NOTE(review): the #ifdef MONO_ARCH_* scaffolding and if/else lines
 * separating the LLVM and non-LLVM paths are elided in this view. */
2453 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2457 if (COMPILE_LLVM (cfg)) {
2459 method_reg = alloc_preg (cfg);
2460 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2462 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2463 method_reg = ins->dreg;
/* LLVM handles the IMT register itself; just record it on the call. */
2467 call->imt_arg_reg = method_reg;
2469 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same value, passed through the arch IMT register. */
2474 method_reg = alloc_preg (cfg);
2475 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2477 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2478 method_reg = ins->dreg;
2481 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch record from the mempool MP and fill in
 *   its target.  NOTE(review): fragmentary extract — the assignments of
 *   ji->ip/ji->type and the return are not visible here.
 */
2484 static MonoJumpInfo *
2485 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2487 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2491 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Thin wrapper forwarding to mono_class_check_context_used () for KLASS.
 *   NOTE(review): fragmentary extract — any gshared guard preceding the
 *   forwarding call is not visible here.
 */
2497 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2500 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Thin wrapper forwarding to mono_method_check_context_used () for METHOD.
 *   NOTE(review): fragmentary extract — any gshared guard preceding the
 *   forwarding call is not visible here.
 */
2506 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2509 return mono_method_check_context_used (method);
2515 * check_method_sharing:
2517 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are written through OUT_PASS_VTABLE / OUT_PASS_MRGCTX, either of
 * which may be NULL when the caller does not care about that answer.
 */
2520 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2522 gboolean pass_vtable = FALSE;
2523 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods of generic classes have no usable 'this' */
2525 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2526 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2527 gboolean sharable = FALSE;
2529 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2533 * Pass vtable iff target method might
2534 * be shared, which means that sharing
2535 * is enabled for its class and its
2536 * context is sharable (and it's not a
2539 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst != NULL) take an mrgctx instead of a vtable */
2543 if (mini_method_get_context (cmethod) &&
2544 mini_method_get_context (cmethod)->method_inst) {
2545 g_assert (!pass_vtable);
2547 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2550 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2555 if (out_pass_vtable)
2556 *out_pass_vtable = pass_vtable;
2557 if (out_pass_mrgctx)
2558 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS
 *   but do not add it to a basic block yet.  CALLI selects an indirect
 *   call, VIRTUAL_ a virtual call, TAIL a tail call, RGCTX whether an
 *   rgctx argument is passed, UNBOX_TRAMPOLINE whether the target needs
 *   an unbox trampoline.  Handles valuetype returns (via vret_addr or an
 *   OP_OUTARG_VTRETADDR temp) and, on soft-float targets, converts r4
 *   arguments before the call sequence.
 *   NOTE(review): fragmentary extract — several lines between the visible
 *   ones (locals, braces, return) are missing from this view.
 */
2561 inline static MonoCallInst *
2562 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2563 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2567 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls leave the method, so emit the leave-instrumentation first */
2575 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2577 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2579 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2582 call->signature = sig;
2583 call->rgctx_reg = rgctx;
2584 sig_ret = mini_get_underlying_type (sig->ret);
2586 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* valuetype return: reuse the method's vret_addr when available */
2589 if (mini_type_is_vtype (sig_ret)) {
2590 call->vret_var = cfg->vret_addr;
2591 //g_assert_not_reached ();
2593 } else if (mini_type_is_vtype (sig_ret)) {
2594 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2597 temp->backend.is_pinvoke = sig->pinvoke;
2600 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2601 * address of return value to increase optimization opportunities.
2602 * Before vtype decomposition, the dreg of the call ins itself represents the
2603 * fact the call modifies the return value. After decomposition, the call will
2604 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2605 * will be transformed into an LDADDR.
2607 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2608 loada->dreg = alloc_preg (cfg);
2609 loada->inst_p0 = temp;
2610 /* We reference the call too since call->dreg could change during optimization */
2611 loada->inst_p1 = call;
2612 MONO_ADD_INS (cfg->cbb, loada);
2614 call->inst.dreg = temp->dreg;
2616 call->vret_var = loada;
2617 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2618 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2620 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2621 if (COMPILE_SOFT_FLOAT (cfg)) {
2623 * If the call has a float argument, we would need to do an r8->r4 conversion using
2624 * an icall, but that cannot be done during the call sequence since it would clobber
2625 * the call registers + the stack. So we do it before emitting the call.
2627 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2629 MonoInst *in = call->args [i];
2631 if (i >= sig->hasthis)
2632 t = sig->params [i - sig->hasthis];
/* the implicit 'this' argument is pointer-sized */
2634 t = &mono_defaults.int_class->byval_arg;
2635 t = mono_type_get_underlying_type (t);
2637 if (!t->byref && t->type == MONO_TYPE_R4) {
2638 MonoInst *iargs [1];
2642 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2644 /* The result will be in an int vreg */
2645 call->args [i] = conv;
2651 call->need_unbox_trampoline = unbox_trampoline;
2654 if (COMPILE_LLVM (cfg))
2655 mono_llvm_emit_call (cfg, call);
2657 mono_arch_emit_call (cfg, call);
2659 mono_arch_emit_call (cfg, call);
/* track the largest outgoing-argument area needed by any call */
2662 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2663 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument RGCTX_ARG (already copied into RGCTX_REG)
 *   to CALL, pinning it to MONO_ARCH_RGCTX_REG and marking the cfg/call
 *   as rgctx-using.
 */
2669 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2671 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2672 cfg->uses_rgctx_reg = TRUE;
2673 call->rgctx_reg = TRUE;
2675 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG and arguments
 *   ARGS, optionally passing IMT_ARG and/or RGCTX_ARG.  For pinvoke
 *   wrappers with callconv checking enabled, saves SP before the call and
 *   verifies it afterwards, throwing ExecutionEngineException on mismatch.
 *   NOTE(review): fragmentary extract — locals, braces and some glue
 *   lines between the visible ones are missing from this view.
 */
2679 inline static MonoInst*
2680 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2685 gboolean check_sp = FALSE;
2687 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2688 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2690 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* keep the rgctx value alive in its own preg across arg setup */
2695 rgctx_reg = mono_alloc_preg (cfg);
2696 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2700 if (!cfg->stack_inbalance_var)
2701 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* record SP before the call so it can be compared/restored after */
2703 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2704 ins->dreg = cfg->stack_inbalance_var->dreg;
2705 MONO_ADD_INS (cfg->cbb, ins);
2708 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2710 call->inst.sreg1 = addr->dreg;
2713 emit_imt_argument (cfg, call, NULL, imt_arg);
2715 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* read SP again after the call to detect callconv stack imbalance */
2720 sp_reg = mono_alloc_preg (cfg);
2722 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2724 MONO_ADD_INS (cfg->cbb, ins);
2726 /* Restore the stack so we don't crash when throwing the exception */
2727 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2728 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2729 MONO_ADD_INS (cfg->cbb, ins);
2731 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2732 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2736 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2738 return (MonoInst*)call;
2742 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2745 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2747 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual, possibly remoting, possibly tail) call to
 *   METHOD with signature SIG and arguments ARGS.  THIS_INS selects a
 *   virtual call; IMT_ARG/RGCTX_ARG are optional extra call arguments.
 *   Direct dispatch is used for non-virtual and sealed/final methods;
 *   delegate Invoke goes through delegate->invoke_impl; interface calls
 *   go through the IMT, other virtual calls through the vtable slot.
 *   Returns the emitted call instruction.
 *   NOTE(review): fragmentary extract — some lines between the visible
 *   ones (locals, braces, #ifdef scaffolding) are missing from this view.
 */
2750 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2751 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2753 #ifndef DISABLE_REMOTING
2754 gboolean might_be_remote = FALSE;
2756 gboolean virtual_ = this_ins != NULL;
2757 gboolean enable_for_aot = TRUE;
2760 MonoInst *call_target = NULL;
2762 gboolean need_unbox_trampoline;
2765 sig = mono_method_signature (method);
/* llvm-only mode handles interface calls elsewhere */
2767 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2768 g_assert_not_reached ();
2771 rgctx_reg = mono_alloc_preg (cfg);
2772 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2775 if (method->string_ctor) {
2776 /* Create the real signature */
2777 /* FIXME: Cache these */
2778 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2779 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2784 context_used = mini_method_check_context_used (cfg, method);
2786 #ifndef DISABLE_REMOTING
2787 might_be_remote = this_ins && sig->hasthis &&
2788 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2789 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2791 if (might_be_remote && context_used) {
2794 g_assert (cfg->gshared);
2796 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2798 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2802 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2803 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args, NULL);
2805 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2807 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2809 #ifndef DISABLE_REMOTING
2810 if (might_be_remote)
2811 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2814 call->method = method;
2815 call->inst.flags |= MONO_INST_HAS_METHOD;
2816 call->inst.inst_left = this_ins;
2817 call->tail_call = tail;
2820 int vtable_reg, slot_reg, this_reg;
2823 this_reg = this_ins->dreg;
2825 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2826 MonoInst *dummy_use;
2828 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2830 /* Make a call to delegate->invoke_impl */
2831 call->inst.inst_basereg = this_reg;
2832 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2833 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2835 /* We must emit a dummy use here because the delegate trampoline will
2836 replace the 'this' argument with the delegate target making this activation
2837 no longer a root for the delegate.
2838 This is an issue for delegates that target collectible code such as dynamic
2839 methods of GC'able assemblies.
2841 For a test case look into #667921.
2843 FIXME: a dummy use is not the best way to do it as the local register allocator
2844 will put it on a caller save register and spill it around the call.
2845 Ideally, we would either put it on a callee save register or only do the store part.
2847 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2849 return (MonoInst*)call;
2852 if ((!cfg->compile_aot || enable_for_aot) &&
2853 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2854 (MONO_METHOD_IS_FINAL (method) &&
2855 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2856 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2858 * the method is not virtual, we just need to ensure this is not null
2859 * and then we can call the method directly.
2861 #ifndef DISABLE_REMOTING
2862 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2864 * The check above ensures method is not gshared, this is needed since
2865 * gshared methods can't have wrappers.
2867 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2871 if (!method->string_ctor)
2872 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2874 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2875 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2877 * the method is virtual, but we can statically dispatch since either
2878 * its class or the method itself are sealed.
2879 * But first we need to ensure it's not a null reference.
2881 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2883 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2884 } else if (call_target) {
2885 vtable_reg = alloc_preg (cfg);
2886 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2888 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2889 call->inst.sreg1 = call_target->dreg;
/* FIX: was '&= !MONO_INST_HAS_METHOD' (logical NOT == 0), which cleared
 * ALL flags on the instruction; '~' clears only the HAS_METHOD bit. */
2890 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2892 vtable_reg = alloc_preg (cfg);
2893 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2894 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* interface dispatch: the IMT table lives just below the vtable */
2895 guint32 imt_slot = mono_method_get_imt_slot (method);
2896 emit_imt_argument (cfg, call, call->method, imt_arg);
2897 slot_reg = vtable_reg;
2898 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* plain virtual dispatch through the vtable slot */
2900 slot_reg = vtable_reg;
2901 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2902 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2904 g_assert (mono_method_signature (method)->generic_param_count);
2905 emit_imt_argument (cfg, call, call->method, imt_arg);
2909 call->inst.sreg1 = slot_reg;
2910 call->inst.inst_offset = offset;
2911 call->is_virtual = TRUE;
2915 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2918 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2920 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 *   with no imt or rgctx argument.
 */
2924 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2926 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): fragmentary extract — the assignment of FUNC to the
 *   call's fptr and the local declarations are not visible here.
 */
2930 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2937 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2940 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2942 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall identified by its address FUNC, going
 *   through the icall's exception-handling wrapper.
 */
2946 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2948 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2952 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2956 * mono_emit_abs_call:
2958 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the call address; the ABS patch
 * resolving code recognises it via the abs_patches hash and substitutes
 * the real target at patch time.
 */
2960 inline static MonoInst*
2961 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2962 MonoMethodSignature *sig, MonoInst **args)
2964 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2968 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* lazily create the hash mapping patch-info "addresses" to themselves */
2971 if (cfg->abs_patches == NULL)
2972 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2973 g_hash_table_insert (cfg->abs_patches, ji, ji);
2974 ins = mono_emit_native_call (cfg, ji, sig, args);
2975 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Return a copy of SIG extended with one trailing pointer-sized
 *   parameter, used to pass an rgctx/extra argument on indirect calls.
 *   NOTE(review): allocated with g_malloc (see the FIXME) — the caller
 *   appears to own the memory; confirm it is freed or cached upstream.
 */
2979 static MonoMethodSignature*
2980 sig_to_rgctx_sig (MonoMethodSignature *sig)
2982 // FIXME: memory allocation
2983 MonoMethodSignature *res;
2986 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2987 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2988 res->param_count = sig->param_count + 1;
2989 for (i = 0; i < sig->param_count; ++i)
2990 res->params [i] = sig->params [i];
/* the extra arg is an opaque pointer, typed as the runtime's this_arg */
2991 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2995 /* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *   Rebuild the argument array from ORIG_ARGS, append the value in
 *   ARG_REG as a trailing argument, extend the signature accordingly
 *   (sig_to_rgctx_sig) and emit an indirect call to CALL_TARGET.
 */
2997 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2999 MonoMethodSignature *csig;
3000 MonoInst *args_buf [16];
3002 int i, pindex, tmp_reg;
3004 /* Make a call with an rgctx/extra arg */
/* small arg counts use the stack buffer, larger ones the mempool */
3005 if (fsig->param_count + 2 < 16)
3008 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
3011 args [pindex ++] = orig_args [0];
3012 for (i = 0; i < fsig->param_count; ++i)
3013 args [pindex ++] = orig_args [fsig->hasthis + i];
/* append the extra argument value */
3014 tmp_reg = alloc_preg (cfg);
3015 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
3016 csig = sig_to_rgctx_sig (fsig);
3017 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
3020 /* Emit an indirect call to the function descriptor ADDR */
/*
 * emit_llvmonly_calli:
 *   llvm-only mode: ADDR points at a two-word <addr, arg> function
 *   descriptor.  Load both words and call addr with arg appended as an
 *   extra argument.
 */
3022 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3024 int addr_reg, arg_reg;
3025 MonoInst *call_target;
3027 g_assert (cfg->llvm_only);
3030 * addr points to a <addr, arg> pair, load both of them, and
3031 * make a call to addr, passing arg as an extra arg.
3033 addr_reg = alloc_preg (cfg);
3034 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
3035 arg_reg = alloc_preg (cfg);
3036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3038 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without their wrapper) in
 *   this compilation.  NOTE(review): fragmentary extract — the return
 *   statements for each branch are not visible here.
 */
3042 direct_icalls_enabled (MonoCompile *cfg)
3044 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3046 if (cfg->compile_llvm)
/* debugger stack walks and explicit opt-out need the wrapper frames */
3049 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO, inlining its managed
 *   wrapper when the icall cannot raise and direct icalls are enabled;
 *   otherwise fall back to calling the wrapper out of line.
 */
3055 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3058 * Call the jit icall without a wrapper if possible.
3059 * The wrapper is needed for the following reasons:
3060 * - to handle exceptions thrown using mono_raise_exceptions () from the
3061 * icall function. The EH code needs the lmf frame pushed by the
3062 * wrapper to be able to unwind back to managed code.
3063 * - to be able to do stack walks for asynchronously suspended
3064 * threads when debugging.
3066 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* lazily create and publish the wrapper method */
3070 if (!info->wrapper_method) {
3071 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3072 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* make the wrapper_method store visible before other threads read it */
3074 mono_memory_barrier ();
3078 * Inline the wrapper method, which is basically a call to the C icall, and
3079 * an exception check.
3081 costs = inline_method (cfg, info->wrapper_method, NULL,
3082 args, NULL, cfg->real_offset, TRUE);
3083 g_assert (costs > 0);
3084 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* slow path: call the out-of-line wrapper */
3088 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Widen the result INS of a call with signature FSIG when the callee
 *   might return a narrow integer with uninitialized upper bits (pinvoke,
 *   or LLVM-compiled code).  Returns the (possibly widened) result.
 *   NOTE(review): fragmentary extract — 'break' statements between the
 *   cases and the final return are not visible here.
 */
3093 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3095 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3096 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3100 * Native code might return non register sized integers
3101 * without initializing the upper bits.
3103 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3104 case OP_LOADI1_MEMBASE:
3105 widen_op = OP_ICONV_TO_I1;
3107 case OP_LOADU1_MEMBASE:
3108 widen_op = OP_ICONV_TO_U1;
3110 case OP_LOADI2_MEMBASE:
3111 widen_op = OP_ICONV_TO_I2;
3113 case OP_LOADU2_MEMBASE:
3114 widen_op = OP_ICONV_TO_U2;
3120 if (widen_op != -1) {
3121 int dreg = alloc_preg (cfg);
3124 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* keep the eval-stack type of the original result */
3125 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Lazily look up and cache corlib's String.memcpy(3) helper.
 *   NOTE(review): fragmentary extract — the inner NULL check guarding
 *   the g_error is not visible here.
 */
3135 get_memcpy_method (void)
3137 static MonoMethod *memcpy_method = NULL;
3138 if (!memcpy_method) {
3139 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3141 g_error ("Old corlib found. Install a new one");
3143 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively fill WB_BITMAP with one bit per pointer-sized slot of
 *   KLASS (relative to OFFSET) that holds a GC reference, so a copy can
 *   later apply write barriers only where needed.
 */
3147 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3149 MonoClassField *field;
3150 gpointer iter = NULL;
3152 while ((field = mono_class_get_fields (klass, &iter))) {
3155 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the MonoObject header; strip it */
3157 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3158 if (mini_type_is_reference (mono_field_get_type (field))) {
3159 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3160 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* recurse into embedded structs that contain references */
3162 MonoClass *field_class = mono_class_from_mono_type (field->type);
3163 if (field_class->has_references)
3164 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR.  Prefers
 *   (1) an arch-specific card-table opcode, then (2) inline card marking,
 *   then (3) a call to the runtime's write-barrier method.  No-op when
 *   write barriers are disabled for this compilation.
 */
3170 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3172 int card_table_shift_bits;
3173 gpointer card_table_mask;
3175 MonoInst *dummy_use;
3176 int nursery_shift_bits;
3177 size_t nursery_size;
3179 if (!cfg->gen_write_barriers)
3182 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3184 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* fastest path: a single arch-level card-table barrier opcode */
3186 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3189 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3190 wbarrier->sreg1 = ptr->dreg;
3191 wbarrier->sreg2 = value->dreg;
3192 MONO_ADD_INS (cfg->cbb, wbarrier);
3193 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
/* inline card marking: card = table + (ptr >> shift); *card = 1 */
3194 int offset_reg = alloc_preg (cfg);
3198 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3199 if (card_table_mask)
3200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3202 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3203 * IMM's larger than 32bits.
3205 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3206 card_reg = ins->dreg;
3208 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3209 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* fallback: call the runtime's managed write-barrier method */
3211 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3212 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3215 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Copy SIZE bytes of a valuetype of KLASS (iargs[0] = dest, iargs[1] =
 *   src) emitting write barriers only for the reference-holding slots
 *   recorded by create_write_barrier_bitmap.  Small copies are unrolled;
 *   large ones go through the mono_gc_wbarrier_value_copy_bitmap icall.
 *   NOTE(review): fragmentary extract — early-return FALSE/TRUE lines
 *   and some loop scaffolding are not visible here; return value
 *   presumably signals whether the intrinsic copy was emitted.
 */
3219 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3221 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3222 unsigned need_wb = 0;
3227 /*types with references can't have alignment smaller than sizeof(void*) */
3228 if (align < SIZEOF_VOID_P)
3231 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3232 if (size > 32 * SIZEOF_VOID_P)
3235 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3237 /* We don't unroll more than 5 stores to avoid code bloat. */
3238 if (size > 5 * SIZEOF_VOID_P) {
3239 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3240 size += (SIZEOF_VOID_P - 1);
3241 size &= ~(SIZEOF_VOID_P - 1);
3243 EMIT_NEW_ICONST (cfg, iargs [2], size);
3244 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3245 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3249 destreg = iargs [0]->dreg;
3250 srcreg = iargs [1]->dreg;
3253 dest_ptr_reg = alloc_preg (cfg);
3254 tmp_reg = alloc_preg (cfg);
/* walk dest with a separate cursor so barriers see the slot address */
3257 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3259 while (size >= SIZEOF_VOID_P) {
3260 MonoInst *load_inst;
3261 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3262 load_inst->dreg = tmp_reg;
3263 load_inst->inst_basereg = srcreg;
3264 load_inst->inst_offset = offset;
3265 MONO_ADD_INS (cfg->cbb, load_inst);
3267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* barrier only for slots flagged in the bitmap */
3270 emit_write_barrier (cfg, iargs [0], load_inst);
3272 offset += SIZEOF_VOID_P;
3273 size -= SIZEOF_VOID_P;
3276 /*tmp += sizeof (void*)*/
3277 if (size >= SIZEOF_VOID_P) {
3278 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3279 MONO_ADD_INS (cfg->cbb, iargs [0]);
3283 /* Those cannot be references since size < sizeof (void*) */
3285 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3286 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3293 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3299 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3300 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3309 * Emit code to copy a valuetype of type @klass whose address is stored in
3310 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *   NATIVE selects native (marshalled) layout/size.  Gsharedvt classes
 *   fetch size and memcpy helper from the rgctx; classes with references
 *   go through write-barrier-aware copies; small plain classes are
 *   copied inline; everything else calls corlib's memcpy helper.
 */
3313 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3315 MonoInst *iargs [4];
3318 MonoMethod *memcpy_method;
3319 MonoInst *size_ins = NULL;
3320 MonoInst *memcpy_ins = NULL;
3324 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3327 * This check breaks with spilled vars... need to handle it during verification anyway.
3328 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime */
3331 if (mini_is_gsharedvt_klass (klass)) {
3333 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3334 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3338 n = mono_class_native_size (klass, &align);
3340 n = mono_class_value_size (klass, &align);
3342 /* if native is true there should be no references in the struct */
3343 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3344 /* Avoid barriers when storing to the stack */
3345 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3346 (dest->opcode == OP_LDADDR))) {
3352 context_used = mini_class_check_context_used (cfg, klass);
3354 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3355 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3357 } else if (context_used) {
3358 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3360 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3361 if (!cfg->compile_aot)
3362 mono_class_compute_gc_descriptor (klass);
3366 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3368 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-references path: plain memory copy */
3373 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3374 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3375 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3380 iargs [2] = size_ins;
3382 EMIT_NEW_ICONST (cfg, iargs [2], n);
3384 memcpy_method = get_memcpy_method ();
3386 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3388 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily look up and cache corlib's String.memset(3) helper.
 *   NOTE(review): fragmentary extract — the inner NULL check guarding
 *   the g_error is not visible here.
 */
3393 get_memset_method (void)
3395 static MonoMethod *memset_method = NULL;
3396 if (!memset_method) {
3397 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3399 g_error ("Old corlib found. Install a new one");
3401 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of KLASS at the address in
 *   DEST->dreg.  Gsharedvt classes call a runtime bzero helper with the
 *   runtime-computed size; small classes are zeroed inline; the rest go
 *   through corlib's memset helper.  IP is the current CIL offset
 *   (presumably for diagnostics — unused in the visible lines).
 */
3405 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3407 MonoInst *iargs [3];
3410 MonoMethod *memset_method;
3411 MonoInst *size_ins = NULL;
3412 MonoInst *bzero_ins = NULL;
3413 static MonoMethod *bzero_method;
3415 /* FIXME: Optimize this for the case when dest is an LDADDR */
3416 mono_class_init (klass);
/* gsharedvt: size and bzero routine come from the rgctx */
3417 if (mini_is_gsharedvt_klass (klass)) {
3418 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3419 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3421 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3422 g_assert (bzero_method);
3424 iargs [1] = size_ins;
3425 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3429 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3431 n = mono_class_value_size (klass, &align);
/* small types: inline memset of zeros */
3433 if (n <= sizeof (gpointer) * 8) {
3434 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3437 memset_method = get_memset_method ();
3439 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3440 EMIT_NEW_ICONST (cfg, iargs [2], n);
3441 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3448 * Emit IR to return either the this pointer for instance method,
3449 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *   CONTEXT_USED describes how METHOD uses its generic context; the
 *   rgctx source is either 'this', the method runtime generic context
 *   (mrgctx) variable, or the vtable variable (possibly loaded through
 *   the mrgctx).  Only valid in gshared compilations.
 */
3452 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3454 MonoInst *this_ins = NULL;
3456 g_assert (cfg->gshared);
/* instance methods of reference types can derive the rgctx from 'this' */
3458 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3459 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3460 !method->klass->valuetype)
3461 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3463 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3464 MonoInst *mrgctx_loc, *mrgctx_var;
3466 g_assert (!this_ins);
3467 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3469 mrgctx_loc = mono_get_vtable_var (cfg);
3470 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3473 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3474 MonoInst *vtable_loc, *vtable_var;
3476 g_assert (!this_ins);
3478 vtable_loc = mono_get_vtable_var (cfg);
3479 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* generic method: the variable holds an mrgctx; load its class_vtable */
3481 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3482 MonoInst *mrgctx_var = vtable_var;
3485 vtable_reg = alloc_preg (cfg);
3486 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3487 vtable_var->type = STACK_PTR;
/* default: load the vtable from 'this' */
3495 vtable_reg = alloc_preg (cfg);
3496 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) an rgctx-entry patch descriptor for
 *   METHOD: IN_MRGCTX selects mrgctx vs class rgctx; the wrapped
 *   MonoJumpInfo carries PATCH_TYPE/PATCH_DATA; INFO_TYPE identifies the
 *   kind of rgctx slot.
 */
3501 static MonoJumpInfoRgctxEntry *
3502 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3504 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3505 res->method = method;
3506 res->in_mrgctx = in_mrgctx;
3507 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3508 res->data->type = patch_type;
3509 res->data->data.target = patch_data;
3510 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline fetch of rgctx ENTRY from RGCTX.  One path calls the
 *   mono_fill_{method,class}_rgctx icalls directly; the other emits the
 *   full inline fast path: walk the rgctx array chain to the slot, and
 *   fall back to the icall (is_null_bb) when any link or the slot itself
 *   is still NULL.  NOTE(review): fragmentary extract — the branch
 *   structure selecting between the two paths is not fully visible.
 */
3515 static inline MonoInst*
3516 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3518 MonoInst *args [16];
3521 // FIXME: No fastpath since the slot is not a compile time constant
3523 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3524 if (entry->in_mrgctx)
3525 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3527 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3531 * FIXME: This can be called during decompose, which is a problem since it creates
3533 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3535 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3537 MonoBasicBlock *is_null_bb, *end_bb;
3538 MonoInst *res, *ins, *call;
3541 slot = mini_get_rgctx_entry_slot (entry);
/* decode slot into (mrgctx-vs-class-rgctx, index, depth in the chain) */
3543 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3544 index = MONO_RGCTX_SLOT_INDEX (slot);
3546 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3547 for (depth = 0; ; ++depth) {
3548 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3550 if (index < size - 1)
3555 NEW_BBLOCK (cfg, end_bb);
3556 NEW_BBLOCK (cfg, is_null_bb);
3559 rgctx_reg = rgctx->dreg;
3561 rgctx_reg = alloc_preg (cfg);
3563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3564 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3565 NEW_BBLOCK (cfg, is_null_bb);
3567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* follow the chain of rgctx arrays down to the right depth */
3571 for (i = 0; i < depth; ++i) {
3572 int array_reg = alloc_preg (cfg);
3574 /* load ptr to next array */
3575 if (mrgctx && i == 0)
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3579 rgctx_reg = array_reg;
3580 /* is the ptr null? */
3581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3582 /* if yes, jump to actual trampoline */
3583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3587 val_reg = alloc_preg (cfg);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3589 /* is the slot null? */
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3591 /* if yes, jump to actual trampoline */
3592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fast path hit: move the slot value into the result register */
3595 res_reg = alloc_preg (cfg);
3596 MONO_INST_NEW (cfg, ins, OP_MOVE);
3597 ins->dreg = res_reg;
3598 ins->sreg1 = val_reg;
3599 MONO_ADD_INS (cfg->cbb, ins);
3601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: fill the slot via the runtime icall */
3604 MONO_START_BB (cfg, is_null_bb);
3606 EMIT_NEW_ICONST (cfg, args [1], index);
3608 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3610 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3611 MONO_INST_NEW (cfg, ins, OP_MOVE);
3612 ins->dreg = res_reg;
3613 ins->sreg1 = call->dreg;
3614 MONO_ADD_INS (cfg->cbb, ins);
3615 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3617 MONO_START_BB (cfg, end_bb);
3626 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * emit_rgctx_fetch:
 *
 *   Resolve ENTRY from the runtime generic context RGCTX: either expand the
 * fetch inline, or fall back to the lazy-fetch trampoline through an abs call
 * patched with MONO_PATCH_INFO_RGCTX_FETCH (the trampoline receives &rgctx).
 * NOTE(review): lines are elided in this chunk; the two returns presumably sit
 * on opposite arms of a condition (inline vs. trampoline path) — confirm
 * against the full source.
 */
3629 static inline MonoInst*
3630 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3633 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3635 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR loading the rgctx info of type RGCTX_TYPE for KLASS. A new rgctx
 * entry (MONO_PATCH_INFO_CLASS) is allocated from the cfg mempool, then
 * resolved through the shared fetch helper.
 */
3639 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3640 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3642 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3643 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3645 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR loading rgctx info of type RGCTX_TYPE for the signature SIG
 * (entry kind MONO_PATCH_INFO_SIGNATURE).
 */
3649 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3650 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3652 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3653 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3655 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR loading rgctx info for a gsharedvt call described by the pair
 * (SIG, CMETHOD). The pair is packed into a mempool-allocated
 * MonoJumpInfoGSharedVtCall and registered as a MONO_PATCH_INFO_GSHAREDVT_CALL
 * rgctx entry.
 */
3659 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3660 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3662 MonoJumpInfoGSharedVtCall *call_info;
3663 MonoJumpInfoRgctxEntry *entry;
3666 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3667 call_info->sig = sig;
3668 call_info->method = cmethod;
3670 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3671 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3673 return emit_rgctx_fetch (cfg, rgctx, entry);
3677 * emit_get_rgctx_virt_method:
3679 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * The (KLASS, VIRT_METHOD) pair is packed into a mempool-allocated
 * MonoJumpInfoVirtMethod and registered as a MONO_PATCH_INFO_VIRT_METHOD
 * rgctx entry, then resolved through the shared fetch helper.
 */
3682 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3683 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3685 MonoJumpInfoVirtMethod *info;
3686 MonoJumpInfoRgctxEntry *entry;
3689 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3690 info->klass = klass;
3691 info->method = virt_method;
3693 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3694 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3696 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR loading the gsharedvt info (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO)
 * for CMETHOD; INFO carries the per-method gsharedvt template.
 */
3700 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3701 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3703 MonoJumpInfoRgctxEntry *entry;
3706 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3707 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3709 return emit_rgctx_fetch (cfg, rgctx, entry);
3713 * emit_get_rgctx_method:
3715 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3716 * normal constants, else emit a load from the rgctx.
3719 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3720 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is known at JIT time, emit a plain constant. */
3722 if (!context_used) {
3725 switch (rgctx_type) {
3726 case MONO_RGCTX_INFO_METHOD:
3727 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3729 case MONO_RGCTX_INFO_METHOD_RGCTX:
3730 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* NOTE(review): break/return statements between the cases are elided in this
 * chunk — confirm fall-through behavior against the full source. */
3733 g_assert_not_reached ();
/* Shared case: fetch the info from the rgctx at run time. */
3736 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3737 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3739 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR loading rgctx info of type RGCTX_TYPE for FIELD
 * (entry kind MONO_PATCH_INFO_FIELD).
 */
3744 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3745 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3747 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3748 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3750 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the (RGCTX_TYPE, DATA) template in the per-method
 * gsharedvt info table (cfg->gsharedvt_info), appending a new entry if none
 * matches. The table is grown by doubling (starting at 16) from the cfg
 * mempool; the old storage is mempool-owned, so it is copied, not freed.
 * NOTE(review): the early-return for an existing match is elided in this
 * chunk — confirm against the full source.
 */
3754 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3756 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3757 MonoRuntimeGenericContextInfoTemplate *template_;
/* Linear search for an already-registered identical entry.
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries are never deduplicated. */
3762 for (i = 0; i < info->num_entries; ++i) {
3763 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3765 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full. */
3769 if (info->num_entries == info->count_entries) {
3770 MonoRuntimeGenericContextInfoTemplate *new_entries;
3771 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3773 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3775 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3776 info->entries = new_entries;
3777 info->count_entries = new_count_entries;
/* Append the new template at the end. */
3780 idx = info->num_entries;
3781 template_ = &info->entries [idx];
3782 template_->info_type = rgctx_type;
3783 template_->data = data;
3785 info->num_entries ++;
3791 * emit_get_gsharedvt_info:
3793 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3796 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Register (or find) the slot for (DATA, RGCTX_TYPE), then load
 * gsharedvt_info->entries [idx] with a single MEMBASE load. */
3801 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3802 /* Load info->entries [idx] */
3803 dreg = alloc_preg (cfg);
3804 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info for KLASS using its byval type. */
3810 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3812 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3816 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the class initializer (.cctor) of KLASS if it has not
 * run yet. The vtable is obtained either via the rgctx (shared code) or as a
 * compile-time constant.
 */
3819 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3821 MonoInst *vtable_arg;
3824 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx. */
3827 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3828 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable is known at JIT time. */
3830 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3834 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3837 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3841 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3842 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3844 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3845 ins->sreg1 = vtable_arg->dreg;
3846 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: open-code the "already initialized?" check against the
 * MonoVTable.initialized bitfield and call the JIT icall when needed. */
3848 static int byte_offset = -1;
3849 static guint8 bitmask;
3850 int bits_reg, inited_reg;
3851 MonoBasicBlock *inited_bb;
3852 MonoInst *args [16];
/* Resolve the bitfield position once; cached in function-local statics. */
3854 if (byte_offset < 0)
3855 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3857 bits_reg = alloc_ireg (cfg);
3858 inited_reg = alloc_ireg (cfg);
3860 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3863 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the initialized bit is already set. */
3865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3868 args [0] = vtable_arg;
3869 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3871 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support, but only when
 * sequence points are enabled and we are compiling METHOD itself (not an
 * inlined callee). NONEMPTY_STACK marks seq points where the IL stack is not
 * empty.
 */
3876 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3880 if (cfg->gen_seq_points && cfg->method == method) {
3881 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3883 ins->flags |= MONO_INST_NONEMPTY_STACK;
3884 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class
 * (from the object's vtable) and the target class KLASS in the JIT TLS
 * (class_cast_from/class_cast_to) so a later InvalidCastException can report
 * both types. NULL_CHECK guards the recording with an obj == NULL test.
 */
3889 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3891 if (mini_get_debug_options ()->better_cast_details) {
3892 int vtable_reg = alloc_preg (cfg);
3893 int klass_reg = alloc_preg (cfg);
3894 MonoBasicBlock *is_null_bb = NULL;
3896 int to_klass_reg, context_used;
/* Skip the bookkeeping entirely for a null object. */
3899 NEW_BBLOCK (cfg, is_null_bb);
3901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3902 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3905 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
3907 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3911 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass, then store it as the "from" class. */
3912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3915 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3917 context_used = mini_class_check_context_used (cfg, klass);
/* The "to" class comes from the rgctx in shared code, else is a constant. */
3919 MonoInst *class_ins;
3921 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3922 to_klass_reg = class_ins->dreg;
3924 to_klass_reg = alloc_preg (cfg);
3925 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3930 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state saved by save_cast_details () once the cast
 * has succeeded, so stale data is not reported by a later failure.
 */
3935 reset_cast_details (MonoCompile *cfg)
3937 /* Reset the variables holding the cast details */
3938 if (mini_get_debug_options ()->better_cast_details) {
3939 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3941 MONO_ADD_INS (cfg->cbb, tls_get);
3942 /* It is enough to reset the from field */
3943 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3948 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which verifies that OBJ is an instance of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 * The comparison strategy depends on the compilation mode: class pointer
 * compare (shared/SHARED opt), rgctx-loaded vtable compare (generic sharing),
 * vtable constant (AOT), or an immediate vtable pointer (plain JIT).
 */
3951 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3953 int vtable_reg = alloc_preg (cfg);
3956 context_used = mini_class_check_context_used (cfg, array_class);
3958 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also performs the implicit null check on OBJ. */
3960 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3962 if (cfg->opt & MONO_OPT_SHARED) {
3963 int class_reg = alloc_preg (cfg);
3966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3967 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3968 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3969 } else if (context_used) {
3970 MonoInst *vtable_ins;
3972 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3973 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3975 if (cfg->compile_aot) {
/* NOTE(review): error-path statements after the failed vtable lookups are
 * elided in this chunk — confirm against the full source. */
3979 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3981 vt_reg = alloc_preg (cfg);
3982 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3983 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3986 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3988 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3992 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3994 reset_cast_details (cfg);
3998 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3999 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Unbox VAL into a Nullable<T> by calling Nullable<T>.Unbox. In shared code
 * the method address comes from the rgctx and an indirect call is emitted;
 * otherwise a direct call is made, passing the vtable when required by
 * method sharing.
 */
4002 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4004 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4007 MonoInst *rgctx, *addr;
4009 /* FIXME: What if the class is shared? We might not
4010 have to get the address of the method from the
4012 addr = emit_get_rgctx_method (cfg, context_used, method,
4013 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4014 if (cfg->llvm_only && cfg->gsharedvt) {
4015 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4017 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4019 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally passing the vtable. */
4022 gboolean pass_vtable, pass_mrgctx;
4023 MonoInst *rgctx_arg = NULL;
4025 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4026 g_assert (!pass_mrgctx);
4029 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4032 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4035 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the unbox check for sp [0] against KLASS and return an instruction
 * holding the address of the unboxed value (obj + sizeof (MonoObject)).
 * Throws InvalidCastException when the dynamic type does not match.
 */
4040 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4044 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4045 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4046 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4047 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4049 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4050 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4051 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4053 /* FIXME: generics */
4054 g_assert (klass->rank == 0);
/* The boxed object must not be an array. */
4057 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4058 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4061 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class loaded from the rgctx. */
4064 MonoInst *element_class;
4066 /* This assertion is from the unboxcast insn */
4067 g_assert (klass->rank == 0);
4069 element_class = emit_get_rgctx_klass (cfg, context_used,
4070 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4072 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4073 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: direct class check with cast-details bookkeeping. */
4075 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4076 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4077 reset_cast_details (cfg);
/* Result: pointer to the value area right after the MonoObject header. */
4080 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4081 MONO_ADD_INS (cfg->cbb, add);
4082 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose box kind (value type,
 * reference type, or Nullable) is only known at run time. Three basic blocks
 * handle the cases; all converge on end_bb with ADDR_REG holding either the
 * address of the unboxed vtype or the address of a temporary holding the ref.
 */
4089 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4091 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4092 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4096 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic castclass first; throws on mismatch. */
4102 args [1] = klass_inst;
4105 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4107 NEW_BBLOCK (cfg, is_ref_bb);
4108 NEW_BBLOCK (cfg, is_nullable_bb);
4109 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type fetched from the gsharedvt info. */
4110 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4111 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4112 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4117 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4118 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Plain vtype: the value sits right after the MonoObject header. */
4122 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4123 MONO_ADD_INS (cfg->cbb, addr);
4125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4128 MONO_START_BB (cfg, is_ref_bb);
4130 /* Save the ref to a temporary */
4131 dreg = alloc_ireg (cfg);
4132 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4133 addr->dreg = addr_reg;
4134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4138 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable: call Nullable<T>.Unbox through a hand-built gsharedvt signature
 * (the concrete method cannot be constructed at JIT time). */
4141 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4142 MonoInst *unbox_call;
4143 MonoMethodSignature *unbox_sig;
4145 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4146 unbox_sig->ret = &klass->byval_arg;
4147 unbox_sig->param_count = 1;
4148 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4151 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4153 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4155 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4156 addr->dreg = addr_reg;
4159 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4162 MONO_START_BB (cfg, end_bb);
/* Load the value through the computed address. */
4165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4171 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating an object of type KLASS. FOR_BOX selects the boxing
 * allocator variants. Chooses between: managed (inline) allocators, the
 * domain-aware mono_object_new (MONO_OPT_SHARED), a corlib token helper for
 * out-of-line AOT paths, and the per-class allocation function otherwise.
 */
4174 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4176 MonoInst *iargs [2];
/* Shared-code path: vtable/klass comes from the rgctx. */
4181 MonoRgctxInfoType rgctx_info;
4182 MonoInst *iargs [2];
4183 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4185 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4187 if (cfg->opt & MONO_OPT_SHARED)
4188 rgctx_info = MONO_RGCTX_INFO_KLASS;
4190 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4191 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4193 if (cfg->opt & MONO_OPT_SHARED) {
4194 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4196 alloc_ftn = mono_object_new;
4199 alloc_ftn = mono_object_new_specific;
/* Managed allocator wants (vtable, aligned size) as arguments. */
4202 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4203 if (known_instance_size) {
4204 int size = mono_class_instance_size (klass);
4205 if (size < sizeof (MonoObject))
4206 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4208 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4210 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4213 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared path. */
4216 if (cfg->opt & MONO_OPT_SHARED) {
4217 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4218 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4220 alloc_ftn = mono_object_new;
4221 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4222 /* This happens often in argument checking code, eg. throw new FooException... */
4223 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4224 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4225 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4227 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4228 MonoMethod *managed_alloc = NULL;
/* A missing vtable means the class failed to load. */
4232 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4233 cfg->exception_ptr = klass;
4237 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4239 if (managed_alloc) {
4240 int size = mono_class_instance_size (klass);
4241 if (size < sizeof (MonoObject))
4242 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4244 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4245 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4246 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4248 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the size in gpointer-sized words ("lw") first. */
4250 guint32 lw = vtable->klass->instance_size;
4251 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4252 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4253 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4256 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4260 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4264 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL into an object of type KLASS. Special-cases
 * Nullable<T> (calls Nullable<T>.Box) and gsharedvt types (runtime dispatch
 * on the box kind); the common case is allocate-then-store.
 */
4267 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4269 MonoInst *alloc, *ins;
4271 if (mono_class_is_nullable (klass)) {
4272 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4275 if (cfg->llvm_only && cfg->gsharedvt) {
4276 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4277 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4278 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4280 /* FIXME: What if the class is shared? We might not
4281 have to get the method address from the RGCTX. */
4282 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4283 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4284 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4286 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable<T>.Box: direct call, optionally passing the vtable. */
4289 gboolean pass_vtable, pass_mrgctx;
4290 MonoInst *rgctx_arg = NULL;
4292 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4293 g_assert (!pass_mrgctx);
4296 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4299 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4302 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: box kind (vtype/ref/nullable) is only known at run time. */
4306 if (mini_is_gsharedvt_klass (klass)) {
4307 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4308 MonoInst *res, *is_ref, *src_var, *addr;
4311 dreg = alloc_ireg (cfg);
4313 NEW_BBLOCK (cfg, is_ref_bb);
4314 NEW_BBLOCK (cfg, is_nullable_bb);
4315 NEW_BBLOCK (cfg, end_bb);
4316 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype case: allocate and copy the value after the header. */
4324 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4327 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4328 ins->opcode = OP_STOREV_MEMBASE;
4330 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4331 res->type = STACK_OBJ;
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4336 MONO_START_BB (cfg, is_ref_bb);
4338 /* val is a vtype, so has to load the value manually */
4339 src_var = get_vreg_to_inst (cfg, val->dreg);
4341 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4342 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4344 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4347 MONO_START_BB (cfg, is_nullable_bb);
4350 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4351 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4353 MonoMethodSignature *box_sig;
4356 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4357 * construct that method at JIT time, so have to do things by hand.
4359 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4360 box_sig->ret = &mono_defaults.object_class->byval_arg;
4361 box_sig->param_count = 1;
4362 box_sig->params [0] = &klass->byval_arg;
4365 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4367 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4368 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4369 res->type = STACK_OBJ;
4373 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4375 MONO_START_BB (cfg, end_bb);
/* Common case: allocate, then store the value after the object header. */
4379 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4383 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   TRUE if KLASS is a generic instance (or, in shared code, an open generic)
 * with at least one covariant/contravariant type parameter instantiated with
 * a reference type — such casts need the variance-aware cache helpers.
 */
4389 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4392 MonoGenericContainer *container;
4393 MonoGenericInst *ginst;
4395 if (klass->generic_class) {
4396 container = klass->generic_class->container_class->generic_container;
4397 ginst = klass->generic_class->context.class_inst;
4398 } else if (klass->generic_container && context_used) {
4399 container = klass->generic_container;
4400 ginst = container->context.class_inst;
/* Check each variant parameter's instantiated type argument. */
4405 for (i = 0; i < container->type_argc; ++i) {
4407 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4409 type = ginst->type_argv [i];
4410 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls are safe to call
 * directly (published with a memory barrier; read without locking). */
4416 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   TRUE if CMETHOD's icall may be invoked directly (no wrapper). Only a
 * small whitelist qualifies, since a direct call must never raise.
 */
4419 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4421 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4422 if (!direct_icalls_enabled (cfg))
4426 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4427 * Whitelist a few icalls for now.
4429 if (!direct_icall_type_hash) {
4430 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4432 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4433 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4434 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4435 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the pointer becomes visible. */
4436 mono_memory_barrier ();
4437 direct_icall_type_hash = h;
4440 if (cmethod->klass == mono_defaults.math_class)
4442 /* No locking needed */
4443 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* TRUE when an isinst/castclass against KLASS cannot be decided by a simple
 * klass/vtable compare: interfaces, arrays, Nullable<T>, MarshalByRef classes,
 * sealed classes, and open type variables (VAR/MVAR). */
4448 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache wrapper for KLASS with ARGS
 * (obj, klass, cache), recording cast details around the call for the
 * --debug=casts diagnostics.
 */
4451 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4453 MonoMethod *mono_castclass;
4456 mono_castclass = mono_marshal_get_castclass_with_cache ();
4458 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4459 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4460 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a call-site-unique castclass cache index: method index in the high
 * 16 bits, a per-cfg incrementing counter in the low 16.
 */
4466 get_castclass_cache_idx (MonoCompile *cfg)
4468 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4469 cfg->castclass_cache_index ++;
4470 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code front end for the castclass-with-cache wrapper: KLASS is
 * a compile-time constant and the cache slot is a per-call-site runtime
 * constant (MONO_PATCH_INFO_CASTCLASS_CACHE).
 */
4474 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4483 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4486 idx = get_castclass_cache_idx (cfg);
4487 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4489 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4490 return emit_castclass_with_cache (cfg, klass, args);
4494 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the castclass opcode: cast SRC to KLASS or throw
 * InvalidCastException. Strategy depends on KLASS: cache-wrapper call for
 * variance/complex cases, an inlined marshal wrapper for MBR/interface
 * classes in non-shared code, or an open-coded null-check + vtable/klass
 * compare. INLINE_COSTS is incremented for the caller's inline budget.
 */
4497 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4499 MonoBasicBlock *is_null_bb;
4500 int obj_reg = src->dreg;
4501 int vtable_reg = alloc_preg (cfg);
4503 MonoInst *klass_inst = NULL, *res;
4505 context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared, variant generic arg: go through the cache wrapper. */
4507 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4508 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4509 (*inline_costs) += 2;
4511 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4512 MonoMethod *mono_castclass;
4513 MonoInst *iargs [1];
4516 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the marshal castclass wrapper at this call site. */
4519 save_cast_details (cfg, klass, src->dreg, TRUE);
4520 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4521 iargs, ip, cfg->real_offset, TRUE);
4522 reset_cast_details (cfg);
4523 CHECK_CFG_EXCEPTION;
4524 g_assert (costs > 0);
4526 cfg->real_offset += 5;
4528 (*inline_costs) += costs;
/* Shared-code complex cast: cache wrapper with rgctx-loaded cache entry. */
4536 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4537 MonoInst *cache_ins;
4539 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4544 /* klass - it's the second element of the cache entry*/
4545 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4548 args [2] = cache_ins;
4550 return emit_castclass_with_cache (cfg, klass, args);
4553 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Open-coded path: a null reference always casts successfully. */
4556 NEW_BBLOCK (cfg, is_null_bb);
4558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4561 save_cast_details (cfg, klass, obj_reg, FALSE);
4563 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4565 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4567 int klass_reg = alloc_preg (cfg);
4569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single vtable/klass equality test suffices. */
4571 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4572 /* the remoting code is broken, access the class for now */
4573 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4574 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4576 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4577 cfg->exception_ptr = klass;
4580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4585 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4588 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4592 MONO_START_BB (cfg, is_null_bb);
4594 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' CIL opcode: test whether SRC is an
 * instance of KLASS, producing the object reference on success and NULL
 * otherwise.  CONTEXT_USED is nonzero when KLASS must be fetched through
 * the runtime generic context (gshared code).
 * Returns NULL and set the cfg exception on error.
 */
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_ireg_ref (cfg);
	MonoInst *klass_inst = NULL;

	/* Checks that are too complex to inline (variant generic arguments,
	 * deep hierarchies) go through the managed isinst-with-cache wrapper. */
	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
		MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
		MonoInst *cache_ins;

		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

		/* klass - it's the second element of the cache entry*/
		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

		/* cache entry pointer itself is the last argument */
		args [2] = cache_ins;

		return mono_emit_method_call (cfg, mono_isinst, args, NULL);

		/* NOTE(review): reached only in the gshared case — KLASS comes from the rgctx */
		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;

	/* null is never an instance of anything: jump straight to the pass-through block */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
		/* array case: compare ranks, then check the element class */
		int klass_reg = alloc_preg (cfg);
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!context_used);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[] accepts any reference element type, but not enums/valuetypes */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
				/* Check that the object is a vector too */
				int bounds_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);

			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
	} else if (mono_class_is_nullable (klass)) {
		g_assert (!context_used);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		/* sealed class: a single vtable/class pointer comparison suffices */
		if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			g_assert (!context_used);
			/* the remoting code is broken, access the class for now */
			if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
					cfg->exception_ptr = klass;
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			/* generic subtype walk */
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);

	/* failure: result register becomes NULL */
	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check (used for transparent
 * proxies).  Pushes an integer result instead of a reference.
 */
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

#ifndef DISABLE_REMOTING
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	MonoBasicBlock *true_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);

#ifndef DISABLE_REMOTING
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);
#ifndef DISABLE_REMOTING
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* null reference -> result 1 ("not an instance") */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
#ifndef DISABLE_REMOTING
		/* interface check first; on failure see whether it is a proxy */
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* proxy with no custom type info -> type cannot be determined (result 2) */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
#ifndef DISABLE_REMOTING
		/* non-interface case: load the class, detect proxies */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* for proxies, check against the remote class instead */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

#ifndef DISABLE_REMOTING
	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* materialize the integer result on the IR stack */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check (transparent proxies).
 * Pushes an integer result; throws InvalidCastException on failure.
 */
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwise*/

#ifndef DISABLE_REMOTING
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	MonoBasicBlock *ok_result_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);

#ifndef DISABLE_REMOTING
	int klass_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, end_bb);

	NEW_BBLOCK (cfg, ok_result_bb);

	/* casting null always succeeds */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	/* record cast details for a precise InvalidCastException message */
	save_cast_details (cfg, klass, obj_reg, FALSE);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		/* non-proxy that failed the interface check throws here */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* proxy with custom type info: result 1 ("cannot be determined") */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* proxy path: check against the remote (proxied) class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* ordinary object: a failing cast throws inside mini_emit_castclass */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

#ifndef DISABLE_REMOTING
	MONO_START_BB (cfg, end_bb);

	/* materialize the integer result on the IR stack */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inline IR for Enum.HasFlag (): load the enum value, AND it with
 * the flag, and compare the result with the flag ((v & f) == f).
 * ENUM_THIS is the address of the boxed/byref enum value.
 */
static G_GNUC_UNUSED MonoInst*
handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
	MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
	guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);

	/* pick 32-bit vs 64-bit opcodes from the underlying enum type */
	switch (enum_type->type) {
#if SIZEOF_REGISTER == 8
		MonoInst *load, *and_, *cmp, *ceq;
		int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int dest_reg = alloc_ireg (cfg);

		EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
		EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
		EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
		EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);

		ceq->type = STACK_I4;

		/* decompose long opcodes on 32-bit platforms */
		load = mono_decompose_opcode (cfg, load);
		and_ = mono_decompose_opcode (cfg, and_);
		cmp = mono_decompose_opcode (cfg, cmp);
		ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR for a delegate constructor: allocate the delegate object and
 * inline the work of mono_delegate_ctor () (target, method, method_code and
 * invoke_impl fields).  VIRTUAL_ selects the virtual-delegate trampoline.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
	gpointer trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;

	if (virtual_ && !cfg->llvm_only) {
		MonoMethod *invoke = mono_get_delegate_invoke (klass);

		if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))

	obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));

	if (cfg->llvm_only) {
		MonoInst *args [16];

		/*
		 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
		 * the address of a gshared method. So use a JIT icall.
		 * FIXME: Optimize this.
		 */
		args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
		mono_emit_jit_icall (cfg, virtual_ ? mono_init_delegate_virtual : mono_init_delegate, args);

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
			emit_write_barrier (cfg, ptr, target);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
		MonoInst *code_slot_ins;

			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
			domain = mono_domain_get ();
			/* lock protects lazy creation and mutation of method_code_hash */
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
				code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			mono_domain_unlock (domain);

			code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);

	if (cfg->compile_aot) {
		/* AOT: emit a patch-info constant resolved at load time */
		MonoDelegateClassMethodPair *del_tramp;

		del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
		del_tramp->klass = klass;
		del_tramp->method = context_used ? NULL : method;
		del_tramp->is_virtual = virtual_;
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
			trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
			trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);

	/* Set invoke_impl field */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
		/* non-virtual: fetch invoke_impl/method_ptr out of the tramp info */
		dreg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);

		dreg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);

	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () vararg icall for a RANK
 * dimensional array allocation.  SP holds the dimension arguments.
 */
handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
	MonoJitICallInfo *info;

	/* Need to register the icall so it gets an icall wrapper */
	info = mono_get_array_new_va_icall (rank);

	cfg->flags |= MONO_CFG_HAS_VARARGS;

	/* mono_array_new_va () needs a vararg calling convention */
	cfg->disable_llvm = TRUE;

	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle constrained calls where the receiver is a gsharedvt type.
 * Return the instruction representing the call. Set the cfg exception on failure.
 */
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
								   gboolean *ref_emit_widen)
	MonoInst *ins = NULL;
	gboolean emit_widen = *ref_emit_widen;

	/*
	 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
	 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
	 * pack the arguments into an array, and do the rest of the work in an icall.
	 */
	if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
		(MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
		(fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
		MonoInst *args [16];

		/*
		 * This case handles calls to
		 * - object:ToString()/Equals()/GetHashCode(),
		 * - System.IComparable<T>:CompareTo()
		 * - System.IEquatable<T>:Equals ()
		 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
		 */

		if (mono_method_check_context_used (cmethod))
			args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
			EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
		args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);

		/* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
		if (fsig->hasthis && fsig->param_count) {
			/* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
			MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
			ins->dreg = alloc_preg (cfg);
			ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
			MONO_ADD_INS (cfg->cbb, ins);

			if (mini_is_gsharedvt_type (fsig->params [0])) {
				int addr_reg, deref_arg_reg;

				ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
				deref_arg_reg = alloc_preg (cfg);
				/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
				EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
				EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
				addr_reg = ins->dreg;
				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
				EMIT_NEW_ICONST (cfg, args [3], 0);
				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
			/* no arguments to pass */
			EMIT_NEW_ICONST (cfg, args [3], 0);
			EMIT_NEW_ICONST (cfg, args [4], 0);
		ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);

		/* the icall returns a boxed result; unbox primitives/structs */
		if (mini_is_gsharedvt_type (fsig->ret)) {
			ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
		} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
			NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
			MONO_ADD_INS (cfg->cbb, add);

			NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
			MONO_ADD_INS (cfg->cbb, ins);
			/* ins represents the call result */
		GSHAREDVT_FAILURE (CEE_CALLVIRT);

	*ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes the GOT variable,
 * placing it at the very start of the entry basic block.  No-op when there
 * is no got_var or it was already allocated.
 */
mono_emit_load_got_addr (MonoCompile *cfg)
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->cil_code = cfg->header->code;
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size eligible for inlining; lazily read from MONO_INLINELIMIT,
 * falling back to INLINE_LENGTH_LIMIT.  See mono_method_check_inlining (). */
static int inline_limit;
static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled:
 * checks inline depth, header flags, IL size against inline_limit, class
 * cctor constraints and (on soft-float targets) R4 usage.
 */
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
	MonoMethodHeaderSummary header;

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	MonoMethodSignature *sig = mono_method_signature (method);

	if (cfg->disable_inline)

	/* cap recursion to avoid blowing up compile time/memory */
	if (cfg->inline_depth > 10)

	if (!mono_method_get_header_summary (method, &header))

	/*runtime, icall and pinvoke are checked by summary call*/
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
	    (mono_class_is_marshalbyref (method->klass)) ||

	/* also consider num_locals? */
	/* Do the size check early to avoid creating vtables */
	if (!inline_limit_inited) {
		if (g_getenv ("MONO_INLINELIMIT"))
			inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
			inline_limit = INLINE_LENGTH_LIMIT;
		inline_limit_inited = TRUE;
	if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))

	/*
	 * if we can initialize the class of the method right away, we do,
	 * otherwise we don't allow inlining if the class needs initialization,
	 * since it would mean inserting a call to mono_runtime_class_init()
	 * inside the inlined code
	 */
	if (!(cfg->opt & MONO_OPT_SHARED)) {
		/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
		if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
			vtable = mono_class_vtable (cfg->domain, method->klass);
				if (!cfg->compile_aot)
					mono_runtime_class_init (vtable);
		} else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
			if (cfg->run_cctors && method->klass->has_cctor) {
				/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
				if (!method->klass->runtime_info)
					/* No vtable created yet */
				vtable = mono_class_vtable (cfg->domain, method->klass);
				/* This makes so that inline cannot trigger */
				/* .cctors: too many apps depend on them */
				/* running with a specific order... */
				if (! vtable->initialized)
				mono_runtime_class_init (vtable);
		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
			if (!method->klass->runtime_info)
				/* No vtable created yet */
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!vtable->initialized)
		/*
		 * If we're compiling for shared code
		 * the cctor will need to be run at aot method load time, for example,
		 * or at the end of the compilation of the inlining method.
		 */
		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (mono_arch_is_soft_float ()) {
		/* R4 values are not supported by the soft-float inliner path */
		if (sig->ret && sig->ret->type == MONO_TYPE_R4)
		for (i = 0; i < sig->param_count; ++i)
			if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)

	if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires emitting a
 * class-initialization check for KLASS.
 */
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
	if (!cfg->compile_aot) {
		/* JIT: already-initialized classes need no check */
		if (vtable->initialized)

	if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
		if (cfg->method == method)

	if (!mono_class_needs_cctor_run (klass, method))

	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of one-dimensional array
 * ARR of element type KLASS.  BCHECK controls emission of the bounds check.
 * Returns the address instruction (type STACK_MP).
 */
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	if (mini_is_gsharedvt_variable_klass (klass)) {

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		index2_reg = index_reg;
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
		index2_reg = index_reg;

		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* fast path: fold scale and vector offset into a single LEA */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

	add_reg = alloc_ireg_mp (cfg);

		MonoInst *rgctx_ins;

		/* gsharedvt element size is only known at runtime, via the rgctx */
		g_assert (cfg->gshared);
		context_used = mini_class_check_context_used (cfg, klass);
		g_assert (context_used);
		rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 * array, including per-dimension lower-bound adjustment and range checks.
 * Returns the address instruction (type STACK_MP).
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				    arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));

	/* dimension 0: subtract lower bound, unsigned-compare against length */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* dimension 1: same check at bounds[1] */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* address = arr + (realidx1 * len2 + realidx2) * size + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;

	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR for the Get/Set/Address accessor of a multi-dimensional array:
 * inline the rank-1 and rank-2 fast paths, otherwise call the marshalled
 * array-address helper.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
	MonoMethod *addr_method;
	MonoClass *eclass = cmethod->klass->element_class;

	/* the setter has a trailing value parameter which is not an index */
	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);

	/* emit_ldelema_2 depends on OP_LMUL */
	if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);

	if (mini_is_gsharedvt_variable_klass (eclass))

	/* generic fallback: call the managed array-address wrapper */
	element_size = mono_class_array_element_size (eclass);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *
 *   Default MonoBreakPolicyFunc: honor every breakpoint request.
 * (Braces are on elided lines in this listing.)
 */
5575 static MonoBreakPolicy
5576 always_insert_breakpoint (MonoMethod *method)
5578 return MONO_BREAK_POLICY_ALWAYS;
/* The active breakpoint policy callback; replaced via mono_set_break_policy (). */
5581 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5584 * mono_set_break_policy:
5585 * policy_callback: the new callback function
5587 * Allow embedders to decide whether to actually obey breakpoint instructions
5588 * (both break IL instructions and Debugger.Break () method calls), for example
5589 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5590 * untrusted or semi-trusted code.
5592 * @policy_callback will be called every time a break point instruction needs to
5593 * be inserted with the method argument being the method that calls Debugger.Break()
5594 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5595 * if it wants the breakpoint to not be effective in the given method.
5596 * #MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * mono_set_break_policy:
 *
 *   Install POLICY_CALLBACK as the breakpoint policy; a NULL argument
 * restores the default policy, which always inserts breakpoints.
 * (The "else" keyword before the reset assignment is on an elided line.)
 */
5599 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5601 if (policy_callback)
5602 break_policy_func = policy_callback;
5604 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: ("brekpoint" sic — the misspelled name is kept,
 * renaming it would break internal callers)
 *
 *   Ask the installed break policy whether a breakpoint for METHOD should
 * actually be emitted.  NOTE(review): the return statements for the
 * individual cases are on elided lines; ON_DBG and unrecognized values
 * warn before presumably falling through to a default result — confirm
 * against the full source.
 */
5608 should_insert_brekpoint (MonoMethod *method) {
5609 switch (break_policy_func (method)) {
5610 case MONO_BREAK_POLICY_ALWAYS:
5612 case MONO_BREAK_POLICY_NEVER:
5614 case MONO_BREAK_POLICY_ON_DBG:
5615 g_warning ("mdb no longer supported");
5618 g_warning ("Incorrect value returned from break policy callback");
5623 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl/SetGenericValueImpl.  ARGS is
 * { array, index, value-address }; the element class comes from FSIG's
 * third parameter type.  IS_SET selects store-into-array vs.
 * load-from-array; the "if (is_set)" / "else" lines separating the two
 * load/store pairs below are elided in this listing.
 */
5625 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5627 MonoInst *addr, *store, *load;
5628 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5630 /* the bounds check is already done by the callers */
5631 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Setter: copy *args[2] into the element; reference elements need a write barrier. */
5633 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5634 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5635 if (mini_type_is_reference (fsig->params [2]))
5636 emit_write_barrier (cfg, addr, load);
/* Getter: copy the element into *args[2]. */
5638 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5639 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS behaves as a reference type, as seen by mini_type_is_reference (). */
5646 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5648 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for an array element store (stelem): SP is
 * { array, index, value } with element class KLASS.  SAFETY_CHECKS
 * enables bounds/covariance checking.
 */
5652 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null object into a reference-type array requires an array
 * covariance check, done by the virtual stelemref marshal helper. */
5654 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5655 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5656 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5657 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5658 MonoInst *iargs [3];
5661 mono_class_setup_vtable (obj_array);
5662 g_assert (helper->slot);
/* NOTE(review): the statements consuming these STACK_OBJ checks and the
 * iargs assignments are on elided lines. */
5664 if (sp [0]->type != STACK_OBJ)
5666 if (sp [2]->type != STACK_OBJ)
5673 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size (gsharedvt) elements: compute the address, then store as a valuetype. */
5677 if (mini_is_gsharedvt_variable_klass (klass)) {
5680 // FIXME-VT: OP_ICONST optimization
5681 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5682 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5683 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset directly into the store. */
5684 } else if (sp [1]->opcode == OP_ICONST) {
5685 int array_reg = sp [0]->dreg;
5686 int index_reg = sp [1]->dreg;
5687 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* The LLVM backend on 64 bit needs the 32 bit index zero-extended first. */
5689 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5690 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5693 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5694 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, store, and emit a write
 * barrier when the element is a reference. */
5696 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5697 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5698 if (generic_class_is_reference_type (cfg, klass))
5699 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeStore/UnsafeLoad icalls, which access an array
 * element with safety checks disabled.  The element class is taken from
 * the third parameter type for stores and from the return type for loads
 * (the "if (is_set)" / "else" lines are elided in this listing).
 */
5706 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5711 eklass = mono_class_from_mono_type (fsig->params [2]);
5713 eklass = mono_class_from_mono_type (fsig->ret);
/* Store: delegate to emit_array_store with safety_checks == FALSE. */
5716 return emit_array_store (cfg, eklass, args, FALSE);
/* Load: compute the element address and load the value from it. */
5718 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5719 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether the Array.UnsafeMov<S,R> intrinsic can be lowered to a
 * plain register move from PARAM_KLASS (S) to RETURN_KLASS (R).  Both must
 * be valuetypes without managed references, neither may be R4/R8, and the
 * two either have identical value sizes or are non-struct scalars that
 * both fit in a 32 bit register.  Nonzero means the move is safe.
 *   FIX(review): this listing contained mojibake — "&param_klass" had been
 * corrupted to "¶m_klass" ("&para" rendered as the pilcrow sign U+00B6)
 * on four lines (5730, 5754, 5755, 5779); those lines are restored below.
 * No other code is changed.
 */
5725 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5728 int param_size, return_size;
/* Strip enums/generic instantiations down to the underlying types. */
5730 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5731 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5733 if (cfg->verbose_level > 3)
5734 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5736 //Don't allow mixing reference types with value types
5737 if (param_klass->valuetype != return_klass->valuetype) {
5738 if (cfg->verbose_level > 3)
5739 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5743 if (!param_klass->valuetype) {
5744 if (cfg->verbose_level > 3)
5745 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types containing managed references would defeat GC tracking. */
5750 if (param_klass->has_references || return_klass->has_references)
5753 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5754 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5755 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5756 if (cfg->verbose_level > 3)
5757 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in a different register class, so a bitwise move is not a value move. */
5761 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5762 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5763 if (cfg->verbose_level > 3)
5764 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5768 param_size = mono_class_value_size (param_klass, &align);
5769 return_size = mono_class_value_size (return_klass, &align);
5771 //We can do it if sizes match
5772 if (param_size == return_size) {
5773 if (cfg->verbose_level > 3)
5774 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5778 //No simple way to handle struct if sizes don't match
5779 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5780 if (cfg->verbose_level > 3)
5781 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5786 * Same reg size category.
5787 * A quick note on why we don't require widening here.
5788 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5790 * Since the source value comes from a function argument, the JIT will already have
5791 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5793 if (param_size <= 4 && return_size <= 4) {
5794 if (cfg->verbose_level > 3)
5795 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline the Array.UnsafeMov<S,R> icall when source and destination
 * types are layout-compatible per is_unsafe_mov_compatible (), either
 * directly or element-wise for rank-1 arrays.  NOTE(review): the
 * bail-out and move-emitting statements are on elided lines.
 */
5803 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5805 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5806 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* Variable-size (gsharedvt) return types cannot be moved as plain values. */
5808 if (mini_is_gsharedvt_variable_type (fsig->ret))
5811 //Valuetypes that are semantically equivalent or numbers than can be widened to
5812 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5815 //Arrays of valuetypes that are semantically equivalent
5816 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic: SIMD ctor
 * intrinsics when the arch supports them and MONO_OPT_SIMD is enabled,
 * otherwise native integer type intrinsics.  (The return of the SIMD
 * result is on an elided line.)
 */
5823 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5825 #ifdef MONO_ARCH_SIMD_INTRINSICS
5826 MonoInst *ins = NULL;
5828 if (cfg->opt & MONO_OPT_SIMD) {
5829 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5835 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 * Presumably returns the new instruction — the return is on an elided line.
 */
5839 emit_memory_barrier (MonoCompile *cfg, int kind)
5841 MonoInst *ins = NULL;
5842 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5843 MONO_ADD_INS (cfg->cbb, ins);
5844 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics used only by the LLVM backend: System.Math Sin/Cos/Sqrt
 * and double Abs map to FP opcodes, and Min/Max map to conditional-move
 * opcodes when MONO_OPT_CMOV is enabled.  Several opcode-assignment
 * lines are elided in this listing.
 */
5850 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5852 MonoInst *ins = NULL;
5855 /* The LLVM backend supports these intrinsics */
5856 if (cmethod->klass == mono_defaults.math_class) {
5857 if (strcmp (cmethod->name, "Sin") == 0) {
5859 } else if (strcmp (cmethod->name, "Cos") == 0) {
5861 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Only the double (R8) overload of Abs is handled. */
5863 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5867 if (opcode && fsig->param_count == 1) {
5868 MONO_INST_NEW (cfg, ins, opcode);
5869 ins->type = STACK_R8;
5870 ins->dreg = mono_alloc_freg (cfg);
5871 ins->sreg1 = args [0]->dreg;
5872 MONO_ADD_INS (cfg->cbb, ins);
5876 if (cfg->opt & MONO_OPT_CMOV) {
5877 if (strcmp (cmethod->name, "Min") == 0) {
5878 if (fsig->params [0]->type == MONO_TYPE_I4)
5880 if (fsig->params [0]->type == MONO_TYPE_U4)
5881 opcode = OP_IMIN_UN;
5882 else if (fsig->params [0]->type == MONO_TYPE_I8)
5884 else if (fsig->params [0]->type == MONO_TYPE_U8)
5885 opcode = OP_LMIN_UN;
5886 } else if (strcmp (cmethod->name, "Max") == 0) {
5887 if (fsig->params [0]->type == MONO_TYPE_I4)
5889 if (fsig->params [0]->type == MONO_TYPE_U4)
5890 opcode = OP_IMAX_UN;
5891 else if (fsig->params [0]->type == MONO_TYPE_I8)
5893 else if (fsig->params [0]->type == MONO_TYPE_U8)
5894 opcode = OP_LMAX_UN;
5898 if (opcode && fsig->param_count == 2) {
5899 MONO_INST_NEW (cfg, ins, opcode);
/* NOTE(review): U4 arguments take the STACK_I8 branch here since only
 * I4 is tested — confirm this is intended against the full source. */
5900 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5901 ins->dreg = mono_alloc_ireg (cfg);
5902 ins->sreg1 = args [0]->dreg;
5903 ins->sreg2 = args [1]->dreg;
5904 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array
 * UnsafeStore/UnsafeLoad/UnsafeMov icalls.  Presumably returns NULL for
 * anything else — the fall-through return is on an elided line.
 */
5912 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5914 if (cmethod->klass == mono_defaults.array_class) {
5915 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5916 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5917 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5918 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5919 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5920 return emit_array_unsafe_mov (cfg, fsig, args);
5927 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5929 MonoInst *ins = NULL;
5931 static MonoClass *runtime_helpers_class = NULL;
5932 if (! runtime_helpers_class)
5933 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5934 "System.Runtime.CompilerServices", "RuntimeHelpers");
5936 if (cmethod->klass == mono_defaults.string_class) {
5937 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5938 int dreg = alloc_ireg (cfg);
5939 int index_reg = alloc_preg (cfg);
5940 int add_reg = alloc_preg (cfg);
5942 #if SIZEOF_REGISTER == 8
5943 if (COMPILE_LLVM (cfg)) {
5944 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5946 /* The array reg is 64 bits but the index reg is only 32 */
5947 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5950 index_reg = args [1]->dreg;
5952 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5954 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5955 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5956 add_reg = ins->dreg;
5957 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5960 int mult_reg = alloc_preg (cfg);
5961 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5962 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5963 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5964 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5966 type_from_op (cfg, ins, NULL, NULL);
5968 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5969 int dreg = alloc_ireg (cfg);
5970 /* Decompose later to allow more optimizations */
5971 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5972 ins->type = STACK_I4;
5973 ins->flags |= MONO_INST_FAULT;
5974 cfg->cbb->has_array_access = TRUE;
5975 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5980 } else if (cmethod->klass == mono_defaults.object_class) {
5982 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5983 int dreg = alloc_ireg_ref (cfg);
5984 int vt_reg = alloc_preg (cfg);
5985 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5986 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5987 type_from_op (cfg, ins, NULL, NULL);
5990 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5991 int dreg = alloc_ireg (cfg);
5992 int t1 = alloc_ireg (cfg);
5994 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5995 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5996 ins->type = STACK_I4;
5999 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6000 MONO_INST_NEW (cfg, ins, OP_NOP);
6001 MONO_ADD_INS (cfg->cbb, ins);
6005 } else if (cmethod->klass == mono_defaults.array_class) {
6006 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6007 return emit_array_generic_access (cfg, fsig, args, FALSE);
6008 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6009 return emit_array_generic_access (cfg, fsig, args, TRUE);
6011 #ifndef MONO_BIG_ARRAYS
6013 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6016 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6017 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6018 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6019 int dreg = alloc_ireg (cfg);
6020 int bounds_reg = alloc_ireg_mp (cfg);
6021 MonoBasicBlock *end_bb, *szarray_bb;
6022 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6024 NEW_BBLOCK (cfg, end_bb);
6025 NEW_BBLOCK (cfg, szarray_bb);
6027 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6028 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6030 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6031 /* Non-szarray case */
6033 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6034 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6036 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6037 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6039 MONO_START_BB (cfg, szarray_bb);
6042 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6043 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6045 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6046 MONO_START_BB (cfg, end_bb);
6048 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6049 ins->type = STACK_I4;
6055 if (cmethod->name [0] != 'g')
6058 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6059 int dreg = alloc_ireg (cfg);
6060 int vtable_reg = alloc_preg (cfg);
6061 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6062 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6063 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6064 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6065 type_from_op (cfg, ins, NULL, NULL);
6068 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6069 int dreg = alloc_ireg (cfg);
6071 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6072 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6073 type_from_op (cfg, ins, NULL, NULL);
6078 } else if (cmethod->klass == runtime_helpers_class) {
6080 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6081 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6085 } else if (cmethod->klass == mono_defaults.thread_class) {
6086 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6087 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6088 MONO_ADD_INS (cfg->cbb, ins);
6090 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6091 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6092 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6094 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6096 if (fsig->params [0]->type == MONO_TYPE_I1)
6097 opcode = OP_LOADI1_MEMBASE;
6098 else if (fsig->params [0]->type == MONO_TYPE_U1)
6099 opcode = OP_LOADU1_MEMBASE;
6100 else if (fsig->params [0]->type == MONO_TYPE_I2)
6101 opcode = OP_LOADI2_MEMBASE;
6102 else if (fsig->params [0]->type == MONO_TYPE_U2)
6103 opcode = OP_LOADU2_MEMBASE;
6104 else if (fsig->params [0]->type == MONO_TYPE_I4)
6105 opcode = OP_LOADI4_MEMBASE;
6106 else if (fsig->params [0]->type == MONO_TYPE_U4)
6107 opcode = OP_LOADU4_MEMBASE;
6108 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6109 opcode = OP_LOADI8_MEMBASE;
6110 else if (fsig->params [0]->type == MONO_TYPE_R4)
6111 opcode = OP_LOADR4_MEMBASE;
6112 else if (fsig->params [0]->type == MONO_TYPE_R8)
6113 opcode = OP_LOADR8_MEMBASE;
6114 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6115 opcode = OP_LOAD_MEMBASE;
6118 MONO_INST_NEW (cfg, ins, opcode);
6119 ins->inst_basereg = args [0]->dreg;
6120 ins->inst_offset = 0;
6121 MONO_ADD_INS (cfg->cbb, ins);
6123 switch (fsig->params [0]->type) {
6130 ins->dreg = mono_alloc_ireg (cfg);
6131 ins->type = STACK_I4;
6135 ins->dreg = mono_alloc_lreg (cfg);
6136 ins->type = STACK_I8;
6140 ins->dreg = mono_alloc_ireg (cfg);
6141 #if SIZEOF_REGISTER == 8
6142 ins->type = STACK_I8;
6144 ins->type = STACK_I4;
6149 ins->dreg = mono_alloc_freg (cfg);
6150 ins->type = STACK_R8;
6153 g_assert (mini_type_is_reference (fsig->params [0]));
6154 ins->dreg = mono_alloc_ireg_ref (cfg);
6155 ins->type = STACK_OBJ;
6159 if (opcode == OP_LOADI8_MEMBASE)
6160 ins = mono_decompose_opcode (cfg, ins);
6162 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6166 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6168 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6170 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6171 opcode = OP_STOREI1_MEMBASE_REG;
6172 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6173 opcode = OP_STOREI2_MEMBASE_REG;
6174 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6175 opcode = OP_STOREI4_MEMBASE_REG;
6176 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6177 opcode = OP_STOREI8_MEMBASE_REG;
6178 else if (fsig->params [0]->type == MONO_TYPE_R4)
6179 opcode = OP_STORER4_MEMBASE_REG;
6180 else if (fsig->params [0]->type == MONO_TYPE_R8)
6181 opcode = OP_STORER8_MEMBASE_REG;
6182 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6183 opcode = OP_STORE_MEMBASE_REG;
6186 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6188 MONO_INST_NEW (cfg, ins, opcode);
6189 ins->sreg1 = args [1]->dreg;
6190 ins->inst_destbasereg = args [0]->dreg;
6191 ins->inst_offset = 0;
6192 MONO_ADD_INS (cfg->cbb, ins);
6194 if (opcode == OP_STOREI8_MEMBASE_REG)
6195 ins = mono_decompose_opcode (cfg, ins);
6200 } else if (cmethod->klass->image == mono_defaults.corlib &&
6201 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6202 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6205 #if SIZEOF_REGISTER == 8
6206 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6207 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6208 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6209 ins->dreg = mono_alloc_preg (cfg);
6210 ins->sreg1 = args [0]->dreg;
6211 ins->type = STACK_I8;
6212 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6213 MONO_ADD_INS (cfg->cbb, ins);
6217 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6219 /* 64 bit reads are already atomic */
6220 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6221 load_ins->dreg = mono_alloc_preg (cfg);
6222 load_ins->inst_basereg = args [0]->dreg;
6223 load_ins->inst_offset = 0;
6224 load_ins->type = STACK_I8;
6225 MONO_ADD_INS (cfg->cbb, load_ins);
6227 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6234 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6235 MonoInst *ins_iconst;
6238 if (fsig->params [0]->type == MONO_TYPE_I4) {
6239 opcode = OP_ATOMIC_ADD_I4;
6240 cfg->has_atomic_add_i4 = TRUE;
6242 #if SIZEOF_REGISTER == 8
6243 else if (fsig->params [0]->type == MONO_TYPE_I8)
6244 opcode = OP_ATOMIC_ADD_I8;
6247 if (!mono_arch_opcode_supported (opcode))
6249 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6250 ins_iconst->inst_c0 = 1;
6251 ins_iconst->dreg = mono_alloc_ireg (cfg);
6252 MONO_ADD_INS (cfg->cbb, ins_iconst);
6254 MONO_INST_NEW (cfg, ins, opcode);
6255 ins->dreg = mono_alloc_ireg (cfg);
6256 ins->inst_basereg = args [0]->dreg;
6257 ins->inst_offset = 0;
6258 ins->sreg2 = ins_iconst->dreg;
6259 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6260 MONO_ADD_INS (cfg->cbb, ins);
6262 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6263 MonoInst *ins_iconst;
6266 if (fsig->params [0]->type == MONO_TYPE_I4) {
6267 opcode = OP_ATOMIC_ADD_I4;
6268 cfg->has_atomic_add_i4 = TRUE;
6270 #if SIZEOF_REGISTER == 8
6271 else if (fsig->params [0]->type == MONO_TYPE_I8)
6272 opcode = OP_ATOMIC_ADD_I8;
6275 if (!mono_arch_opcode_supported (opcode))
6277 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6278 ins_iconst->inst_c0 = -1;
6279 ins_iconst->dreg = mono_alloc_ireg (cfg);
6280 MONO_ADD_INS (cfg->cbb, ins_iconst);
6282 MONO_INST_NEW (cfg, ins, opcode);
6283 ins->dreg = mono_alloc_ireg (cfg);
6284 ins->inst_basereg = args [0]->dreg;
6285 ins->inst_offset = 0;
6286 ins->sreg2 = ins_iconst->dreg;
6287 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6288 MONO_ADD_INS (cfg->cbb, ins);
6290 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6293 if (fsig->params [0]->type == MONO_TYPE_I4) {
6294 opcode = OP_ATOMIC_ADD_I4;
6295 cfg->has_atomic_add_i4 = TRUE;
6297 #if SIZEOF_REGISTER == 8
6298 else if (fsig->params [0]->type == MONO_TYPE_I8)
6299 opcode = OP_ATOMIC_ADD_I8;
6302 if (!mono_arch_opcode_supported (opcode))
6304 MONO_INST_NEW (cfg, ins, opcode);
6305 ins->dreg = mono_alloc_ireg (cfg);
6306 ins->inst_basereg = args [0]->dreg;
6307 ins->inst_offset = 0;
6308 ins->sreg2 = args [1]->dreg;
6309 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6310 MONO_ADD_INS (cfg->cbb, ins);
6313 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6314 MonoInst *f2i = NULL, *i2f;
6315 guint32 opcode, f2i_opcode, i2f_opcode;
6316 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6317 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6319 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6320 fsig->params [0]->type == MONO_TYPE_R4) {
6321 opcode = OP_ATOMIC_EXCHANGE_I4;
6322 f2i_opcode = OP_MOVE_F_TO_I4;
6323 i2f_opcode = OP_MOVE_I4_TO_F;
6324 cfg->has_atomic_exchange_i4 = TRUE;
6326 #if SIZEOF_REGISTER == 8
6328 fsig->params [0]->type == MONO_TYPE_I8 ||
6329 fsig->params [0]->type == MONO_TYPE_R8 ||
6330 fsig->params [0]->type == MONO_TYPE_I) {
6331 opcode = OP_ATOMIC_EXCHANGE_I8;
6332 f2i_opcode = OP_MOVE_F_TO_I8;
6333 i2f_opcode = OP_MOVE_I8_TO_F;
6336 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6337 opcode = OP_ATOMIC_EXCHANGE_I4;
6338 cfg->has_atomic_exchange_i4 = TRUE;
6344 if (!mono_arch_opcode_supported (opcode))
6348 /* TODO: Decompose these opcodes instead of bailing here. */
6349 if (COMPILE_SOFT_FLOAT (cfg))
6352 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6353 f2i->dreg = mono_alloc_ireg (cfg);
6354 f2i->sreg1 = args [1]->dreg;
6355 if (f2i_opcode == OP_MOVE_F_TO_I4)
6356 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6357 MONO_ADD_INS (cfg->cbb, f2i);
6360 MONO_INST_NEW (cfg, ins, opcode);
6361 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6362 ins->inst_basereg = args [0]->dreg;
6363 ins->inst_offset = 0;
6364 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6365 MONO_ADD_INS (cfg->cbb, ins);
6367 switch (fsig->params [0]->type) {
6369 ins->type = STACK_I4;
6372 ins->type = STACK_I8;
6375 #if SIZEOF_REGISTER == 8
6376 ins->type = STACK_I8;
6378 ins->type = STACK_I4;
6383 ins->type = STACK_R8;
6386 g_assert (mini_type_is_reference (fsig->params [0]));
6387 ins->type = STACK_OBJ;
6392 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6393 i2f->dreg = mono_alloc_freg (cfg);
6394 i2f->sreg1 = ins->dreg;
6395 i2f->type = STACK_R8;
6396 if (i2f_opcode == OP_MOVE_I4_TO_F)
6397 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6398 MONO_ADD_INS (cfg->cbb, i2f);
6403 if (cfg->gen_write_barriers && is_ref)
6404 emit_write_barrier (cfg, args [0], args [1]);
6406 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6407 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6408 guint32 opcode, f2i_opcode, i2f_opcode;
6409 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6410 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6412 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6413 fsig->params [1]->type == MONO_TYPE_R4) {
6414 opcode = OP_ATOMIC_CAS_I4;
6415 f2i_opcode = OP_MOVE_F_TO_I4;
6416 i2f_opcode = OP_MOVE_I4_TO_F;
6417 cfg->has_atomic_cas_i4 = TRUE;
6419 #if SIZEOF_REGISTER == 8
6421 fsig->params [1]->type == MONO_TYPE_I8 ||
6422 fsig->params [1]->type == MONO_TYPE_R8 ||
6423 fsig->params [1]->type == MONO_TYPE_I) {
6424 opcode = OP_ATOMIC_CAS_I8;
6425 f2i_opcode = OP_MOVE_F_TO_I8;
6426 i2f_opcode = OP_MOVE_I8_TO_F;
6429 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6430 opcode = OP_ATOMIC_CAS_I4;
6431 cfg->has_atomic_cas_i4 = TRUE;
6437 if (!mono_arch_opcode_supported (opcode))
6441 /* TODO: Decompose these opcodes instead of bailing here. */
6442 if (COMPILE_SOFT_FLOAT (cfg))
6445 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6446 f2i_new->dreg = mono_alloc_ireg (cfg);
6447 f2i_new->sreg1 = args [1]->dreg;
6448 if (f2i_opcode == OP_MOVE_F_TO_I4)
6449 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6450 MONO_ADD_INS (cfg->cbb, f2i_new);
6452 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6453 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6454 f2i_cmp->sreg1 = args [2]->dreg;
6455 if (f2i_opcode == OP_MOVE_F_TO_I4)
6456 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6457 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6460 MONO_INST_NEW (cfg, ins, opcode);
6461 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6462 ins->sreg1 = args [0]->dreg;
6463 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6464 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6465 MONO_ADD_INS (cfg->cbb, ins);
6467 switch (fsig->params [1]->type) {
6469 ins->type = STACK_I4;
6472 ins->type = STACK_I8;
6475 #if SIZEOF_REGISTER == 8
6476 ins->type = STACK_I8;
6478 ins->type = STACK_I4;
6482 ins->type = cfg->r4_stack_type;
6485 ins->type = STACK_R8;
6488 g_assert (mini_type_is_reference (fsig->params [1]));
6489 ins->type = STACK_OBJ;
6494 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6495 i2f->dreg = mono_alloc_freg (cfg);
6496 i2f->sreg1 = ins->dreg;
6497 i2f->type = STACK_R8;
6498 if (i2f_opcode == OP_MOVE_I4_TO_F)
6499 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6500 MONO_ADD_INS (cfg->cbb, i2f);
6505 if (cfg->gen_write_barriers && is_ref)
6506 emit_write_barrier (cfg, args [0], args [1]);
6508 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6509 fsig->params [1]->type == MONO_TYPE_I4) {
6510 MonoInst *cmp, *ceq;
6512 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6515 /* int32 r = CAS (location, value, comparand); */
6516 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6517 ins->dreg = alloc_ireg (cfg);
6518 ins->sreg1 = args [0]->dreg;
6519 ins->sreg2 = args [1]->dreg;
6520 ins->sreg3 = args [2]->dreg;
6521 ins->type = STACK_I4;
6522 MONO_ADD_INS (cfg->cbb, ins);
6524 /* bool result = r == comparand; */
6525 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6526 cmp->sreg1 = ins->dreg;
6527 cmp->sreg2 = args [2]->dreg;
6528 cmp->type = STACK_I4;
6529 MONO_ADD_INS (cfg->cbb, cmp);
6531 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6532 ceq->dreg = alloc_ireg (cfg);
6533 ceq->type = STACK_I4;
6534 MONO_ADD_INS (cfg->cbb, ceq);
6536 /* *success = result; */
6537 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6539 cfg->has_atomic_cas_i4 = TRUE;
6541 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6542 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6546 } else if (cmethod->klass->image == mono_defaults.corlib &&
6547 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6548 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6551 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6553 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6554 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6556 if (fsig->params [0]->type == MONO_TYPE_I1)
6557 opcode = OP_ATOMIC_LOAD_I1;
6558 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6559 opcode = OP_ATOMIC_LOAD_U1;
6560 else if (fsig->params [0]->type == MONO_TYPE_I2)
6561 opcode = OP_ATOMIC_LOAD_I2;
6562 else if (fsig->params [0]->type == MONO_TYPE_U2)
6563 opcode = OP_ATOMIC_LOAD_U2;
6564 else if (fsig->params [0]->type == MONO_TYPE_I4)
6565 opcode = OP_ATOMIC_LOAD_I4;
6566 else if (fsig->params [0]->type == MONO_TYPE_U4)
6567 opcode = OP_ATOMIC_LOAD_U4;
6568 else if (fsig->params [0]->type == MONO_TYPE_R4)
6569 opcode = OP_ATOMIC_LOAD_R4;
6570 else if (fsig->params [0]->type == MONO_TYPE_R8)
6571 opcode = OP_ATOMIC_LOAD_R8;
6572 #if SIZEOF_REGISTER == 8
6573 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6574 opcode = OP_ATOMIC_LOAD_I8;
6575 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6576 opcode = OP_ATOMIC_LOAD_U8;
6578 else if (fsig->params [0]->type == MONO_TYPE_I)
6579 opcode = OP_ATOMIC_LOAD_I4;
6580 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6581 opcode = OP_ATOMIC_LOAD_U4;
6585 if (!mono_arch_opcode_supported (opcode))
6588 MONO_INST_NEW (cfg, ins, opcode);
6589 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6590 ins->sreg1 = args [0]->dreg;
6591 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6592 MONO_ADD_INS (cfg->cbb, ins);
6594 switch (fsig->params [0]->type) {
6595 case MONO_TYPE_BOOLEAN:
6602 ins->type = STACK_I4;
6606 ins->type = STACK_I8;
6610 #if SIZEOF_REGISTER == 8
6611 ins->type = STACK_I8;
6613 ins->type = STACK_I4;
6617 ins->type = cfg->r4_stack_type;
6620 ins->type = STACK_R8;
6623 g_assert (mini_type_is_reference (fsig->params [0]));
6624 ins->type = STACK_OBJ;
6630 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6632 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6634 if (fsig->params [0]->type == MONO_TYPE_I1)
6635 opcode = OP_ATOMIC_STORE_I1;
6636 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6637 opcode = OP_ATOMIC_STORE_U1;
6638 else if (fsig->params [0]->type == MONO_TYPE_I2)
6639 opcode = OP_ATOMIC_STORE_I2;
6640 else if (fsig->params [0]->type == MONO_TYPE_U2)
6641 opcode = OP_ATOMIC_STORE_U2;
6642 else if (fsig->params [0]->type == MONO_TYPE_I4)
6643 opcode = OP_ATOMIC_STORE_I4;
6644 else if (fsig->params [0]->type == MONO_TYPE_U4)
6645 opcode = OP_ATOMIC_STORE_U4;
6646 else if (fsig->params [0]->type == MONO_TYPE_R4)
6647 opcode = OP_ATOMIC_STORE_R4;
6648 else if (fsig->params [0]->type == MONO_TYPE_R8)
6649 opcode = OP_ATOMIC_STORE_R8;
6650 #if SIZEOF_REGISTER == 8
6651 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6652 opcode = OP_ATOMIC_STORE_I8;
6653 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6654 opcode = OP_ATOMIC_STORE_U8;
6656 else if (fsig->params [0]->type == MONO_TYPE_I)
6657 opcode = OP_ATOMIC_STORE_I4;
6658 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6659 opcode = OP_ATOMIC_STORE_U4;
6663 if (!mono_arch_opcode_supported (opcode))
6666 MONO_INST_NEW (cfg, ins, opcode);
6667 ins->dreg = args [0]->dreg;
6668 ins->sreg1 = args [1]->dreg;
6669 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6670 MONO_ADD_INS (cfg->cbb, ins);
6672 if (cfg->gen_write_barriers && is_ref)
6673 emit_write_barrier (cfg, args [0], args [1]);
6679 } else if (cmethod->klass->image == mono_defaults.corlib &&
6680 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6681 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6682 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6683 if (should_insert_brekpoint (cfg->method)) {
6684 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6686 MONO_INST_NEW (cfg, ins, OP_NOP);
6687 MONO_ADD_INS (cfg->cbb, ins);
6691 } else if (cmethod->klass->image == mono_defaults.corlib &&
6692 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6693 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6694 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6696 EMIT_NEW_ICONST (cfg, ins, 1);
6698 EMIT_NEW_ICONST (cfg, ins, 0);
6701 } else if (cmethod->klass->image == mono_defaults.corlib &&
6702 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6703 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6704 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6705 /* No stack walks are currently available, so implement this as an intrinsic */
6706 MonoInst *assembly_ins;
6708 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6709 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6712 } else if (cmethod->klass == mono_defaults.math_class) {
6714 * There is general branchless code for Min/Max, but it does not work for
6716 * http://everything2.com/?node_id=1051618
6718 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6719 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6720 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6721 !strcmp (cmethod->klass->name, "Selector")) ||
6722 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6723 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6724 !strcmp (cmethod->klass->name, "Selector"))
6726 if (cfg->backend->have_objc_get_selector &&
6727 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6728 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6731 MonoJumpInfoToken *ji;
6734 cfg->disable_llvm = TRUE;
6736 if (args [0]->opcode == OP_GOT_ENTRY) {
6737 pi = (MonoInst *)args [0]->inst_p1;
6738 g_assert (pi->opcode == OP_PATCH_INFO);
6739 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6740 ji = (MonoJumpInfoToken *)pi->inst_p0;
6742 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6743 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6746 NULLIFY_INS (args [0]);
6749 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6750 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6751 ins->dreg = mono_alloc_ireg (cfg);
6753 ins->inst_p0 = mono_string_to_utf8 (s);
6754 MONO_ADD_INS (cfg->cbb, ins);
6759 #ifdef MONO_ARCH_SIMD_INTRINSICS
6760 if (cfg->opt & MONO_OPT_SIMD) {
6761 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6767 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6771 if (COMPILE_LLVM (cfg)) {
6772 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6777 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6781 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-internal replacement where one exists.
 * The case visible here replaces String.InternalAllocateStr with a call to the
 * managed GC allocator, skipped when allocation profiling or MONO_OPT_SHARED
 * is active.  Returns the replacement call MonoInst.
 * NOTE(review): this listing elides some original lines (the embedded
 * numbering skips, e.g. 6787, 6794, 6798-6800) -- verify the full function
 * against the original source.
 */
6784 inline static MonoInst*
6785 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6786 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6788 if (method->klass == mono_defaults.string_class) {
6789 /* managed string allocation support */
6790 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6791 MonoInst *iargs [2];
6792 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6793 MonoMethod *managed_alloc = NULL;
6795 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross compilers cannot call into the runtime's GC to fetch the allocator. */
6796 #ifndef MONO_CROSS_COMPILE
6797 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length-ish arg from the caller). */
6801 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6802 iargs [1] = args [0];
6803 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each argument (including an implicit
 * 'this') of the method being inlined and store the corresponding stack
 * value SP [i] into it, recording the originating cil_code on each store.
 * NOTE(review): some lines are elided from this listing (numbering skips);
 * verify the loop body against the original source.
 */
6810 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6812 MonoInst *store, *temp;
6815 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the 'this' slot the static signature has no entry, so derive the type from the stack. */
6816 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6819 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6820 * would be different than the MonoInst's used to represent arguments, and
6821 * the ldelema implementation can't deal with that.
6822 * Solution: When ldelema is used on an inline argument, create a var for
6823 * it, emit ldelema on that var, and emit the saving code below in
6824 * inline_method () if needed.
6826 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6827 cfg->args [i] = temp;
6828 /* This uses cfg->args [i] which is set by the preceding line */
6829 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6830 store->cil_code = sp [0]->cil_code;
6835 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6836 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6838 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: read MONO_INLINE_CALLED_METHOD_NAME_LIMIT from the
 * environment once (cached in a static) and only allow inlining of called
 * methods whose full name starts with that prefix (strncmp == 0).
 * NOTE(review): lines are elided from this listing (e.g. the branch taken
 * when the env var is unset); verify against the original source.
 */
6840 check_inline_called_method_name_limit (MonoMethod *called_method)
6843 static const char *limit = NULL;
6845 if (limit == NULL) {
6846 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6848 if (limit_string != NULL)
6849 limit = limit_string;
6854 if (limit [0] != '\0') {
6855 char *called_method_name = mono_method_full_name (called_method, TRUE);
6857 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6858 g_free (called_method_name);
6860 //return (strncmp_result <= 0);
6861 return (strncmp_result == 0);
6868 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid mirroring check_inline_called_method_name_limit, but
 * filtering on the CALLER: reads MONO_INLINE_CALLER_METHOD_NAME_LIMIT once
 * (cached in a static) and only allows inlining when the caller's full name
 * starts with that prefix.
 * NOTE(review): some lines are elided from this listing; verify against the
 * original source.
 */
6870 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6873 static const char *limit = NULL;
6875 if (limit == NULL) {
6876 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6877 if (limit_string != NULL) {
6878 limit = limit_string;
6884 if (limit [0] != '\0') {
6885 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6887 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6888 g_free (caller_method_name);
6890 //return (strncmp_result <= 0);
6891 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG to the zero value appropriate for
 * RTYPE: NULL pconst, integer 0, long 0, float/double constants pointing at
 * the static r4_0/r8_0 zeros, or VZERO for value types and value-type
 * generic parameters; the final fallback emits a NULL pconst.
 * NOTE(review): lines are elided from this listing (e.g. declarations of
 * 'ins'/'t' and the leading branch condition); verify against the original
 * source.
 */
6899 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static zeros so OP_R4CONST/OP_R8CONST can reference their addresses. */
6901 static double r8_0 = 0.0;
6902 static float r4_0 = 0.0;
6906 rtype = mini_get_underlying_type (rtype);
6910 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6911 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6912 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6913 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6914 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6915 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6916 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6917 ins->type = STACK_R4;
6918 ins->inst_p0 = (void*)&r4_0;
6920 MONO_ADD_INS (cfg->cbb, ins);
6921 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6922 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6923 ins->type = STACK_R8;
6924 ins->inst_p0 = (void*)&r8_0;
6926 MONO_ADD_INS (cfg->cbb, ins);
6927 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6928 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6929 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6930 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6931 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6933 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar, but emits OP_DUMMY_* initializations (placeholders
 * that keep the IR valid without materializing a real value).  The type
 * dispatch mirrors emit_init_rvar case-for-case; the final fallback emits a
 * real initialization via emit_init_rvar.
 * NOTE(review): some lines are elided from this listing (numbering skips);
 * verify against the original source.
 */
6938 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6942 rtype = mini_get_underlying_type (rtype);
6946 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6947 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6948 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6949 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6950 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6951 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6952 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6953 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6954 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6955 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6956 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6957 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6958 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6959 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6961 emit_init_rvar (cfg, dreg, rtype);
6965 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float a fresh
 * register is initialized and then stored into the local with LOCSTORE;
 * otherwise the local's own dreg is initialized directly.  The INIT flag
 * selects between a real initialization (emit_init_rvar) and a dummy one
 * (emit_dummy_init_rvar) that merely keeps the IR valid.
 * NOTE(review): the branch structure is partially elided in this listing;
 * verify against the original source.
 */
6967 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6969 MonoInst *var = cfg->locals [local];
6970 if (COMPILE_SOFT_FLOAT (cfg)) {
6972 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6973 emit_init_rvar (cfg, reg, type);
6974 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6977 emit_init_rvar (cfg, var->dreg, type);
6979 emit_dummy_init_rvar (cfg, var->dreg, type);
6986 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Inline CMETHOD at the current emission point.  Saves the parts of CFG
 * state that mono_method_to_ir () clobbers (locals, args, cbb, cil offsets,
 * generic context, ...), creates start/end basic blocks, converts the callee
 * to IR, then restores the saved state.  On success (cost within the limit,
 * INLINE_ALWAYS, or AggressiveInlining) the new blocks are linked and merged
 * into the caller's CFG and the return-value temp is loaded; on failure the
 * added blocks are discarded.  Returns the inlining cost from
 * mono_method_to_ir (negative on failure).
 * NOTE(review): this listing elides many original lines (the embedded
 * numbering skips throughout); the control flow below is incomplete as
 * shown -- verify against the original source.
 */
6989 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6990 guchar *ip, guint real_offset, gboolean inline_always)
6992 MonoInst *ins, *rvar = NULL;
6993 MonoMethodHeader *cheader;
6994 MonoBasicBlock *ebblock, *sbblock;
6996 MonoMethod *prev_inlined_method;
6997 MonoInst **prev_locals, **prev_args;
6998 MonoType **prev_arg_types;
6999 guint prev_real_offset;
7000 GHashTable *prev_cbb_hash;
7001 MonoBasicBlock **prev_cil_offset_to_bb;
7002 MonoBasicBlock *prev_cbb;
7003 unsigned char* prev_cil_start;
7004 guint32 prev_cil_offset_to_bb_len;
7005 MonoMethod *prev_current_method;
7006 MonoGenericContext *prev_generic_context;
7007 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7009 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven filters restricting which methods may be inlined (debugging aid). */
7011 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7012 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7015 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7016 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7021 fsig = mono_method_signature (cmethod);
7023 if (cfg->verbose_level > 2)
7024 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7026 if (!cmethod->inline_info) {
7027 cfg->stat_inlineable_methods++;
7028 cmethod->inline_info = 1;
7031 /* allocate local variables */
7032 cheader = mono_method_get_header (cmethod);
7034 if (cheader == NULL || mono_loader_get_last_error ()) {
7035 MonoLoaderError *error = mono_loader_get_last_error ();
7038 mono_metadata_free_mh (cheader);
7039 if (inline_always && error)
7040 mono_cfg_set_exception (cfg, error->exception_type);
7042 mono_loader_clear_error ();
7046 /* Must verify before creating locals as it can cause the JIT to assert. */
7047 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7048 mono_metadata_free_mh (cheader);
7052 /* allocate space to store the return value */
7053 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7054 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the callee; the caller's is restored below. */
7057 prev_locals = cfg->locals;
7058 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7059 for (i = 0; i < cheader->num_locals; ++i)
7060 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7062 /* allocate start and end blocks */
7063 /* This is needed so if the inline is aborted, we can clean up */
7064 NEW_BBLOCK (cfg, sbblock);
7065 sbblock->real_offset = real_offset;
7067 NEW_BBLOCK (cfg, ebblock);
7068 ebblock->block_num = cfg->num_bblocks++;
7069 ebblock->real_offset = real_offset;
/* Save all CFG state that mono_method_to_ir () will overwrite. */
7071 prev_args = cfg->args;
7072 prev_arg_types = cfg->arg_types;
7073 prev_inlined_method = cfg->inlined_method;
7074 cfg->inlined_method = cmethod;
7075 cfg->ret_var_set = FALSE;
7076 cfg->inline_depth ++;
7077 prev_real_offset = cfg->real_offset;
7078 prev_cbb_hash = cfg->cbb_hash;
7079 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7080 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7081 prev_cil_start = cfg->cil_start;
7082 prev_cbb = cfg->cbb;
7083 prev_current_method = cfg->current_method;
7084 prev_generic_context = cfg->generic_context;
7085 prev_ret_var_set = cfg->ret_var_set;
7086 prev_disable_inline = cfg->disable_inline;
7088 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7091 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7093 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state. */
7095 cfg->inlined_method = prev_inlined_method;
7096 cfg->real_offset = prev_real_offset;
7097 cfg->cbb_hash = prev_cbb_hash;
7098 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7099 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7100 cfg->cil_start = prev_cil_start;
7101 cfg->locals = prev_locals;
7102 cfg->args = prev_args;
7103 cfg->arg_types = prev_arg_types;
7104 cfg->current_method = prev_current_method;
7105 cfg->generic_context = prev_generic_context;
7106 cfg->ret_var_set = prev_ret_var_set;
7107 cfg->disable_inline = prev_disable_inline;
7108 cfg->inline_depth --;
7110 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7111 if (cfg->verbose_level > 2)
7112 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7114 cfg->stat_inlined_methods++;
7116 /* always add some code to avoid block split failures */
7117 MONO_INST_NEW (cfg, ins, OP_NOP);
7118 MONO_ADD_INS (prev_cbb, ins);
7120 prev_cbb->next_bb = sbblock;
7121 link_bblock (cfg, prev_cbb, sbblock);
7124 * Get rid of the begin and end bblocks if possible to aid local
7127 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7129 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7130 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7132 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7133 MonoBasicBlock *prev = ebblock->in_bb [0];
7134 mono_merge_basic_blocks (cfg, prev, ebblock);
7136 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7137 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7138 cfg->cbb = prev_cbb;
7142 * It's possible that the rvar is set in some prev bblock, but not in others.
7148 for (i = 0; i < ebblock->in_count; ++i) {
7149 bb = ebblock->in_bb [i];
7151 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7154 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7164 * If the inlined method contains only a throw, then the ret var is not
7165 * set, so set it to a dummy value.
7168 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7170 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7173 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7176 if (cfg->verbose_level > 2)
7177 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7178 cfg->exception_type = MONO_EXCEPTION_NONE;
7179 mono_loader_clear_error ();
7181 /* This gets rid of the newly added bblocks */
7182 cfg->cbb = prev_cbb;
7184 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7189 * Some of these comments may well be out-of-date.
7190 * Design decisions: we do a single pass over the IL code (and we do bblock
7191 * splitting/merging in the few cases when it's required: a back jump to an IL
7192 * address that was not already seen as bblock starting point).
7193 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7194 * Complex operations are decomposed in simpler ones right away. We need to let the
7195 * arch-specific code peek and poke inside this process somehow (except when the
7196 * optimizations can take advantage of the full semantic info of coarse opcodes).
7197 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7198 * MonoInst->opcode initially is the IL opcode or some simplification of that
7199 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7200 * opcode with value bigger than OP_LAST.
7201 * At this point the IR can be handed over to an interpreter, a dumb code generator
7202 * or to the optimizing code generator that will translate it to SSA form.
7204 * Profiling directed optimizations.
7205 * We may compile by default with few or no optimizations and instrument the code
7206 * or the user may indicate what methods to optimize the most either in a config file
7207 * or through repeated runs where the compiler applies offline the optimizations to
7208 * each method and then decides if it was worth it.
7211 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7212 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7213 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7214 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7215 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7216 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7217 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7218 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7220 /* offset from br.s -> br like opcodes */
7221 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether IL address IP belongs to basic block BB: true when no
 * block starts at IP (b == NULL) or when the block starting there is BB
 * itself.  Used to check that a peephole pattern does not cross a block
 * boundary.
 */
7224 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7226 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7228 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create basic blocks (via GET_BBLOCK)
 * at every branch target and at the instruction following each branch or
 * switch.  Blocks that end in CEE_THROW are marked out_of_line so they can
 * be laid out away from the hot path.
 * NOTE(review): many lines are elided from this listing (e.g. the opcode
 * advance for each argument kind and several case labels); verify against
 * the original source.
 */
7232 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7234 unsigned char *ip = start;
7235 unsigned char *target;
7238 MonoBasicBlock *bblock;
7239 const MonoOpcode *opcode;
7242 cli_addr = ip - start;
7243 i = mono_opcode_value ((const guint8 **)&ip, end);
7246 opcode = &mono_opcodes [i];
/* Advance over the operand and create blocks for any branch targets. */
7247 switch (opcode->argument) {
7248 case MonoInlineNone:
7251 case MonoInlineString:
7252 case MonoInlineType:
7253 case MonoInlineField:
7254 case MonoInlineMethod:
7257 case MonoShortInlineR:
7264 case MonoShortInlineVar:
7265 case MonoShortInlineI:
7268 case MonoShortInlineBrTarget:
7269 target = start + cli_addr + 2 + (signed char)ip [1];
7270 GET_BBLOCK (cfg, bblock, target);
7273 GET_BBLOCK (cfg, bblock, ip);
7275 case MonoInlineBrTarget:
7276 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7277 GET_BBLOCK (cfg, bblock, target);
7280 GET_BBLOCK (cfg, bblock, ip);
7282 case MonoInlineSwitch: {
7283 guint32 n = read32 (ip + 1);
/* The switch's n 32-bit targets are relative to the end of the jump table. */
7286 cli_addr += 5 + 4 * n;
7287 target = start + cli_addr;
7288 GET_BBLOCK (cfg, bblock, target);
7290 for (j = 0; j < n; ++j) {
7291 target = start + cli_addr + (gint32)read32 (ip);
7292 GET_BBLOCK (cfg, bblock, target);
7302 g_assert_not_reached ();
7305 if (i == CEE_THROW) {
7306 unsigned char *bb_start = ip - 1;
7308 /* Find the start of the bblock containing the throw */
7310 while ((bb_start >= start) && !bblock) {
7311 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7315 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the token indexes
 * the wrapper's own data (then inflated with CONTEXT where applicable);
 * otherwise the token is resolved through the metadata of M's image.
 * "allow_open" refers to the caller tolerating open constructed types --
 * see mini_get_method for the checked variant.
 * NOTE(review): some lines are elided from this listing; verify against the
 * original source.
 */
7325 static inline MonoMethod *
7326 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7330 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7331 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7334 method = mono_class_inflate_generic_method_checked (method, context, &error);
7335 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7338 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open, but when not compiling gshared code
 * rejects methods whose declaring class is an open constructed type.
 * NOTE(review): the rejection branch's body is elided from this listing;
 * verify against the original source.
 */
7344 static inline MonoMethod *
7345 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7347 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7349 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass.  For wrappers the class comes from the
 * wrapper data and is inflated with CONTEXT; otherwise the typespec is
 * resolved and inflated through METHOD's image.  The class is initialized
 * before returning.
 * NOTE(review): some lines are elided from this listing; verify against the
 * original source.
 */
7355 static inline MonoClass*
7356 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7361 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7362 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7364 klass = mono_class_inflate_generic_class (klass, context);
7366 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7367 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7370 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature.  For wrappers the signature
 * comes from the wrapper data; otherwise it is parsed from METHOD's image
 * metadata.  The signature is then inflated with CONTEXT (errors asserted,
 * not propagated).
 * NOTE(review): some lines are elided from this listing; verify against the
 * original source.
 */
7374 static inline MonoMethodSignature*
7375 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7377 MonoMethodSignature *fsig;
7379 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7380 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7382 fsig = mono_metadata_parse_signature (method->klass->image, token);
7386 fsig = mono_inflate_generic_signature(fsig, context, &error);
7388 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Lazily resolve and cache (in a static) the
 * SecurityManager.ThrowException(exception) helper method used by
 * emit_throw_exception ().
 * NOTE(review): the return and the lazy-init guard are partially elided from
 * this listing; verify against the original source.
 */
7394 throw_exception (void)
7396 static MonoMethod *method = NULL;
7399 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7400 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a managed call to SecurityManager.ThrowException passing the
 * pre-built exception object EX as a pointer constant.
 */
7407 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7409 MonoMethod *thrower = throw_exception ();
7412 EMIT_NEW_PCONST (cfg, args [0], ex);
7413 mono_emit_method_call (cfg, thrower, args, NULL);
7417 * Return the original method if a wrapper is specified. We can only access
7418 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the method it wraps so its custom attributes
 * (security level) can be inspected.  Non-wrappers are handled by the first
 * branch; native-to-managed wrappers get the second (their elided results
 * are not visible in this listing -- verify against the original source).
 */
7421 get_original_method (MonoMethod *method)
7423 if (method->wrapper_type == MONO_WRAPPER_NONE)
7426 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7427 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7430 /* in other cases we need to find the original method */
7431 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: ask the runtime whether CALLER (unwrapped via
 * get_original_method) may access FIELD; if an exception object is returned
 * the code to throw it is emitted at the current point.
 */
7435 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7437 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7438 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7440 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, call-site variant: ask the runtime whether
 * CALLER (unwrapped via get_original_method) may call CALLEE; if an
 * exception object is returned the code to throw it is emitted.
 */
7444 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7446 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7447 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7449 emit_throw_exception (cfg, ex);
7453 * Check that the IL instructions at ip are the array initialization
7454 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the dup / ldtoken <field> / call RuntimeHelpers.InitializeArray
 * IL pattern at IP and, when it matches, return a pointer to the field's
 * static data blob (or, for AOT, the RVA encoded as a pointer) and store
 * the data size and field token through OUT_SIZE / OUT_FIELD_TOKEN.
 * NOTE(review): this listing elides many lines (element-size computation
 * per case label, early returns); verify against the original source.
 */
7457 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7460 * newarr[System.Int32]
7462 * ldtoken field valuetype ...
7463 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's token table (Field). */
7465 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7467 guint32 token = read32 (ip + 7);
7468 guint32 field_token = read32 (ip + 2);
7469 guint32 field_index = field_token & 0xffffff;
7471 const char *data_ptr;
7473 MonoMethod *cmethod;
7474 MonoClass *dummy_class;
7475 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7479 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7483 *out_field_token = field_token;
7485 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the call really is corlib's RuntimeHelpers.InitializeArray. */
7488 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7490 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7491 case MONO_TYPE_BOOLEAN:
7495 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7496 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7497 case MONO_TYPE_CHAR:
7514 if (size > mono_type_size (field->type, &dummy_align))
7517 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7518 if (!image_is_dynamic (method->klass->image)) {
7519 field_index = read32 (ip + 2) & 0xffffff;
7520 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7521 data_ptr = mono_image_rva_map (method->klass->image, rva);
7522 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7523 /* for aot code we do the lookup on load */
7524 if (aot && data_ptr)
7525 return (const char *)GUINT_TO_POINTER (rva);
7527 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7529 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending IL at IP (or noting an empty body).  The
 * header is queued on headers_to_free rather than freed immediately.
 */
7537 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7539 char *method_fname = mono_method_full_name (method, TRUE);
7541 MonoMethodHeader *header = mono_method_get_header (method);
7543 if (header->code_size == 0)
7544 method_code = g_strdup ("method body is empty.");
7546 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7547 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7548 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7549 g_free (method_fname);
7550 g_free (method_code);
7551 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort compilation with a pre-built exception object: mark CFG with
 * MONO_EXCEPTION_OBJECT_SUPPLIED, register exception_ptr as a GC root so
 * the object survives until it is thrown, and stash the object.
 */
7555 set_exception_object (MonoCompile *cfg, MonoException *exception)
7557 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7558 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7559 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value SP [0] into local N.  When the
 * store would be a plain reg-reg OP_MOVE and the value is the immediately
 * preceding ICONST/I8CONST, the move is elided by retargeting that
 * instruction's dreg; otherwise a normal LOCSTORE is emitted.
 */
7563 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7566 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7567 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7568 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7569 /* Optimize reg-reg moves away */
7571 * Can't optimize other opcodes, since sp[0] might point to
7572 * the last ins of a decomposed opcode.
7574 sp [0]->dreg = (cfg)->locals [n]->dreg;
7576 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7581 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for ldloca: when the ldloca at IP is immediately followed by
 * initobj on the same local (and the pattern stays within the current
 * basic block), emit the local initialization directly instead of taking
 * the local's address, which would inhibit later optimizations.  Returns
 * the new IP on success (elided from this listing) -- presumably NULL/ip
 * otherwise; verify against the original source.
 */
7584 static inline unsigned char *
7585 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7595 local = read16 (ip + 2);
7599 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7600 /* From the INITOBJ case */
7601 token = read32 (ip + 2);
7602 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7603 CHECK_TYPELOAD (klass);
7604 type = mini_get_underlying_type (&klass->byval_arg);
7605 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual or interface call to CMETHOD when compiling in
 * llvm-only mode, where there are no IMT/vtable trampolines.  Vtable and
 * IMT slots hold function descriptors (an <addr, arg> pair); slots which
 * are not yet initialized are resolved through runtime icalls.
 *   SP [0] is the receiver, FSIG the call signature, IMT_ARG an optional
 * precomputed imt argument.  Returns the emitted call instruction.
 */
7613 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp, MonoInst *imt_arg)
7615 MonoInst *icall_args [16];
7616 MonoInst *call_target, *ins;
7618 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7619 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
/* Explicit null check on the receiver: the vtable loads below assume it is non-NULL */
7622 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7624 // FIXME: Pass a vtable to the icalls
/* Interface methods dispatch through an IMT slot, other virtuals through their vtable index */
7627 slot = mono_method_get_imt_slot (cmethod);
7629 slot = mono_method_get_vtable_index (cmethod);
/* Case 1: plain (non-generic, non-interface, non-gsharedvt) virtual call */
7631 if (!fsig->generic_param_count && !is_iface && !imt_arg && !is_gsharedvt) {
7633 * The simplest case, a normal virtual call.
7635 int this_reg = sp [0]->dreg;
7636 int vtable_reg = alloc_preg (cfg);
7637 int slot_reg = alloc_preg (cfg);
7638 int addr_reg = alloc_preg (cfg);
7639 int arg_reg = alloc_preg (cfg);
7641 MonoBasicBlock *non_null_bb;
7643 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7644 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7646 /* Load the vtable slot, which contains a function descriptor. */
7647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7649 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-zero) — marked LIKELY for the branch */
7651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7652 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path: slot is NULL, have the runtime initialize it and hand back its value */
7656 // FIXME: Make the wrapper use the preserveall cconv
7657 // FIXME: Use one icall per slot for small slot numbers ?
7658 icall_args [0] = sp [0];
7659 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7660 /* Make the icall return the vtable slot value to save some code space */
7661 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7662 ins->dreg = slot_reg;
7663 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7666 MONO_START_BB (cfg, non_null_bb);
7667 /* Load the address + arg from the vtable slot */
7668 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7671 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: non-generic interface call through an IMT slot */
7674 if (!fsig->generic_param_count && is_iface && !imt_arg && !is_gsharedvt) {
7676 * A simple interface call
7678 * We make a call through an imt slot to obtain the function descriptor we need to call.
7679 * The imt slot contains a function descriptor for a runtime function + arg.
7680 * The slot is already initialized when the vtable is created so there is no need
7683 int this_reg = sp [0]->dreg;
7684 int vtable_reg = alloc_preg (cfg);
7685 int slot_reg = alloc_preg (cfg);
7686 int addr_reg = alloc_preg (cfg);
7687 int arg_reg = alloc_preg (cfg);
7689 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper */
7692 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7694 /* Load the imt slot, which contains a function descriptor. */
7695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7697 /* Load the address + arg of the imt thunk from the imt slot */
7698 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7699 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7701 * IMT thunks in llvm-only mode are C functions which take an info argument
7702 * plus the imt method and return the ftndesc to call.
7704 icall_args [0] = thunk_arg_ins;
7705 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
7706 icall_args [1] = ins;
7707 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7709 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual call — slot holds a dynamically-grown imt thunk */
7712 if (fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7714 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7715 * dynamically extended as more instantiations are discovered.
7717 int this_reg = sp [0]->dreg;
7718 int vtable_reg = alloc_preg (cfg);
7719 int slot_reg = alloc_preg (cfg);
7720 int addr_reg = alloc_preg (cfg);
7721 int arg_reg = alloc_preg (cfg);
7722 int ftndesc_reg = alloc_preg (cfg);
7724 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7725 MonoBasicBlock *slowpath_bb, *end_bb;
7727 NEW_BBLOCK (cfg, slowpath_bb);
7728 NEW_BBLOCK (cfg, end_bb);
7730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7731 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7733 /* Load the imt slot, which contains a function descriptor. */
7734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7736 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7737 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7742 /* Same as with iface calls */
7743 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7744 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7745 icall_args [0] = thunk_arg_ins;
7746 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7747 cmethod, MONO_RGCTX_INFO_METHOD);
7748 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
7749 ftndesc_ins->dreg = ftndesc_reg;
7751 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7752 * they don't know about yet. Fall back to the slowpath in that case.
7754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7757 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the call through the runtime */
7760 MONO_START_BB (cfg, slowpath_bb);
7761 icall_args [0] = sp [0];
7762 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7763 imt_arg = emit_get_rgctx_method (cfg, context_used,
7764 cmethod, MONO_RGCTX_INFO_METHOD);
7765 icall_args [2] = imt_arg;
7766 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7767 ftndesc_ins->dreg = ftndesc_reg;
7768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7771 MONO_START_BB (cfg, end_bb);
7772 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Fallback: resolve the target with a runtime icall (gsharedvt and remaining cases) */
7775 // FIXME: Optimize this
7777 icall_args [0] = sp [0];
7778 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7780 if (fsig->generic_param_count) {
7781 /* virtual generic call */
7782 g_assert (!imt_arg);
7783 /* Same as the virtual generic case above */
7784 imt_arg = emit_get_rgctx_method (cfg, context_used,
7785 cmethod, MONO_RGCTX_INFO_METHOD);
7786 icall_args [2] = imt_arg;
7787 } else if (imt_arg) {
7788 icall_args [2] = imt_arg;
7790 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
7791 icall_args [2] = ins;
7794 // FIXME: For generic virtual calls, avoid computing the rgctx twice
/* Out parameter: the resolver icall writes the extra call argument through this address */
7796 arg_reg = alloc_preg (cfg);
7797 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7798 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7800 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
7802 * We handle virtual calls made from gsharedvt methods here instead
7803 * of the gsharedvt block above.
7806 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7808 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7811 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
7813 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
7817 * Pass the extra argument even if the callee doesn't receive it, most
7818 * calling conventions allow this.
7820 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or one of its subclasses,
 * by walking up the parent chain.  (NOTE(review): the loop and return
 * statements are elided in this view.)
 */
7824 is_exception_class (MonoClass *klass)
7827 if (klass == mono_defaults.exception_class)
7829 klass = klass->parent;
7835 * is_jit_optimizer_disabled:
7837 * Determine whether M's assembly has a DebuggableAttribute with the
7838 * IsJITOptimizerDisabled flag set. The result is cached per-assembly.
7841 is_jit_optimizer_disabled (MonoMethod *m)
7843 MonoAssembly *ass = m->klass->image->assembly;
7844 MonoCustomAttrInfo* attrs;
/* Cached lookup of System.Diagnostics.DebuggableAttribute */
7845 static MonoClass *klass;
7847 gboolean val = FALSE;
/* Fast path: result already computed and published for this assembly */
7850 if (ass->jit_optimizer_disabled_inited)
7851 return ass->jit_optimizer_disabled;
7854 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the value before the inited flag so racing readers never see
 * the flag set with a stale value (hence the memory barrier). */
7857 ass->jit_optimizer_disabled = FALSE;
7858 mono_memory_barrier ();
7859 ass->jit_optimizer_disabled_inited = TRUE;
/* Scan the assembly-level custom attributes for DebuggableAttribute */
7863 attrs = mono_custom_attrs_from_assembly (ass);
7865 for (i = 0; i < attrs->num_attrs; ++i) {
7866 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7868 MonoMethodSignature *sig;
7870 if (!attr->ctor || attr->ctor->klass != klass)
7872 /* Decode the attribute. See reflection.c */
7873 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
7874 g_assert (read16 (p) == 0x0001);
7877 // FIXME: Support named parameters
/* Only the DebuggableAttribute(bool, bool) ctor overload is handled */
7878 sig = mono_method_signature (attr->ctor);
7879 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7881 /* Two boolean arguments */
7885 mono_custom_attrs_free (attrs);
/* Same publish-then-flag ordering as above for the computed result */
7888 ass->jit_optimizer_disabled = val;
7889 mono_memory_barrier ();
7890 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG,
 * made by CALL_OPCODE, can actually be emitted as a tail call.  Starts from
 * the architecture's verdict and then vetoes cases which could leave the
 * callee with pointers into the caller's (soon to be gone) stack frame.
 */
7896 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7898 gboolean supported_tail_call;
/* Architecture-specific check on the caller/callee signature pair */
7901 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7903 for (i = 0; i < fsig->param_count; ++i) {
7904 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7905 /* These can point to the current method's stack */
7906 supported_tail_call = FALSE;
7908 if (fsig->hasthis && cmethod->klass->valuetype)
7909 /* this might point to the current method's stack */
7910 supported_tail_call = FALSE;
/* pinvokes, LMF-saving callers and (most) wrappers are excluded */
7911 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7912 supported_tail_call = FALSE;
7913 if (cfg->method->save_lmf)
7914 supported_tail_call = FALSE;
7915 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7916 supported_tail_call = FALSE;
/* Only plain CALL (not CALLVIRT/CALLI) is tail-called here */
7917 if (call_opcode != CEE_CALL)
7918 supported_tail_call = FALSE;
7920 /* Debugging support */
7922 if (supported_tail_call) {
7923 if (!mono_debug_count ())
7924 supported_tail_call = FALSE;
7928 return supported_tail_call;
7934 * Handle calls made to ctors from NEWOBJ opcodes. Picks between inlining
 * the ctor, an indirect (calli) call for gsharedvt/shared-generic cases,
 * and a direct call, emitting the rgctx/vtable argument where needed.
 * SP holds the already-allocated object followed by the ctor arguments.
7937 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7938 MonoInst **sp, guint8 *ip, int *inline_costs)
7940 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an extra vtable/mrgctx argument */
7942 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7943 mono_method_is_generic_sharable (cmethod, TRUE)) {
7944 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7945 mono_class_vtable (cfg->domain, cmethod->klass);
7946 CHECK_TYPELOAD (cmethod->klass);
7948 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7949 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7952 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7953 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7955 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7957 CHECK_TYPELOAD (cmethod->klass);
7958 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7963 /* Avoid virtual calls to ctors if possible */
7964 if (mono_class_is_marshalbyref (cmethod->klass))
7965 callvirt_this_arg = sp [0];
/* First try an intrinsic implementation of the ctor */
7967 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7968 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7969 CHECK_CFG_EXCEPTION;
/* Then try inlining (exception-class ctors are excluded) */
7970 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7971 mono_method_check_inlining (cfg, cmethod) &&
7972 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7975 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7976 cfg->real_offset += 5;
/* 5 is the cost of the call instruction the inline replaced */
7978 *inline_costs += costs - 5;
7980 INLINE_FAILURE ("inline failure");
7981 // FIXME-VT: Clean this up
7982 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7983 GSHAREDVT_FAILURE(*ip);
7984 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call through the gsharedvt out trampoline */
7986 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7989 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7991 if (cfg->llvm_only) {
7992 // FIXME: Avoid initializing vtable_arg
7993 emit_llvmonly_calli (cfg, fsig, sp, addr);
7995 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7997 } else if (context_used &&
7998 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7999 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8000 MonoInst *cmethod_addr;
8002 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8004 if (cfg->llvm_only) {
8005 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8006 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8007 emit_llvmonly_calli (cfg, fsig, sp, addr);
8009 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8010 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8012 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: plain direct call to the ctor */
8015 INLINE_FAILURE ("ctor call");
8016 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8017 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR storing VAL as the return value of the current method.
 * Valuetype returns (CEE_STOBJ) are stored either into the return
 * variable or through the hidden vret address argument; soft-float
 * R4 returns go through a conversion icall first; everything else
 * is handed to the architecture backend.
 */
8024 emit_setret (MonoCompile *cfg, MonoInst *val)
8026 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: store the value rather than moving it to a register */
8029 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8032 if (!cfg->vret_addr) {
8033 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Caller passed a hidden return-buffer address: store through it */
8035 EMIT_NEW_RETLOADA (cfg, ret_addr);
8037 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8038 ins->klass = mono_class_from_mono_type (ret_type);
8041 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value through an icall before setting the return */
8042 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8043 MonoInst *iargs [1];
8047 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8048 mono_arch_emit_setret (cfg, cfg->method, conv);
8050 mono_arch_emit_setret (cfg, cfg->method, val);
8053 mono_arch_emit_setret (cfg, cfg->method, val);
8059 * mono_method_to_ir:
8061 * Translate the .net IL into linear IR.
8064 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8065 MonoInst *return_var, MonoInst **inline_args,
8066 guint inline_offset, gboolean is_virtual_call)
8069 MonoInst *ins, **sp, **stack_start;
8070 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8071 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8072 MonoMethod *cmethod, *method_definition;
8073 MonoInst **arg_array;
8074 MonoMethodHeader *header;
8076 guint32 token, ins_flag;
8078 MonoClass *constrained_class = NULL;
8079 unsigned char *ip, *end, *target, *err_pos;
8080 MonoMethodSignature *sig;
8081 MonoGenericContext *generic_context = NULL;
8082 MonoGenericContainer *generic_container = NULL;
8083 MonoType **param_types;
8084 int i, n, start_new_bblock, dreg;
8085 int num_calls = 0, inline_costs = 0;
8086 int breakpoint_id = 0;
8088 GSList *class_inits = NULL;
8089 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8091 gboolean init_locals, seq_points, skip_dead_blocks;
8092 gboolean sym_seq_points = FALSE;
8093 MonoDebugMethodInfo *minfo;
8094 MonoBitSet *seq_point_locs = NULL;
8095 MonoBitSet *seq_point_set_locs = NULL;
8097 cfg->disable_inline = is_jit_optimizer_disabled (method);
8099 /* serialization and xdomain stuff may need access to private fields and methods */
8100 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8101 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8102 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8103 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8104 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8105 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8107 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8108 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8109 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8110 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8111 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8113 image = method->klass->image;
8114 header = mono_method_get_header (method);
8116 MonoLoaderError *error;
8118 if ((error = mono_loader_get_last_error ())) {
8119 mono_cfg_set_exception (cfg, error->exception_type);
8121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
8122 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
8124 goto exception_exit;
8126 generic_container = mono_method_get_generic_container (method);
8127 sig = mono_method_signature (method);
8128 num_args = sig->hasthis + sig->param_count;
8129 ip = (unsigned char*)header->code;
8130 cfg->cil_start = ip;
8131 end = ip + header->code_size;
8132 cfg->stat_cil_code_size += header->code_size;
8134 seq_points = cfg->gen_seq_points && cfg->method == method;
8136 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8137 /* We could hit a seq point before attaching to the JIT (#8338) */
8141 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8142 minfo = mono_debug_lookup_method (method);
8144 MonoSymSeqPoint *sps;
8145 int i, n_il_offsets;
8147 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8148 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8149 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8150 sym_seq_points = TRUE;
8151 for (i = 0; i < n_il_offsets; ++i) {
8152 if (sps [i].il_offset < header->code_size)
8153 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8156 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8157 /* Methods without line number info like auto-generated property accessors */
8158 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8159 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8160 sym_seq_points = TRUE;
8165 * Methods without init_locals set could cause asserts in various passes
8166 * (#497220). To work around this, we emit dummy initialization opcodes
8167 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8168 * on some platforms.
8170 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8171 init_locals = header->init_locals;
8175 method_definition = method;
8176 while (method_definition->is_inflated) {
8177 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8178 method_definition = imethod->declaring;
8181 /* SkipVerification is not allowed if core-clr is enabled */
8182 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8184 dont_verify_stloc = TRUE;
8187 if (sig->is_inflated)
8188 generic_context = mono_method_get_context (method);
8189 else if (generic_container)
8190 generic_context = &generic_container->context;
8191 cfg->generic_context = generic_context;
8194 g_assert (!sig->has_type_parameters);
8196 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8197 g_assert (method->is_inflated);
8198 g_assert (mono_method_get_context (method)->method_inst);
8200 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8201 g_assert (sig->generic_param_count);
8203 if (cfg->method == method) {
8204 cfg->real_offset = 0;
8206 cfg->real_offset = inline_offset;
8209 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8210 cfg->cil_offset_to_bb_len = header->code_size;
8212 cfg->current_method = method;
8214 if (cfg->verbose_level > 2)
8215 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8217 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8219 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8220 for (n = 0; n < sig->param_count; ++n)
8221 param_types [n + sig->hasthis] = sig->params [n];
8222 cfg->arg_types = param_types;
8224 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8225 if (cfg->method == method) {
8227 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8228 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8231 NEW_BBLOCK (cfg, start_bblock);
8232 cfg->bb_entry = start_bblock;
8233 start_bblock->cil_code = NULL;
8234 start_bblock->cil_length = 0;
8237 NEW_BBLOCK (cfg, end_bblock);
8238 cfg->bb_exit = end_bblock;
8239 end_bblock->cil_code = NULL;
8240 end_bblock->cil_length = 0;
8241 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8242 g_assert (cfg->num_bblocks == 2);
8244 arg_array = cfg->args;
8246 if (header->num_clauses) {
8247 cfg->spvars = g_hash_table_new (NULL, NULL);
8248 cfg->exvars = g_hash_table_new (NULL, NULL);
8250 /* handle exception clauses */
8251 for (i = 0; i < header->num_clauses; ++i) {
8252 MonoBasicBlock *try_bb;
8253 MonoExceptionClause *clause = &header->clauses [i];
8254 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8256 try_bb->real_offset = clause->try_offset;
8257 try_bb->try_start = TRUE;
8258 try_bb->region = ((i + 1) << 8) | clause->flags;
8259 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8260 tblock->real_offset = clause->handler_offset;
8261 tblock->flags |= BB_EXCEPTION_HANDLER;
8264 * Linking the try block with the EH block hinders inlining as we won't be able to
8265 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8267 if (COMPILE_LLVM (cfg))
8268 link_bblock (cfg, try_bb, tblock);
8270 if (*(ip + clause->handler_offset) == CEE_POP)
8271 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8273 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8274 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8275 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8276 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8277 MONO_ADD_INS (tblock, ins);
8279 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8280 /* finally clauses already have a seq point */
8281 /* seq points for filter clauses are emitted below */
8282 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8283 MONO_ADD_INS (tblock, ins);
8286 /* todo: is a fault block unsafe to optimize? */
8287 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8288 tblock->flags |= BB_EXCEPTION_UNSAFE;
8291 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8293 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8295 /* catch and filter blocks get the exception object on the stack */
8296 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8297 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8299 /* mostly like handle_stack_args (), but just sets the input args */
8300 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8301 tblock->in_scount = 1;
8302 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8303 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8307 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8308 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8309 if (!cfg->compile_llvm) {
8310 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8311 ins->dreg = tblock->in_stack [0]->dreg;
8312 MONO_ADD_INS (tblock, ins);
8315 MonoInst *dummy_use;
8318 * Add a dummy use for the exvar so its liveness info will be
8321 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8324 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8325 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8326 MONO_ADD_INS (tblock, ins);
8329 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8330 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8331 tblock->flags |= BB_EXCEPTION_HANDLER;
8332 tblock->real_offset = clause->data.filter_offset;
8333 tblock->in_scount = 1;
8334 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8335 /* The filter block shares the exvar with the handler block */
8336 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8337 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8338 MONO_ADD_INS (tblock, ins);
8342 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8343 clause->data.catch_class &&
8345 mono_class_check_context_used (clause->data.catch_class)) {
8347 * In shared generic code with catch
8348 * clauses containing type variables
8349 * the exception handling code has to
8350 * be able to get to the rgctx.
8351 * Therefore we have to make sure that
8352 * the vtable/mrgctx argument (for
8353 * static or generic methods) or the
8354 * "this" argument (for non-static
8355 * methods) are live.
8357 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8358 mini_method_get_context (method)->method_inst ||
8359 method->klass->valuetype) {
8360 mono_get_vtable_var (cfg);
8362 MonoInst *dummy_use;
8364 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8369 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8370 cfg->cbb = start_bblock;
8371 cfg->args = arg_array;
8372 mono_save_args (cfg, sig, inline_args);
8375 /* FIRST CODE BLOCK */
8376 NEW_BBLOCK (cfg, tblock);
8377 tblock->cil_code = ip;
8381 ADD_BBLOCK (cfg, tblock);
8383 if (cfg->method == method) {
8384 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8385 if (breakpoint_id) {
8386 MONO_INST_NEW (cfg, ins, OP_BREAK);
8387 MONO_ADD_INS (cfg->cbb, ins);
8391 /* we use a separate basic block for the initialization code */
8392 NEW_BBLOCK (cfg, init_localsbb);
8393 cfg->bb_init = init_localsbb;
8394 init_localsbb->real_offset = cfg->real_offset;
8395 start_bblock->next_bb = init_localsbb;
8396 init_localsbb->next_bb = cfg->cbb;
8397 link_bblock (cfg, start_bblock, init_localsbb);
8398 link_bblock (cfg, init_localsbb, cfg->cbb);
8400 cfg->cbb = init_localsbb;
8402 if (cfg->gsharedvt && cfg->method == method) {
8403 MonoGSharedVtMethodInfo *info;
8404 MonoInst *var, *locals_var;
8407 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8408 info->method = cfg->method;
8409 info->count_entries = 16;
8410 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8411 cfg->gsharedvt_info = info;
8413 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8414 /* prevent it from being register allocated */
8415 //var->flags |= MONO_INST_VOLATILE;
8416 cfg->gsharedvt_info_var = var;
8418 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8419 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8421 /* Allocate locals */
8422 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8423 /* prevent it from being register allocated */
8424 //locals_var->flags |= MONO_INST_VOLATILE;
8425 cfg->gsharedvt_locals_var = locals_var;
8427 dreg = alloc_ireg (cfg);
8428 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8430 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8431 ins->dreg = locals_var->dreg;
8433 MONO_ADD_INS (cfg->cbb, ins);
8434 cfg->gsharedvt_locals_var_ins = ins;
8436 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8439 ins->flags |= MONO_INST_INIT;
8443 if (mono_security_core_clr_enabled ()) {
8444 /* check if this is native code, e.g. an icall or a p/invoke */
8445 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8446 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8448 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8449 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8451 /* if this ia a native call then it can only be JITted from platform code */
8452 if ((icall || pinvk) && method->klass && method->klass->image) {
8453 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8454 MonoException *ex = icall ? mono_get_exception_security () :
8455 mono_get_exception_method_access ();
8456 emit_throw_exception (cfg, ex);
8463 CHECK_CFG_EXCEPTION;
8465 if (header->code_size == 0)
8468 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8473 if (cfg->method == method)
8474 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8476 for (n = 0; n < header->num_locals; ++n) {
8477 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8482 /* We force the vtable variable here for all shared methods
8483 for the possibility that they might show up in a stack
8484 trace where their exact instantiation is needed. */
8485 if (cfg->gshared && method == cfg->method) {
8486 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8487 mini_method_get_context (method)->method_inst ||
8488 method->klass->valuetype) {
8489 mono_get_vtable_var (cfg);
8491 /* FIXME: Is there a better way to do this?
8492 We need the variable live for the duration
8493 of the whole method. */
8494 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8498 /* add a check for this != NULL to inlined methods */
8499 if (is_virtual_call) {
8502 NEW_ARGLOAD (cfg, arg_ins, 0);
8503 MONO_ADD_INS (cfg->cbb, arg_ins);
8504 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8507 skip_dead_blocks = !dont_verify;
8508 if (skip_dead_blocks) {
8509 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8514 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8515 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8518 start_new_bblock = 0;
8520 if (cfg->method == method)
8521 cfg->real_offset = ip - header->code;
8523 cfg->real_offset = inline_offset;
8528 if (start_new_bblock) {
8529 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8530 if (start_new_bblock == 2) {
8531 g_assert (ip == tblock->cil_code);
8533 GET_BBLOCK (cfg, tblock, ip);
8535 cfg->cbb->next_bb = tblock;
8537 start_new_bblock = 0;
8538 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8539 if (cfg->verbose_level > 3)
8540 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8541 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8545 g_slist_free (class_inits);
8548 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8549 link_bblock (cfg, cfg->cbb, tblock);
8550 if (sp != stack_start) {
8551 handle_stack_args (cfg, stack_start, sp - stack_start);
8553 CHECK_UNVERIFIABLE (cfg);
8555 cfg->cbb->next_bb = tblock;
8557 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8558 if (cfg->verbose_level > 3)
8559 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8560 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8563 g_slist_free (class_inits);
8568 if (skip_dead_blocks) {
8569 int ip_offset = ip - header->code;
8571 if (ip_offset == bb->end)
8575 int op_size = mono_opcode_size (ip, end);
8576 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8578 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8580 if (ip_offset + op_size == bb->end) {
8581 MONO_INST_NEW (cfg, ins, OP_NOP);
8582 MONO_ADD_INS (cfg->cbb, ins);
8583 start_new_bblock = 1;
8591 * Sequence points are points where the debugger can place a breakpoint.
8592 * Currently, we generate these automatically at points where the IL
8595 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8597 * Make methods interruptable at the beginning, and at the targets of
8598 * backward branches.
8599 * Also, do this at the start of every bblock in methods with clauses too,
8600 * to be able to handle instructions with inprecise control flow like
8602 * Backward branches are handled at the end of method-to-ir ().
8604 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8605 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8607 /* Avoid sequence points on empty IL like .volatile */
8608 // FIXME: Enable this
8609 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8610 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8611 if ((sp != stack_start) && !sym_seq_point)
8612 ins->flags |= MONO_INST_NONEMPTY_STACK;
8613 MONO_ADD_INS (cfg->cbb, ins);
8616 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8619 cfg->cbb->real_offset = cfg->real_offset;
8621 if ((cfg->method == method) && cfg->coverage_info) {
8622 guint32 cil_offset = ip - header->code;
8623 cfg->coverage_info->data [cil_offset].cil_code = ip;
8625 /* TODO: Use an increment here */
8626 #if defined(TARGET_X86)
8627 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8628 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8630 MONO_ADD_INS (cfg->cbb, ins);
8632 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8637 if (cfg->verbose_level > 3)
8638 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8642 if (seq_points && !sym_seq_points && sp != stack_start) {
8644 * The C# compiler uses these nops to notify the JIT that it should
8645 * insert seq points.
8647 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8648 MONO_ADD_INS (cfg->cbb, ins);
8650 if (cfg->keep_cil_nops)
8651 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8653 MONO_INST_NEW (cfg, ins, OP_NOP);
8655 MONO_ADD_INS (cfg->cbb, ins);
8658 if (should_insert_brekpoint (cfg->method)) {
8659 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8661 MONO_INST_NEW (cfg, ins, OP_NOP);
8664 MONO_ADD_INS (cfg->cbb, ins);
8670 CHECK_STACK_OVF (1);
8671 n = (*ip)-CEE_LDARG_0;
8673 EMIT_NEW_ARGLOAD (cfg, ins, n);
8681 CHECK_STACK_OVF (1);
8682 n = (*ip)-CEE_LDLOC_0;
8684 EMIT_NEW_LOCLOAD (cfg, ins, n);
8693 n = (*ip)-CEE_STLOC_0;
8696 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8698 emit_stloc_ir (cfg, sp, header, n);
8705 CHECK_STACK_OVF (1);
8708 EMIT_NEW_ARGLOAD (cfg, ins, n);
8714 CHECK_STACK_OVF (1);
8717 NEW_ARGLOADA (cfg, ins, n);
8718 MONO_ADD_INS (cfg->cbb, ins);
8728 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8730 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8735 CHECK_STACK_OVF (1);
8738 EMIT_NEW_LOCLOAD (cfg, ins, n);
8742 case CEE_LDLOCA_S: {
8743 unsigned char *tmp_ip;
8745 CHECK_STACK_OVF (1);
8746 CHECK_LOCAL (ip [1]);
8748 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8754 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8763 CHECK_LOCAL (ip [1]);
8764 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8766 emit_stloc_ir (cfg, sp, header, ip [1]);
8771 CHECK_STACK_OVF (1);
8772 EMIT_NEW_PCONST (cfg, ins, NULL);
8773 ins->type = STACK_OBJ;
8778 CHECK_STACK_OVF (1);
8779 EMIT_NEW_ICONST (cfg, ins, -1);
8792 CHECK_STACK_OVF (1);
8793 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8799 CHECK_STACK_OVF (1);
8801 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8807 CHECK_STACK_OVF (1);
8808 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8814 CHECK_STACK_OVF (1);
8815 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8816 ins->type = STACK_I8;
8817 ins->dreg = alloc_dreg (cfg, STACK_I8);
8819 ins->inst_l = (gint64)read64 (ip);
8820 MONO_ADD_INS (cfg->cbb, ins);
8826 gboolean use_aotconst = FALSE;
8828 #ifdef TARGET_POWERPC
8829 /* FIXME: Clean this up */
8830 if (cfg->compile_aot)
8831 use_aotconst = TRUE;
8834 /* FIXME: we should really allocate this only late in the compilation process */
8835 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8837 CHECK_STACK_OVF (1);
8843 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8845 dreg = alloc_freg (cfg);
8846 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8847 ins->type = cfg->r4_stack_type;
8849 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8850 ins->type = cfg->r4_stack_type;
8851 ins->dreg = alloc_dreg (cfg, STACK_R8);
8853 MONO_ADD_INS (cfg->cbb, ins);
8863 gboolean use_aotconst = FALSE;
8865 #ifdef TARGET_POWERPC
8866 /* FIXME: Clean this up */
8867 if (cfg->compile_aot)
8868 use_aotconst = TRUE;
8871 /* FIXME: we should really allocate this only late in the compilation process */
8872 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8874 CHECK_STACK_OVF (1);
8880 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8882 dreg = alloc_freg (cfg);
8883 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8884 ins->type = STACK_R8;
8886 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8887 ins->type = STACK_R8;
8888 ins->dreg = alloc_dreg (cfg, STACK_R8);
8890 MONO_ADD_INS (cfg->cbb, ins);
8899 MonoInst *temp, *store;
8901 CHECK_STACK_OVF (1);
8905 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8906 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8908 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8911 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8924 if (sp [0]->type == STACK_R8)
8925 /* we need to pop the value from the x86 FP stack */
8926 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8931 MonoMethodSignature *fsig;
8934 INLINE_FAILURE ("jmp");
8935 GSHAREDVT_FAILURE (*ip);
8938 if (stack_start != sp)
8940 token = read32 (ip + 1);
8941 /* FIXME: check the signature matches */
8942 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8944 if (!cmethod || mono_loader_get_last_error ())
8947 if (cfg->gshared && mono_method_check_context_used (cmethod))
8948 GENERIC_SHARING_FAILURE (CEE_JMP);
8950 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8952 fsig = mono_method_signature (cmethod);
8953 n = fsig->param_count + fsig->hasthis;
8954 if (cfg->llvm_only) {
8957 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8958 for (i = 0; i < n; ++i)
8959 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8960 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8962 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8963 * have to emit a normal return since llvm expects it.
8966 emit_setret (cfg, ins);
8967 MONO_INST_NEW (cfg, ins, OP_BR);
8968 ins->inst_target_bb = end_bblock;
8969 MONO_ADD_INS (cfg->cbb, ins);
8970 link_bblock (cfg, cfg->cbb, end_bblock);
8973 } else if (cfg->backend->have_op_tail_call) {
8974 /* Handle tail calls similarly to calls */
8977 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8978 call->method = cmethod;
8979 call->tail_call = TRUE;
8980 call->signature = mono_method_signature (cmethod);
8981 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8982 call->inst.inst_p0 = cmethod;
8983 for (i = 0; i < n; ++i)
8984 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8986 mono_arch_emit_call (cfg, call);
8987 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8988 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8990 for (i = 0; i < num_args; ++i)
8991 /* Prevent arguments from being optimized away */
8992 arg_array [i]->flags |= MONO_INST_VOLATILE;
8994 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8995 ins = (MonoInst*)call;
8996 ins->inst_p0 = cmethod;
8997 MONO_ADD_INS (cfg->cbb, ins);
9001 start_new_bblock = 1;
9006 MonoMethodSignature *fsig;
9009 token = read32 (ip + 1);
9013 //GSHAREDVT_FAILURE (*ip);
9018 fsig = mini_get_signature (method, token, generic_context);
9020 if (method->dynamic && fsig->pinvoke) {
9024 * This is a call through a function pointer using a pinvoke
9025 * signature. Have to create a wrapper and call that instead.
9026 * FIXME: This is very slow, need to create a wrapper at JIT time
9027 * instead based on the signature.
9029 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9030 EMIT_NEW_PCONST (cfg, args [1], fsig);
9032 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9035 n = fsig->param_count + fsig->hasthis;
9039 //g_assert (!virtual_ || fsig->hasthis);
9043 inline_costs += 10 * num_calls++;
9046 * Making generic calls out of gsharedvt methods.
9047 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9048 * patching gshared method addresses into a gsharedvt method.
9050 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9052 * We pass the address to the gsharedvt trampoline in the rgctx reg
9054 MonoInst *callee = addr;
9056 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9058 GSHAREDVT_FAILURE (*ip);
9062 GSHAREDVT_FAILURE (*ip);
9064 addr = emit_get_rgctx_sig (cfg, context_used,
9065 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9066 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9070 /* Prevent inlining of methods with indirect calls */
9071 INLINE_FAILURE ("indirect call");
9073 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9074 MonoJumpInfoType info_type;
9078 * Instead of emitting an indirect call, emit a direct call
9079 * with the contents of the aotconst as the patch info.
9081 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9082 info_type = (MonoJumpInfoType)addr->inst_c1;
9083 info_data = addr->inst_p0;
9085 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9086 info_data = addr->inst_right->inst_left;
9089 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9090 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9095 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9099 /* End of call, INS should contain the result of the call, if any */
9101 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9103 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9106 CHECK_CFG_EXCEPTION;
9110 constrained_class = NULL;
9114 case CEE_CALLVIRT: {
9115 MonoInst *addr = NULL;
9116 MonoMethodSignature *fsig = NULL;
9118 int virtual_ = *ip == CEE_CALLVIRT;
9119 gboolean pass_imt_from_rgctx = FALSE;
9120 MonoInst *imt_arg = NULL;
9121 MonoInst *keep_this_alive = NULL;
9122 gboolean pass_vtable = FALSE;
9123 gboolean pass_mrgctx = FALSE;
9124 MonoInst *vtable_arg = NULL;
9125 gboolean check_this = FALSE;
9126 gboolean supported_tail_call = FALSE;
9127 gboolean tail_call = FALSE;
9128 gboolean need_seq_point = FALSE;
9129 guint32 call_opcode = *ip;
9130 gboolean emit_widen = TRUE;
9131 gboolean push_res = TRUE;
9132 gboolean skip_ret = FALSE;
9133 gboolean delegate_invoke = FALSE;
9134 gboolean direct_icall = FALSE;
9135 gboolean constrained_partial_call = FALSE;
9136 MonoMethod *cil_method;
9139 token = read32 (ip + 1);
9143 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9144 cil_method = cmethod;
9146 if (constrained_class) {
9147 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9148 if (!mini_is_gsharedvt_klass (constrained_class)) {
9149 g_assert (!cmethod->klass->valuetype);
9150 if (!mini_type_is_reference (&constrained_class->byval_arg))
9151 constrained_partial_call = TRUE;
9155 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9156 if (cfg->verbose_level > 2)
9157 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9158 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9159 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9161 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9165 if (cfg->verbose_level > 2)
9166 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9168 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9170 * This is needed since get_method_constrained can't find
9171 * the method in klass representing a type var.
9172 * The type var is guaranteed to be a reference type in this
9175 if (!mini_is_gsharedvt_klass (constrained_class))
9176 g_assert (!cmethod->klass->valuetype);
9178 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9184 if (!cmethod || mono_loader_get_last_error ())
9186 if (!dont_verify && !cfg->skip_visibility) {
9187 MonoMethod *target_method = cil_method;
9188 if (method->is_inflated) {
9189 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
9191 if (!mono_method_can_access_method (method_definition, target_method) &&
9192 !mono_method_can_access_method (method, cil_method))
9193 METHOD_ACCESS_FAILURE (method, cil_method);
9196 if (mono_security_core_clr_enabled ())
9197 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9199 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9200 /* MS.NET seems to silently convert this to a callvirt */
9205 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9206 * converts to a callvirt.
9208 * tests/bug-515884.il is an example of this behavior
9210 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9211 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9212 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9216 if (!cmethod->klass->inited)
9217 if (!mono_class_init (cmethod->klass))
9218 TYPE_LOAD_ERROR (cmethod->klass);
9220 fsig = mono_method_signature (cmethod);
9223 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9224 mini_class_is_system_array (cmethod->klass)) {
9225 array_rank = cmethod->klass->rank;
9226 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9227 direct_icall = TRUE;
9228 } else if (fsig->pinvoke) {
9229 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9230 fsig = mono_method_signature (wrapper);
9231 } else if (constrained_class) {
9233 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9237 if (cfg->llvm_only && !cfg->method->wrapper_type)
9238 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9240 /* See code below */
9241 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9242 MonoBasicBlock *tbb;
9244 GET_BBLOCK (cfg, tbb, ip + 5);
9245 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9247 * We want to extend the try block to cover the call, but we can't do it if the
9248 * call is made directly since it's followed by an exception check.
9250 direct_icall = FALSE;
9254 mono_save_token_info (cfg, image, token, cil_method);
9256 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9257 need_seq_point = TRUE;
9259 /* Don't support calls made using type arguments for now */
9261 if (cfg->gsharedvt) {
9262 if (mini_is_gsharedvt_signature (fsig))
9263 GSHAREDVT_FAILURE (*ip);
9267 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9268 g_assert_not_reached ();
9270 n = fsig->param_count + fsig->hasthis;
9272 if (!cfg->gshared && cmethod->klass->generic_container)
9276 g_assert (!mono_method_check_context_used (cmethod));
9280 //g_assert (!virtual_ || fsig->hasthis);
9285 * We have the `constrained.' prefix opcode.
9287 if (constrained_class) {
9288 if (mini_is_gsharedvt_klass (constrained_class)) {
9289 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9290 /* The 'Own method' case below */
9291 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9292 /* 'The type parameter is instantiated as a reference type' case below. */
9294 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9295 CHECK_CFG_EXCEPTION;
9301 if (constrained_partial_call) {
9302 gboolean need_box = TRUE;
9305 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9306 * called method is not known at compile time either. The called method could end up being
9307 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9308 * to box the receiver.
9309 * A simple solution would be to box always and make a normal virtual call, but that would
9310 * be bad performance wise.
9312 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9314 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9319 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9320 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9321 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9322 ins->klass = constrained_class;
9323 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9324 CHECK_CFG_EXCEPTION;
9325 } else if (need_box) {
9327 MonoBasicBlock *is_ref_bb, *end_bb;
9328 MonoInst *nonbox_call;
9331 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9333 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9334 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9336 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9338 NEW_BBLOCK (cfg, is_ref_bb);
9339 NEW_BBLOCK (cfg, end_bb);
9341 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9342 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9346 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9351 MONO_START_BB (cfg, is_ref_bb);
9352 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9353 ins->klass = constrained_class;
9354 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9355 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9359 MONO_START_BB (cfg, end_bb);
9362 nonbox_call->dreg = ins->dreg;
9365 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9366 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9367 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9370 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9372 * The type parameter is instantiated as a valuetype,
9373 * but that type doesn't override the method we're
9374 * calling, so we need to box `this'.
9376 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9377 ins->klass = constrained_class;
9378 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9379 CHECK_CFG_EXCEPTION;
9380 } else if (!constrained_class->valuetype) {
9381 int dreg = alloc_ireg_ref (cfg);
9384 * The type parameter is instantiated as a reference
9385 * type. We have a managed pointer on the stack, so
9386 * we need to dereference it here.
9388 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9389 ins->type = STACK_OBJ;
9392 if (cmethod->klass->valuetype) {
9395 /* Interface method */
9398 mono_class_setup_vtable (constrained_class);
9399 CHECK_TYPELOAD (constrained_class);
9400 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9402 TYPE_LOAD_ERROR (constrained_class);
9403 slot = mono_method_get_vtable_slot (cmethod);
9405 TYPE_LOAD_ERROR (cmethod->klass);
9406 cmethod = constrained_class->vtable [ioffset + slot];
9408 if (cmethod->klass == mono_defaults.enum_class) {
9409 /* Enum implements some interfaces, so treat this as the first case */
9410 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9411 ins->klass = constrained_class;
9412 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9413 CHECK_CFG_EXCEPTION;
9418 constrained_class = NULL;
9421 if (check_call_signature (cfg, fsig, sp))
9424 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9425 delegate_invoke = TRUE;
9427 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9428 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9429 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9437 * If the callee is a shared method, then its static cctor
9438 * might not get called after the call was patched.
9440 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9441 emit_class_init (cfg, cmethod->klass);
9442 CHECK_TYPELOAD (cmethod->klass);
9445 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9448 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9450 context_used = mini_method_check_context_used (cfg, cmethod);
9452 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9453 /* Generic method interface
9454 calls are resolved via a
9455 helper function and don't
9457 if (!cmethod_context || !cmethod_context->method_inst)
9458 pass_imt_from_rgctx = TRUE;
9462 * If a shared method calls another
9463 * shared method then the caller must
9464 * have a generic sharing context
9465 * because the magic trampoline
9466 * requires it. FIXME: We shouldn't
9467 * have to force the vtable/mrgctx
9468 * variable here. Instead there
9469 * should be a flag in the cfg to
9470 * request a generic sharing context.
9473 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9474 mono_get_vtable_var (cfg);
9479 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9481 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9483 CHECK_TYPELOAD (cmethod->klass);
9484 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9489 g_assert (!vtable_arg);
9491 if (!cfg->compile_aot) {
9493 * emit_get_rgctx_method () calls mono_class_vtable () so check
9494 * for type load errors before.
9496 mono_class_setup_vtable (cmethod->klass);
9497 CHECK_TYPELOAD (cmethod->klass);
9500 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9502 /* !marshalbyref is needed to properly handle generic methods + remoting */
9503 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9504 MONO_METHOD_IS_FINAL (cmethod)) &&
9505 !mono_class_is_marshalbyref (cmethod->klass)) {
9512 if (pass_imt_from_rgctx) {
9513 g_assert (!pass_vtable);
9515 imt_arg = emit_get_rgctx_method (cfg, context_used,
9516 cmethod, MONO_RGCTX_INFO_METHOD);
9520 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9522 /* Calling virtual generic methods */
9523 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9524 !(MONO_METHOD_IS_FINAL (cmethod) &&
9525 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9526 fsig->generic_param_count &&
9527 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9529 MonoInst *this_temp, *this_arg_temp, *store;
9530 MonoInst *iargs [4];
9532 g_assert (fsig->is_inflated);
9534 /* Prevent inlining of methods that contain indirect calls */
9535 INLINE_FAILURE ("virtual generic call");
9537 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9538 GSHAREDVT_FAILURE (*ip);
9540 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9541 g_assert (!imt_arg);
9543 g_assert (cmethod->is_inflated);
9544 imt_arg = emit_get_rgctx_method (cfg, context_used,
9545 cmethod, MONO_RGCTX_INFO_METHOD);
9546 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9548 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9549 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9550 MONO_ADD_INS (cfg->cbb, store);
9552 /* FIXME: This should be a managed pointer */
9553 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9555 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9556 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9557 cmethod, MONO_RGCTX_INFO_METHOD);
9558 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9559 addr = mono_emit_jit_icall (cfg,
9560 mono_helper_compile_generic_method, iargs);
9562 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9564 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9571 * Implement a workaround for the inherent races involved in locking:
9577 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9578 * try block, the Exit () won't be executed, see:
9579 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9580 * To work around this, we extend such try blocks to include the last x bytes
9581 * of the Monitor.Enter () call.
9583 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9584 MonoBasicBlock *tbb;
9586 GET_BBLOCK (cfg, tbb, ip + 5);
9588 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9589 * from Monitor.Enter like ArgumentNullException.
9591 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9592 /* Mark this bblock as needing to be extended */
9593 tbb->extend_try_block = TRUE;
9597 /* Conversion to a JIT intrinsic */
9598 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9599 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9600 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9607 if ((cfg->opt & MONO_OPT_INLINE) &&
9608 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9609 mono_method_check_inlining (cfg, cmethod)) {
9611 gboolean always = FALSE;
9613 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9614 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9615 /* Prevent inlining of methods that call wrappers */
9616 INLINE_FAILURE ("wrapper call");
9617 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9621 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9623 cfg->real_offset += 5;
9625 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9626 /* *sp is already set by inline_method */
9631 inline_costs += costs;
9637 /* Tail recursion elimination */
9638 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9639 gboolean has_vtargs = FALSE;
9642 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9643 INLINE_FAILURE ("tail call");
9645 /* keep it simple */
9646 for (i = fsig->param_count - 1; i >= 0; i--) {
9647 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9652 for (i = 0; i < n; ++i)
9653 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9654 MONO_INST_NEW (cfg, ins, OP_BR);
9655 MONO_ADD_INS (cfg->cbb, ins);
9656 tblock = start_bblock->out_bb [0];
9657 link_bblock (cfg, cfg->cbb, tblock);
9658 ins->inst_target_bb = tblock;
9659 start_new_bblock = 1;
9661 /* skip the CEE_RET, too */
9662 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9669 inline_costs += 10 * num_calls++;
9672 * Making generic calls out of gsharedvt methods.
9673 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9674 * patching gshared method addresses into a gsharedvt method.
9676 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9677 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9678 (!(cfg->llvm_only && virtual_))) {
9679 MonoRgctxInfoType info_type;
9682 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9683 //GSHAREDVT_FAILURE (*ip);
9684 // disable for possible remoting calls
9685 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9686 GSHAREDVT_FAILURE (*ip);
9687 if (fsig->generic_param_count) {
9688 /* virtual generic call */
9689 g_assert (!imt_arg);
9690 /* Same as the virtual generic case above */
9691 imt_arg = emit_get_rgctx_method (cfg, context_used,
9692 cmethod, MONO_RGCTX_INFO_METHOD);
9693 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9695 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9696 /* This can happen when we call a fully instantiated iface method */
9697 imt_arg = emit_get_rgctx_method (cfg, context_used,
9698 cmethod, MONO_RGCTX_INFO_METHOD);
9703 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9704 keep_this_alive = sp [0];
9706 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9707 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9709 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9710 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9712 if (cfg->llvm_only) {
9713 // FIXME: Avoid initializing vtable_arg
9714 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9716 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9721 /* Generic sharing */
9724 * Use this if the callee is gsharedvt sharable too, since
9725 * at runtime we might find an instantiation so the call cannot
9726 * be patched (the 'no_patch' code path in mini-trampolines.c).
9728 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9729 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9730 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9731 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9732 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9733 INLINE_FAILURE ("gshared");
9735 g_assert (cfg->gshared && cmethod);
9739 * We are compiling a call to a
9740 * generic method from shared code,
9741 * which means that we have to look up
9742 * the method in the rgctx and do an
9746 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9748 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9749 if (cfg->llvm_only) {
9750 // FIXME: Avoid initializing imt_arg/vtable_arg
9751 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9753 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9758 /* Direct calls to icalls */
9760 MonoMethod *wrapper;
9763 /* Inline the wrapper */
9764 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9766 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9767 g_assert (costs > 0);
9768 cfg->real_offset += 5;
9770 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9771 /* *sp is already set by inline_method */
9776 inline_costs += costs;
9785 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9786 MonoInst *val = sp [fsig->param_count];
9788 if (val->type == STACK_OBJ) {
9789 MonoInst *iargs [2];
9794 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9797 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9798 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9799 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9800 emit_write_barrier (cfg, addr, val);
9801 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9802 GSHAREDVT_FAILURE (*ip);
9803 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9804 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9806 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9807 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9808 if (!cmethod->klass->element_class->valuetype && !readonly)
9809 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9810 CHECK_TYPELOAD (cmethod->klass);
9813 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9816 g_assert_not_reached ();
9823 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9827 /* Tail prefix / tail call optimization */
9829 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9830 /* FIXME: runtime generic context pointer for jumps? */
9831 /* FIXME: handle this for generic sharing eventually */
9832 if ((ins_flag & MONO_INST_TAILCALL) &&
9833 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9834 supported_tail_call = TRUE;
9836 if (supported_tail_call) {
9839 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9840 INLINE_FAILURE ("tail call");
9842 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9844 if (cfg->backend->have_op_tail_call) {
9845 /* Handle tail calls similarly to normal calls */
9848 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9850 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9851 call->tail_call = TRUE;
9852 call->method = cmethod;
9853 call->signature = mono_method_signature (cmethod);
9856 * We implement tail calls by storing the actual arguments into the
9857 * argument variables, then emitting a CEE_JMP.
9859 for (i = 0; i < n; ++i) {
9860 /* Prevent argument from being register allocated */
9861 arg_array [i]->flags |= MONO_INST_VOLATILE;
9862 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9864 ins = (MonoInst*)call;
9865 ins->inst_p0 = cmethod;
9866 ins->inst_p1 = arg_array [0];
9867 MONO_ADD_INS (cfg->cbb, ins);
9868 link_bblock (cfg, cfg->cbb, end_bblock);
9869 start_new_bblock = 1;
9871 // FIXME: Eliminate unreachable epilogs
9874 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9875 * only reachable from this call.
9877 GET_BBLOCK (cfg, tblock, ip + 5);
9878 if (tblock == cfg->cbb || tblock->in_count == 0)
9887 * Synchronized wrappers.
9888 * It's hard to determine where to replace a method with its synchronized
9889 * wrapper without causing an infinite recursion. The current solution is
9890 * to add the synchronized wrapper in the trampolines, and to
9891 * change the called method to a dummy wrapper, and resolve that wrapper
9892 * to the real method in mono_jit_compile_method ().
9894 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9895 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9896 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9897 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9901 * Virtual calls in llvm-only mode.
9903 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9904 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp, imt_arg);
9909 INLINE_FAILURE ("call");
9910 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9911 imt_arg, vtable_arg);
9913 if (tail_call && !cfg->llvm_only) {
9914 link_bblock (cfg, cfg->cbb, end_bblock);
9915 start_new_bblock = 1;
9917 // FIXME: Eliminate unreachable epilogs
9920 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9921 * only reachable from this call.
9923 GET_BBLOCK (cfg, tblock, ip + 5);
9924 if (tblock == cfg->cbb || tblock->in_count == 0)
9931 /* End of call, INS should contain the result of the call, if any */
9933 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9936 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9941 if (keep_this_alive) {
9942 MonoInst *dummy_use;
9944 /* See mono_emit_method_call_full () */
9945 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9948 CHECK_CFG_EXCEPTION;
9952 g_assert (*ip == CEE_RET);
9956 constrained_class = NULL;
9958 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9962 if (cfg->method != method) {
9963 /* return from inlined method */
9965 * If in_count == 0, that means the ret is unreachable due to
9966 * being preceded by a throw. In that case, inline_method () will
9967 * handle setting the return value
9968 * (test case: test_0_inline_throw ()).
9970 if (return_var && cfg->cbb->in_count) {
9971 MonoType *ret_type = mono_method_signature (method)->ret;
9977 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9980 //g_assert (returnvar != -1);
9981 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9982 cfg->ret_var_set = TRUE;
9985 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9987 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9991 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9993 if (seq_points && !sym_seq_points) {
9995 * Place a seq point here too even though the IL stack is not
9996 * empty, so a step over on
9999 * will work correctly.
10001 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10002 MONO_ADD_INS (cfg->cbb, ins);
10005 g_assert (!return_var);
10009 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10012 emit_setret (cfg, *sp);
10015 if (sp != stack_start)
10017 MONO_INST_NEW (cfg, ins, OP_BR);
10019 ins->inst_target_bb = end_bblock;
10020 MONO_ADD_INS (cfg->cbb, ins);
10021 link_bblock (cfg, cfg->cbb, end_bblock);
10022 start_new_bblock = 1;
10026 MONO_INST_NEW (cfg, ins, OP_BR);
10028 target = ip + 1 + (signed char)(*ip);
10030 GET_BBLOCK (cfg, tblock, target);
10031 link_bblock (cfg, cfg->cbb, tblock);
10032 ins->inst_target_bb = tblock;
10033 if (sp != stack_start) {
10034 handle_stack_args (cfg, stack_start, sp - stack_start);
10036 CHECK_UNVERIFIABLE (cfg);
10038 MONO_ADD_INS (cfg->cbb, ins);
10039 start_new_bblock = 1;
10040 inline_costs += BRANCH_COST;
10054 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10056 target = ip + 1 + *(signed char*)ip;
10059 ADD_BINCOND (NULL);
10062 inline_costs += BRANCH_COST;
10066 MONO_INST_NEW (cfg, ins, OP_BR);
10069 target = ip + 4 + (gint32)read32(ip);
10071 GET_BBLOCK (cfg, tblock, target);
10072 link_bblock (cfg, cfg->cbb, tblock);
10073 ins->inst_target_bb = tblock;
10074 if (sp != stack_start) {
10075 handle_stack_args (cfg, stack_start, sp - stack_start);
10077 CHECK_UNVERIFIABLE (cfg);
10080 MONO_ADD_INS (cfg->cbb, ins);
10082 start_new_bblock = 1;
10083 inline_costs += BRANCH_COST;
10085 case CEE_BRFALSE_S:
10090 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10091 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10092 guint32 opsize = is_short ? 1 : 4;
10094 CHECK_OPSIZE (opsize);
10096 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10099 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10104 GET_BBLOCK (cfg, tblock, target);
10105 link_bblock (cfg, cfg->cbb, tblock);
10106 GET_BBLOCK (cfg, tblock, ip);
10107 link_bblock (cfg, cfg->cbb, tblock);
10109 if (sp != stack_start) {
10110 handle_stack_args (cfg, stack_start, sp - stack_start);
10111 CHECK_UNVERIFIABLE (cfg);
10114 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10115 cmp->sreg1 = sp [0]->dreg;
10116 type_from_op (cfg, cmp, sp [0], NULL);
10119 #if SIZEOF_REGISTER == 4
10120 if (cmp->opcode == OP_LCOMPARE_IMM) {
10121 /* Convert it to OP_LCOMPARE */
10122 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10123 ins->type = STACK_I8;
10124 ins->dreg = alloc_dreg (cfg, STACK_I8);
10126 MONO_ADD_INS (cfg->cbb, ins);
10127 cmp->opcode = OP_LCOMPARE;
10128 cmp->sreg2 = ins->dreg;
10131 MONO_ADD_INS (cfg->cbb, cmp);
10133 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10134 type_from_op (cfg, ins, sp [0], NULL);
10135 MONO_ADD_INS (cfg->cbb, ins);
10136 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10137 GET_BBLOCK (cfg, tblock, target);
10138 ins->inst_true_bb = tblock;
10139 GET_BBLOCK (cfg, tblock, ip);
10140 ins->inst_false_bb = tblock;
10141 start_new_bblock = 2;
10144 inline_costs += BRANCH_COST;
10159 MONO_INST_NEW (cfg, ins, *ip);
10161 target = ip + 4 + (gint32)read32(ip);
10164 ADD_BINCOND (NULL);
10167 inline_costs += BRANCH_COST;
10171 MonoBasicBlock **targets;
10172 MonoBasicBlock *default_bblock;
10173 MonoJumpInfoBBTable *table;
10174 int offset_reg = alloc_preg (cfg);
10175 int target_reg = alloc_preg (cfg);
10176 int table_reg = alloc_preg (cfg);
10177 int sum_reg = alloc_preg (cfg);
10178 gboolean use_op_switch;
10182 n = read32 (ip + 1);
10185 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10189 CHECK_OPSIZE (n * sizeof (guint32));
10190 target = ip + n * sizeof (guint32);
10192 GET_BBLOCK (cfg, default_bblock, target);
10193 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10195 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10196 for (i = 0; i < n; ++i) {
10197 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10198 targets [i] = tblock;
10199 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10203 if (sp != stack_start) {
10205 * Link the current bb with the targets as well, so handle_stack_args
10206 * will set their in_stack correctly.
10208 link_bblock (cfg, cfg->cbb, default_bblock);
10209 for (i = 0; i < n; ++i)
10210 link_bblock (cfg, cfg->cbb, targets [i]);
10212 handle_stack_args (cfg, stack_start, sp - stack_start);
10214 CHECK_UNVERIFIABLE (cfg);
10216 /* Undo the links */
10217 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10218 for (i = 0; i < n; ++i)
10219 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10225 for (i = 0; i < n; ++i)
10226 link_bblock (cfg, cfg->cbb, targets [i]);
10228 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10229 table->table = targets;
10230 table->table_size = n;
10232 use_op_switch = FALSE;
10234 /* ARM implements SWITCH statements differently */
10235 /* FIXME: Make it use the generic implementation */
10236 if (!cfg->compile_aot)
10237 use_op_switch = TRUE;
10240 if (COMPILE_LLVM (cfg))
10241 use_op_switch = TRUE;
10243 cfg->cbb->has_jump_table = 1;
10245 if (use_op_switch) {
10246 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10247 ins->sreg1 = src1->dreg;
10248 ins->inst_p0 = table;
10249 ins->inst_many_bb = targets;
10250 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10251 MONO_ADD_INS (cfg->cbb, ins);
10253 if (sizeof (gpointer) == 8)
10254 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10256 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10258 #if SIZEOF_REGISTER == 8
10259 /* The upper word might not be zero, and we add it to a 64 bit address later */
10260 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10263 if (cfg->compile_aot) {
10264 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10266 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10267 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10268 ins->inst_p0 = table;
10269 ins->dreg = table_reg;
10270 MONO_ADD_INS (cfg->cbb, ins);
10273 /* FIXME: Use load_memindex */
10274 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10275 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10276 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10278 start_new_bblock = 1;
10279 inline_costs += (BRANCH_COST * 2);
10292 case CEE_LDIND_REF:
10299 dreg = alloc_freg (cfg);
10302 dreg = alloc_lreg (cfg);
10304 case CEE_LDIND_REF:
10305 dreg = alloc_ireg_ref (cfg);
10308 dreg = alloc_preg (cfg);
10311 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10312 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10313 if (*ip == CEE_LDIND_R4)
10314 ins->type = cfg->r4_stack_type;
10315 ins->flags |= ins_flag;
10316 MONO_ADD_INS (cfg->cbb, ins);
10318 if (ins_flag & MONO_INST_VOLATILE) {
10319 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10320 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10325 case CEE_STIND_REF:
10336 if (ins_flag & MONO_INST_VOLATILE) {
10337 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10338 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10341 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10342 ins->flags |= ins_flag;
10345 MONO_ADD_INS (cfg->cbb, ins);
10347 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10348 emit_write_barrier (cfg, sp [0], sp [1]);
10357 MONO_INST_NEW (cfg, ins, (*ip));
10359 ins->sreg1 = sp [0]->dreg;
10360 ins->sreg2 = sp [1]->dreg;
10361 type_from_op (cfg, ins, sp [0], sp [1]);
10363 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10365 /* Use the immediate opcodes if possible */
10366 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10367 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10368 if (imm_opcode != -1) {
10369 ins->opcode = imm_opcode;
10370 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10373 NULLIFY_INS (sp [1]);
10377 MONO_ADD_INS ((cfg)->cbb, (ins));
10379 *sp++ = mono_decompose_opcode (cfg, ins);
10396 MONO_INST_NEW (cfg, ins, (*ip));
10398 ins->sreg1 = sp [0]->dreg;
10399 ins->sreg2 = sp [1]->dreg;
10400 type_from_op (cfg, ins, sp [0], sp [1]);
10402 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10403 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10405 /* FIXME: Pass opcode to is_inst_imm */
10407 /* Use the immediate opcodes if possible */
10408 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10411 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10412 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10413 /* Keep emulated opcodes which are optimized away later */
10414 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10415 imm_opcode = mono_op_to_op_imm (ins->opcode);
10418 if (imm_opcode != -1) {
10419 ins->opcode = imm_opcode;
10420 if (sp [1]->opcode == OP_I8CONST) {
10421 #if SIZEOF_REGISTER == 8
10422 ins->inst_imm = sp [1]->inst_l;
10424 ins->inst_ls_word = sp [1]->inst_ls_word;
10425 ins->inst_ms_word = sp [1]->inst_ms_word;
10429 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10432 /* Might be followed by an instruction added by add_widen_op */
10433 if (sp [1]->next == NULL)
10434 NULLIFY_INS (sp [1]);
10437 MONO_ADD_INS ((cfg)->cbb, (ins));
10439 *sp++ = mono_decompose_opcode (cfg, ins);
10452 case CEE_CONV_OVF_I8:
10453 case CEE_CONV_OVF_U8:
10454 case CEE_CONV_R_UN:
10457 /* Special case this earlier so we have long constants in the IR */
10458 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10459 int data = sp [-1]->inst_c0;
10460 sp [-1]->opcode = OP_I8CONST;
10461 sp [-1]->type = STACK_I8;
10462 #if SIZEOF_REGISTER == 8
10463 if ((*ip) == CEE_CONV_U8)
10464 sp [-1]->inst_c0 = (guint32)data;
10466 sp [-1]->inst_c0 = data;
10468 sp [-1]->inst_ls_word = data;
10469 if ((*ip) == CEE_CONV_U8)
10470 sp [-1]->inst_ms_word = 0;
10472 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10474 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10481 case CEE_CONV_OVF_I4:
10482 case CEE_CONV_OVF_I1:
10483 case CEE_CONV_OVF_I2:
10484 case CEE_CONV_OVF_I:
10485 case CEE_CONV_OVF_U:
10488 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10489 ADD_UNOP (CEE_CONV_OVF_I8);
10496 case CEE_CONV_OVF_U1:
10497 case CEE_CONV_OVF_U2:
10498 case CEE_CONV_OVF_U4:
10501 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10502 ADD_UNOP (CEE_CONV_OVF_U8);
10509 case CEE_CONV_OVF_I1_UN:
10510 case CEE_CONV_OVF_I2_UN:
10511 case CEE_CONV_OVF_I4_UN:
10512 case CEE_CONV_OVF_I8_UN:
10513 case CEE_CONV_OVF_U1_UN:
10514 case CEE_CONV_OVF_U2_UN:
10515 case CEE_CONV_OVF_U4_UN:
10516 case CEE_CONV_OVF_U8_UN:
10517 case CEE_CONV_OVF_I_UN:
10518 case CEE_CONV_OVF_U_UN:
10525 CHECK_CFG_EXCEPTION;
10529 case CEE_ADD_OVF_UN:
10531 case CEE_MUL_OVF_UN:
10533 case CEE_SUB_OVF_UN:
10539 GSHAREDVT_FAILURE (*ip);
10542 token = read32 (ip + 1);
10543 klass = mini_get_class (method, token, generic_context);
10544 CHECK_TYPELOAD (klass);
10546 if (generic_class_is_reference_type (cfg, klass)) {
10547 MonoInst *store, *load;
10548 int dreg = alloc_ireg_ref (cfg);
10550 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10551 load->flags |= ins_flag;
10552 MONO_ADD_INS (cfg->cbb, load);
10554 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10555 store->flags |= ins_flag;
10556 MONO_ADD_INS (cfg->cbb, store);
10558 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10559 emit_write_barrier (cfg, sp [0], sp [1]);
10561 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10567 int loc_index = -1;
10573 token = read32 (ip + 1);
10574 klass = mini_get_class (method, token, generic_context);
10575 CHECK_TYPELOAD (klass);
10577 /* Optimize the common ldobj+stloc combination */
10580 loc_index = ip [6];
10587 loc_index = ip [5] - CEE_STLOC_0;
10594 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10595 CHECK_LOCAL (loc_index);
10597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10598 ins->dreg = cfg->locals [loc_index]->dreg;
10599 ins->flags |= ins_flag;
10602 if (ins_flag & MONO_INST_VOLATILE) {
10603 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10604 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10610 /* Optimize the ldobj+stobj combination */
10611 /* The reference case ends up being a load+store anyway */
10612 /* Skip this if the operation is volatile. */
10613 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10618 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10625 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10626 ins->flags |= ins_flag;
10629 if (ins_flag & MONO_INST_VOLATILE) {
10630 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10631 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10640 CHECK_STACK_OVF (1);
10642 n = read32 (ip + 1);
10644 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10645 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10646 ins->type = STACK_OBJ;
10649 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10650 MonoInst *iargs [1];
10651 char *str = (char *)mono_method_get_wrapper_data (method, n);
10653 if (cfg->compile_aot)
10654 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10656 EMIT_NEW_PCONST (cfg, iargs [0], str);
10657 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10659 if (cfg->opt & MONO_OPT_SHARED) {
10660 MonoInst *iargs [3];
10662 if (cfg->compile_aot) {
10663 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10665 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10666 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10667 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10668 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10669 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10671 if (cfg->cbb->out_of_line) {
10672 MonoInst *iargs [2];
10674 if (image == mono_defaults.corlib) {
10676 * Avoid relocations in AOT and save some space by using a
10677 * version of helper_ldstr specialized to mscorlib.
10679 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10680 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10682 /* Avoid creating the string object */
10683 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10684 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10685 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10689 if (cfg->compile_aot) {
10690 NEW_LDSTRCONST (cfg, ins, image, n);
10692 MONO_ADD_INS (cfg->cbb, ins);
10695 NEW_PCONST (cfg, ins, NULL);
10696 ins->type = STACK_OBJ;
10697 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10699 OUT_OF_MEMORY_FAILURE;
10702 MONO_ADD_INS (cfg->cbb, ins);
10711 MonoInst *iargs [2];
10712 MonoMethodSignature *fsig;
10715 MonoInst *vtable_arg = NULL;
10718 token = read32 (ip + 1);
10719 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10720 if (!cmethod || mono_loader_get_last_error ())
10722 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10725 mono_save_token_info (cfg, image, token, cmethod);
10727 if (!mono_class_init (cmethod->klass))
10728 TYPE_LOAD_ERROR (cmethod->klass);
10730 context_used = mini_method_check_context_used (cfg, cmethod);
10732 if (mono_security_core_clr_enabled ())
10733 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10735 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10736 emit_class_init (cfg, cmethod->klass);
10737 CHECK_TYPELOAD (cmethod->klass);
10741 if (cfg->gsharedvt) {
10742 if (mini_is_gsharedvt_variable_signature (sig))
10743 GSHAREDVT_FAILURE (*ip);
10747 n = fsig->param_count;
10751 * Generate smaller code for the common newobj <exception> instruction in
10752 * argument checking code.
10754 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10755 is_exception_class (cmethod->klass) && n <= 2 &&
10756 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10757 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10758 MonoInst *iargs [3];
10762 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10765 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10768 iargs [1] = sp [0];
10769 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10772 iargs [1] = sp [0];
10773 iargs [2] = sp [1];
10774 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10777 g_assert_not_reached ();
10785 /* move the args to allow room for 'this' in the first position */
10791 /* check_call_signature () requires sp[0] to be set */
10792 this_ins.type = STACK_OBJ;
10793 sp [0] = &this_ins;
10794 if (check_call_signature (cfg, fsig, sp))
10799 if (mini_class_is_system_array (cmethod->klass)) {
10800 *sp = emit_get_rgctx_method (cfg, context_used,
10801 cmethod, MONO_RGCTX_INFO_METHOD);
10803 /* Avoid varargs in the common case */
10804 if (fsig->param_count == 1)
10805 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10806 else if (fsig->param_count == 2)
10807 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10808 else if (fsig->param_count == 3)
10809 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10810 else if (fsig->param_count == 4)
10811 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10813 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10814 } else if (cmethod->string_ctor) {
10815 g_assert (!context_used);
10816 g_assert (!vtable_arg);
10817 /* we simply pass a null pointer */
10818 EMIT_NEW_PCONST (cfg, *sp, NULL);
10819 /* now call the string ctor */
10820 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10822 if (cmethod->klass->valuetype) {
10823 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10824 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10825 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10830 * The code generated by mini_emit_virtual_call () expects
10831 * iargs [0] to be a boxed instance, but luckily the vcall
10832 * will be transformed into a normal call there.
10834 } else if (context_used) {
10835 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10838 MonoVTable *vtable = NULL;
10840 if (!cfg->compile_aot)
10841 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10842 CHECK_TYPELOAD (cmethod->klass);
10845 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10846 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10847 * As a workaround, we call class cctors before allocating objects.
10849 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10850 emit_class_init (cfg, cmethod->klass);
10851 if (cfg->verbose_level > 2)
10852 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10853 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10856 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10859 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10862 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10864 /* Now call the actual ctor */
10865 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10866 CHECK_CFG_EXCEPTION;
10869 if (alloc == NULL) {
10871 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10872 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10880 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10881 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10884 case CEE_CASTCLASS:
10888 token = read32 (ip + 1);
10889 klass = mini_get_class (method, token, generic_context);
10890 CHECK_TYPELOAD (klass);
10891 if (sp [0]->type != STACK_OBJ)
10894 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10895 CHECK_CFG_EXCEPTION;
10904 token = read32 (ip + 1);
10905 klass = mini_get_class (method, token, generic_context);
10906 CHECK_TYPELOAD (klass);
10907 if (sp [0]->type != STACK_OBJ)
10910 context_used = mini_class_check_context_used (cfg, klass);
10912 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10913 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10914 MonoInst *args [3];
10921 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10924 idx = get_castclass_cache_idx (cfg);
10925 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10927 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10930 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10931 MonoMethod *mono_isinst;
10932 MonoInst *iargs [1];
10935 mono_isinst = mono_marshal_get_isinst (klass);
10936 iargs [0] = sp [0];
10938 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10939 iargs, ip, cfg->real_offset, TRUE);
10940 CHECK_CFG_EXCEPTION;
10941 g_assert (costs > 0);
10944 cfg->real_offset += 5;
10948 inline_costs += costs;
10951 ins = handle_isinst (cfg, klass, *sp, context_used);
10952 CHECK_CFG_EXCEPTION;
10958 case CEE_UNBOX_ANY: {
10959 MonoInst *res, *addr;
10964 token = read32 (ip + 1);
10965 klass = mini_get_class (method, token, generic_context);
10966 CHECK_TYPELOAD (klass);
10968 mono_save_token_info (cfg, image, token, klass);
10970 context_used = mini_class_check_context_used (cfg, klass);
10972 if (mini_is_gsharedvt_klass (klass)) {
10973 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10975 } else if (generic_class_is_reference_type (cfg, klass)) {
10976 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10977 CHECK_CFG_EXCEPTION;
10978 } else if (mono_class_is_nullable (klass)) {
10979 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10981 addr = handle_unbox (cfg, klass, sp, context_used);
10983 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10994 MonoClass *enum_class;
10995 MonoMethod *has_flag;
11001 token = read32 (ip + 1);
11002 klass = mini_get_class (method, token, generic_context);
11003 CHECK_TYPELOAD (klass);
11005 mono_save_token_info (cfg, image, token, klass);
11007 context_used = mini_class_check_context_used (cfg, klass);
11009 if (generic_class_is_reference_type (cfg, klass)) {
11015 if (klass == mono_defaults.void_class)
11017 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11019 /* frequent check in generic code: box (struct), brtrue */
11024 * <push int/long ptr>
11027 * constrained. MyFlags
11028 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11030 * If we find this sequence and the operand types on box and constrained
11031 * are equal, we can emit a specialized instruction sequence instead of
11032 * the very slow HasFlag () call.
11034 if ((cfg->opt & MONO_OPT_INTRINS) &&
11035 /* Cheap checks first. */
11036 ip + 5 + 6 + 5 < end &&
11037 ip [5] == CEE_PREFIX1 &&
11038 ip [6] == CEE_CONSTRAINED_ &&
11039 ip [11] == CEE_CALLVIRT &&
11040 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11041 mono_class_is_enum (klass) &&
11042 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11043 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11044 has_flag->klass == mono_defaults.enum_class &&
11045 !strcmp (has_flag->name, "HasFlag") &&
11046 has_flag->signature->hasthis &&
11047 has_flag->signature->param_count == 1) {
11048 CHECK_TYPELOAD (enum_class);
11050 if (enum_class == klass) {
11051 MonoInst *enum_this, *enum_flag;
11056 enum_this = sp [0];
11057 enum_flag = sp [1];
11059 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11064 // FIXME: LLVM can't handle the inconsistent bb linking
11065 if (!mono_class_is_nullable (klass) &&
11066 !mini_is_gsharedvt_klass (klass) &&
11067 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11068 (ip [5] == CEE_BRTRUE ||
11069 ip [5] == CEE_BRTRUE_S ||
11070 ip [5] == CEE_BRFALSE ||
11071 ip [5] == CEE_BRFALSE_S)) {
11072 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11074 MonoBasicBlock *true_bb, *false_bb;
11078 if (cfg->verbose_level > 3) {
11079 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11080 printf ("<box+brtrue opt>\n");
11085 case CEE_BRFALSE_S:
11088 target = ip + 1 + (signed char)(*ip);
11095 target = ip + 4 + (gint)(read32 (ip));
11099 g_assert_not_reached ();
11103 * We need to link both bblocks, since it is needed for handling stack
11104 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11105 * Branching to only one of them would lead to inconsistencies, so
11106 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11108 GET_BBLOCK (cfg, true_bb, target);
11109 GET_BBLOCK (cfg, false_bb, ip);
11111 mono_link_bblock (cfg, cfg->cbb, true_bb);
11112 mono_link_bblock (cfg, cfg->cbb, false_bb);
11114 if (sp != stack_start) {
11115 handle_stack_args (cfg, stack_start, sp - stack_start);
11117 CHECK_UNVERIFIABLE (cfg);
11120 if (COMPILE_LLVM (cfg)) {
11121 dreg = alloc_ireg (cfg);
11122 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11125 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11127 /* The JIT can't eliminate the iconst+compare */
11128 MONO_INST_NEW (cfg, ins, OP_BR);
11129 ins->inst_target_bb = is_true ? true_bb : false_bb;
11130 MONO_ADD_INS (cfg->cbb, ins);
11133 start_new_bblock = 1;
11137 *sp++ = handle_box (cfg, val, klass, context_used);
11139 CHECK_CFG_EXCEPTION;
11148 token = read32 (ip + 1);
11149 klass = mini_get_class (method, token, generic_context);
11150 CHECK_TYPELOAD (klass);
11152 mono_save_token_info (cfg, image, token, klass);
11154 context_used = mini_class_check_context_used (cfg, klass);
11156 if (mono_class_is_nullable (klass)) {
11159 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11160 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11164 ins = handle_unbox (cfg, klass, sp, context_used);
11177 MonoClassField *field;
11178 #ifndef DISABLE_REMOTING
11182 gboolean is_instance;
11184 gpointer addr = NULL;
11185 gboolean is_special_static;
11187 MonoInst *store_val = NULL;
11188 MonoInst *thread_ins;
11191 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11193 if (op == CEE_STFLD) {
11196 store_val = sp [1];
11201 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11203 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11206 if (op == CEE_STSFLD) {
11209 store_val = sp [0];
11214 token = read32 (ip + 1);
11215 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11216 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11217 klass = field->parent;
11220 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11223 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11224 FIELD_ACCESS_FAILURE (method, field);
11225 mono_class_init (klass);
11227 /* if the class is Critical then transparent code cannot access its fields */
11228 if (!is_instance && mono_security_core_clr_enabled ())
11229 ensure_method_is_allowed_to_access_field (cfg, method, field);
11231 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11232 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11233 if (mono_security_core_clr_enabled ())
11234 ensure_method_is_allowed_to_access_field (cfg, method, field);
11237 ftype = mono_field_get_type (field);
11240 * LDFLD etc. is usable on static fields as well, so convert those cases to
11243 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11255 g_assert_not_reached ();
11257 is_instance = FALSE;
11260 context_used = mini_class_check_context_used (cfg, klass);
11262 /* INSTANCE CASE */
11264 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11265 if (op == CEE_STFLD) {
11266 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11268 #ifndef DISABLE_REMOTING
11269 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11270 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11271 MonoInst *iargs [5];
11273 GSHAREDVT_FAILURE (op);
11275 iargs [0] = sp [0];
11276 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11277 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11278 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11280 iargs [4] = sp [1];
11282 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11283 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11284 iargs, ip, cfg->real_offset, TRUE);
11285 CHECK_CFG_EXCEPTION;
11286 g_assert (costs > 0);
11288 cfg->real_offset += 5;
11290 inline_costs += costs;
11292 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11299 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11301 if (mini_is_gsharedvt_klass (klass)) {
11302 MonoInst *offset_ins;
11304 context_used = mini_class_check_context_used (cfg, klass);
11306 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11307 /* The value is offset by 1 */
11308 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11309 dreg = alloc_ireg_mp (cfg);
11310 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11311 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11312 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11314 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11316 if (sp [0]->opcode != OP_LDADDR)
11317 store->flags |= MONO_INST_FAULT;
11319 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11320 /* insert call to write barrier */
11324 dreg = alloc_ireg_mp (cfg);
11325 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11326 emit_write_barrier (cfg, ptr, sp [1]);
11329 store->flags |= ins_flag;
11336 #ifndef DISABLE_REMOTING
11337 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11338 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11339 MonoInst *iargs [4];
11341 GSHAREDVT_FAILURE (op);
11343 iargs [0] = sp [0];
11344 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11345 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11346 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11347 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11348 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11349 iargs, ip, cfg->real_offset, TRUE);
11350 CHECK_CFG_EXCEPTION;
11351 g_assert (costs > 0);
11353 cfg->real_offset += 5;
11357 inline_costs += costs;
11359 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11365 if (sp [0]->type == STACK_VTYPE) {
11368 /* Have to compute the address of the variable */
11370 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11372 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11374 g_assert (var->klass == klass);
11376 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11380 if (op == CEE_LDFLDA) {
11381 if (sp [0]->type == STACK_OBJ) {
11382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11383 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11386 dreg = alloc_ireg_mp (cfg);
11388 if (mini_is_gsharedvt_klass (klass)) {
11389 MonoInst *offset_ins;
11391 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11392 /* The value is offset by 1 */
11393 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11394 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11396 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11398 ins->klass = mono_class_from_mono_type (field->type);
11399 ins->type = STACK_MP;
11404 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11406 if (mini_is_gsharedvt_klass (klass)) {
11407 MonoInst *offset_ins;
11409 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11410 /* The value is offset by 1 */
11411 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11412 dreg = alloc_ireg_mp (cfg);
11413 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11414 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11416 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11418 load->flags |= ins_flag;
11419 if (sp [0]->opcode != OP_LDADDR)
11420 load->flags |= MONO_INST_FAULT;
11432 context_used = mini_class_check_context_used (cfg, klass);
11434 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11437 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11438 * to be called here.
11440 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11441 mono_class_vtable (cfg->domain, klass);
11442 CHECK_TYPELOAD (klass);
11444 mono_domain_lock (cfg->domain);
11445 if (cfg->domain->special_static_fields)
11446 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11447 mono_domain_unlock (cfg->domain);
11449 is_special_static = mono_class_field_is_special_static (field);
11451 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11452 thread_ins = mono_get_thread_intrinsic (cfg);
11456 /* Generate IR to compute the field address */
11457 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11459 * Fast access to TLS data
11460 * Inline version of get_thread_static_data () in
11464 int idx, static_data_reg, array_reg, dreg;
11466 GSHAREDVT_FAILURE (op);
11468 MONO_ADD_INS (cfg->cbb, thread_ins);
11469 static_data_reg = alloc_ireg (cfg);
11470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11472 if (cfg->compile_aot) {
11473 int offset_reg, offset2_reg, idx_reg;
11475 /* For TLS variables, this will return the TLS offset */
11476 EMIT_NEW_SFLDACONST (cfg, ins, field);
11477 offset_reg = ins->dreg;
11478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11479 idx_reg = alloc_ireg (cfg);
11480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11481 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11482 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11483 array_reg = alloc_ireg (cfg);
11484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11485 offset2_reg = alloc_ireg (cfg);
11486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11488 dreg = alloc_ireg (cfg);
11489 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11491 offset = (gsize)addr & 0x7fffffff;
11492 idx = offset & 0x3f;
11494 array_reg = alloc_ireg (cfg);
11495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11496 dreg = alloc_ireg (cfg);
11497 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11499 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11500 (cfg->compile_aot && is_special_static) ||
11501 (context_used && is_special_static)) {
11502 MonoInst *iargs [2];
11504 g_assert (field->parent);
11505 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11506 if (context_used) {
11507 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11508 field, MONO_RGCTX_INFO_CLASS_FIELD);
11510 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11512 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11513 } else if (context_used) {
11514 MonoInst *static_data;
11517 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11518 method->klass->name_space, method->klass->name, method->name,
11519 depth, field->offset);
11522 if (mono_class_needs_cctor_run (klass, method))
11523 emit_class_init (cfg, klass);
11526 * The pointer we're computing here is
11528 * super_info.static_data + field->offset
11530 static_data = emit_get_rgctx_klass (cfg, context_used,
11531 klass, MONO_RGCTX_INFO_STATIC_DATA);
11533 if (mini_is_gsharedvt_klass (klass)) {
11534 MonoInst *offset_ins;
11536 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11537 /* The value is offset by 1 */
11538 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11539 dreg = alloc_ireg_mp (cfg);
11540 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11541 } else if (field->offset == 0) {
11544 int addr_reg = mono_alloc_preg (cfg);
11545 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11547 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11548 MonoInst *iargs [2];
11550 g_assert (field->parent);
11551 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11552 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11553 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11555 MonoVTable *vtable = NULL;
11557 if (!cfg->compile_aot)
11558 vtable = mono_class_vtable (cfg->domain, klass);
11559 CHECK_TYPELOAD (klass);
11562 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11563 if (!(g_slist_find (class_inits, klass))) {
11564 emit_class_init (cfg, klass);
11565 if (cfg->verbose_level > 2)
11566 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11567 class_inits = g_slist_prepend (class_inits, klass);
11570 if (cfg->run_cctors) {
11572 /* This makes so that inline cannot trigger */
11573 /* .cctors: too many apps depend on them */
11574 /* running with a specific order... */
11576 if (! vtable->initialized)
11577 INLINE_FAILURE ("class init");
11578 ex = mono_runtime_class_init_full (vtable, FALSE);
11580 set_exception_object (cfg, ex);
11581 goto exception_exit;
11585 if (cfg->compile_aot)
11586 EMIT_NEW_SFLDACONST (cfg, ins, field);
11589 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11591 EMIT_NEW_PCONST (cfg, ins, addr);
11594 MonoInst *iargs [1];
11595 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11596 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11600 /* Generate IR to do the actual load/store operation */
11602 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11603 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11604 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11607 if (op == CEE_LDSFLDA) {
11608 ins->klass = mono_class_from_mono_type (ftype);
11609 ins->type = STACK_PTR;
11611 } else if (op == CEE_STSFLD) {
11614 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11615 store->flags |= ins_flag;
11617 gboolean is_const = FALSE;
11618 MonoVTable *vtable = NULL;
11619 gpointer addr = NULL;
11621 if (!context_used) {
11622 vtable = mono_class_vtable (cfg->domain, klass);
11623 CHECK_TYPELOAD (klass);
11625 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11626 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11627 int ro_type = ftype->type;
11629 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11630 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11631 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11634 GSHAREDVT_FAILURE (op);
11636 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11639 case MONO_TYPE_BOOLEAN:
11641 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11645 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11648 case MONO_TYPE_CHAR:
11650 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11654 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11659 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11663 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11668 case MONO_TYPE_PTR:
11669 case MONO_TYPE_FNPTR:
11670 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11671 type_to_eval_stack_type ((cfg), field->type, *sp);
11674 case MONO_TYPE_STRING:
11675 case MONO_TYPE_OBJECT:
11676 case MONO_TYPE_CLASS:
11677 case MONO_TYPE_SZARRAY:
11678 case MONO_TYPE_ARRAY:
11679 if (!mono_gc_is_moving ()) {
11680 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11681 type_to_eval_stack_type ((cfg), field->type, *sp);
11689 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11694 case MONO_TYPE_VALUETYPE:
11704 CHECK_STACK_OVF (1);
11706 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11707 load->flags |= ins_flag;
11713 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11714 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11715 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11726 token = read32 (ip + 1);
11727 klass = mini_get_class (method, token, generic_context);
11728 CHECK_TYPELOAD (klass);
11729 if (ins_flag & MONO_INST_VOLATILE) {
11730 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11731 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11733 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11734 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11735 ins->flags |= ins_flag;
11736 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11737 generic_class_is_reference_type (cfg, klass)) {
11738 /* insert call to write barrier */
11739 emit_write_barrier (cfg, sp [0], sp [1]);
11751 const char *data_ptr;
11753 guint32 field_token;
11759 token = read32 (ip + 1);
11761 klass = mini_get_class (method, token, generic_context);
11762 CHECK_TYPELOAD (klass);
11764 context_used = mini_class_check_context_used (cfg, klass);
11766 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11767 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11768 ins->sreg1 = sp [0]->dreg;
11769 ins->type = STACK_I4;
11770 ins->dreg = alloc_ireg (cfg);
11771 MONO_ADD_INS (cfg->cbb, ins);
11772 *sp = mono_decompose_opcode (cfg, ins);
11775 if (context_used) {
11776 MonoInst *args [3];
11777 MonoClass *array_class = mono_array_class_get (klass, 1);
11778 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11780 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11783 args [0] = emit_get_rgctx_klass (cfg, context_used,
11784 array_class, MONO_RGCTX_INFO_VTABLE);
11789 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11791 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11793 if (cfg->opt & MONO_OPT_SHARED) {
11794 /* Decompose now to avoid problems with references to the domainvar */
11795 MonoInst *iargs [3];
11797 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11798 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11799 iargs [2] = sp [0];
11801 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11803 /* Decompose later since it is needed by abcrem */
11804 MonoClass *array_type = mono_array_class_get (klass, 1);
11805 mono_class_vtable (cfg->domain, array_type);
11806 CHECK_TYPELOAD (array_type);
11808 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11809 ins->dreg = alloc_ireg_ref (cfg);
11810 ins->sreg1 = sp [0]->dreg;
11811 ins->inst_newa_class = klass;
11812 ins->type = STACK_OBJ;
11813 ins->klass = array_type;
11814 MONO_ADD_INS (cfg->cbb, ins);
11815 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11816 cfg->cbb->has_array_access = TRUE;
11818 /* Needed so mono_emit_load_get_addr () gets called */
11819 mono_get_got_var (cfg);
11829 * we inline/optimize the initialization sequence if possible.
11830 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11831 * for small sizes open code the memcpy
11832 * ensure the rva field is big enough
11834 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11835 MonoMethod *memcpy_method = get_memcpy_method ();
11836 MonoInst *iargs [3];
11837 int add_reg = alloc_ireg_mp (cfg);
11839 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11840 if (cfg->compile_aot) {
11841 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11843 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11845 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11846 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11855 if (sp [0]->type != STACK_OBJ)
11858 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11859 ins->dreg = alloc_preg (cfg);
11860 ins->sreg1 = sp [0]->dreg;
11861 ins->type = STACK_I4;
11862 /* This flag will be inherited by the decomposition */
11863 ins->flags |= MONO_INST_FAULT;
11864 MONO_ADD_INS (cfg->cbb, ins);
11865 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11866 cfg->cbb->has_array_access = TRUE;
11874 if (sp [0]->type != STACK_OBJ)
11877 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11879 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11880 CHECK_TYPELOAD (klass);
11881 /* we need to make sure that this array is exactly the type it needs
11882 * to be for correctness. the wrappers are lax with their usage
11883 * so we need to ignore them here
11885 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11886 MonoClass *array_class = mono_array_class_get (klass, 1);
11887 mini_emit_check_array_type (cfg, sp [0], array_class);
11888 CHECK_TYPELOAD (array_class);
11892 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11897 case CEE_LDELEM_I1:
11898 case CEE_LDELEM_U1:
11899 case CEE_LDELEM_I2:
11900 case CEE_LDELEM_U2:
11901 case CEE_LDELEM_I4:
11902 case CEE_LDELEM_U4:
11903 case CEE_LDELEM_I8:
11905 case CEE_LDELEM_R4:
11906 case CEE_LDELEM_R8:
11907 case CEE_LDELEM_REF: {
11913 if (*ip == CEE_LDELEM) {
11915 token = read32 (ip + 1);
11916 klass = mini_get_class (method, token, generic_context);
11917 CHECK_TYPELOAD (klass);
11918 mono_class_init (klass);
11921 klass = array_access_to_klass (*ip);
11923 if (sp [0]->type != STACK_OBJ)
11926 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11928 if (mini_is_gsharedvt_variable_klass (klass)) {
11929 // FIXME-VT: OP_ICONST optimization
11930 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11931 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11932 ins->opcode = OP_LOADV_MEMBASE;
11933 } else if (sp [1]->opcode == OP_ICONST) {
11934 int array_reg = sp [0]->dreg;
11935 int index_reg = sp [1]->dreg;
11936 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11938 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11939 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11941 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11944 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11945 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11948 if (*ip == CEE_LDELEM)
11955 case CEE_STELEM_I1:
11956 case CEE_STELEM_I2:
11957 case CEE_STELEM_I4:
11958 case CEE_STELEM_I8:
11959 case CEE_STELEM_R4:
11960 case CEE_STELEM_R8:
11961 case CEE_STELEM_REF:
11966 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11968 if (*ip == CEE_STELEM) {
11970 token = read32 (ip + 1);
11971 klass = mini_get_class (method, token, generic_context);
11972 CHECK_TYPELOAD (klass);
11973 mono_class_init (klass);
11976 klass = array_access_to_klass (*ip);
11978 if (sp [0]->type != STACK_OBJ)
11981 emit_array_store (cfg, klass, sp, TRUE);
11983 if (*ip == CEE_STELEM)
11990 case CEE_CKFINITE: {
11994 if (cfg->llvm_only) {
11995 MonoInst *iargs [1];
11997 iargs [0] = sp [0];
11998 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12000 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12001 ins->sreg1 = sp [0]->dreg;
12002 ins->dreg = alloc_freg (cfg);
12003 ins->type = STACK_R8;
12004 MONO_ADD_INS (cfg->cbb, ins);
12006 *sp++ = mono_decompose_opcode (cfg, ins);
12012 case CEE_REFANYVAL: {
12013 MonoInst *src_var, *src;
12015 int klass_reg = alloc_preg (cfg);
12016 int dreg = alloc_preg (cfg);
12018 GSHAREDVT_FAILURE (*ip);
12021 MONO_INST_NEW (cfg, ins, *ip);
12024 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12025 CHECK_TYPELOAD (klass);
12027 context_used = mini_class_check_context_used (cfg, klass);
12030 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12032 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12033 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12036 if (context_used) {
12037 MonoInst *klass_ins;
12039 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12040 klass, MONO_RGCTX_INFO_KLASS);
12043 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12044 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12046 mini_emit_class_check (cfg, klass_reg, klass);
12048 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12049 ins->type = STACK_MP;
12050 ins->klass = klass;
12055 case CEE_MKREFANY: {
12056 MonoInst *loc, *addr;
12058 GSHAREDVT_FAILURE (*ip);
12061 MONO_INST_NEW (cfg, ins, *ip);
12064 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12065 CHECK_TYPELOAD (klass);
12067 context_used = mini_class_check_context_used (cfg, klass);
12069 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12070 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12072 if (context_used) {
12073 MonoInst *const_ins;
12074 int type_reg = alloc_preg (cfg);
12076 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12080 } else if (cfg->compile_aot) {
12081 int const_reg = alloc_preg (cfg);
12082 int type_reg = alloc_preg (cfg);
12084 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12085 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12089 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12090 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12092 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12094 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12095 ins->type = STACK_VTYPE;
12096 ins->klass = mono_defaults.typed_reference_class;
12101 case CEE_LDTOKEN: {
12103 MonoClass *handle_class;
12105 CHECK_STACK_OVF (1);
12108 n = read32 (ip + 1);
12110 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12111 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12112 handle = mono_method_get_wrapper_data (method, n);
12113 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12114 if (handle_class == mono_defaults.typehandle_class)
12115 handle = &((MonoClass*)handle)->byval_arg;
12118 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12123 mono_class_init (handle_class);
12124 if (cfg->gshared) {
12125 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12126 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12127 /* This case handles ldtoken
12128 of an open type, like for
12131 } else if (handle_class == mono_defaults.typehandle_class) {
12132 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12133 } else if (handle_class == mono_defaults.fieldhandle_class)
12134 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12135 else if (handle_class == mono_defaults.methodhandle_class)
12136 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12138 g_assert_not_reached ();
12141 if ((cfg->opt & MONO_OPT_SHARED) &&
12142 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12143 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12144 MonoInst *addr, *vtvar, *iargs [3];
12145 int method_context_used;
12147 method_context_used = mini_method_check_context_used (cfg, method);
12149 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12151 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12152 EMIT_NEW_ICONST (cfg, iargs [1], n);
12153 if (method_context_used) {
12154 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12155 method, MONO_RGCTX_INFO_METHOD);
12156 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12158 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12159 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12161 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12163 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12165 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12167 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12168 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12169 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12170 (cmethod->klass == mono_defaults.systemtype_class) &&
12171 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12172 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12174 mono_class_init (tclass);
12175 if (context_used) {
12176 ins = emit_get_rgctx_klass (cfg, context_used,
12177 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12178 } else if (cfg->compile_aot) {
12179 if (method->wrapper_type) {
12180 mono_error_init (&error); //got to do it since there are multiple conditionals below
12181 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12182 /* Special case for static synchronized wrappers */
12183 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12185 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12186 /* FIXME: n is not a normal token */
12188 EMIT_NEW_PCONST (cfg, ins, NULL);
12191 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12194 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, (MonoType *)handle));
12196 ins->type = STACK_OBJ;
12197 ins->klass = cmethod->klass;
12200 MonoInst *addr, *vtvar;
12202 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12204 if (context_used) {
12205 if (handle_class == mono_defaults.typehandle_class) {
12206 ins = emit_get_rgctx_klass (cfg, context_used,
12207 mono_class_from_mono_type ((MonoType *)handle),
12208 MONO_RGCTX_INFO_TYPE);
12209 } else if (handle_class == mono_defaults.methodhandle_class) {
12210 ins = emit_get_rgctx_method (cfg, context_used,
12211 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12212 } else if (handle_class == mono_defaults.fieldhandle_class) {
12213 ins = emit_get_rgctx_field (cfg, context_used,
12214 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12216 g_assert_not_reached ();
12218 } else if (cfg->compile_aot) {
12219 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12221 EMIT_NEW_PCONST (cfg, ins, handle);
12223 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12224 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12225 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12235 MONO_INST_NEW (cfg, ins, OP_THROW);
12237 ins->sreg1 = sp [0]->dreg;
12239 cfg->cbb->out_of_line = TRUE;
12240 MONO_ADD_INS (cfg->cbb, ins);
12241 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12242 MONO_ADD_INS (cfg->cbb, ins);
12245 link_bblock (cfg, cfg->cbb, end_bblock);
12246 start_new_bblock = 1;
12247 /* This can complicate code generation for llvm since the return value might not be defined */
12248 if (COMPILE_LLVM (cfg))
12249 INLINE_FAILURE ("throw");
12251 case CEE_ENDFINALLY:
12252 /* mono_save_seq_point_info () depends on this */
12253 if (sp != stack_start)
12254 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12255 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12256 MONO_ADD_INS (cfg->cbb, ins);
12258 start_new_bblock = 1;
12261 * Control will leave the method so empty the stack, otherwise
12262 * the next basic block will start with a nonempty stack.
12264 while (sp != stack_start) {
12269 case CEE_LEAVE_S: {
12272 if (*ip == CEE_LEAVE) {
12274 target = ip + 5 + (gint32)read32(ip + 1);
12277 target = ip + 2 + (signed char)(ip [1]);
12280 /* empty the stack */
12281 while (sp != stack_start) {
12286 * If this leave statement is in a catch block, check for a
12287 * pending exception, and rethrow it if necessary.
12288 * We avoid doing this in runtime invoke wrappers, since those are called
12289 * by native code which excepts the wrapper to catch all exceptions.
12291 for (i = 0; i < header->num_clauses; ++i) {
12292 MonoExceptionClause *clause = &header->clauses [i];
12295 * Use <= in the final comparison to handle clauses with multiple
12296 * leave statements, like in bug #78024.
12297 * The ordering of the exception clauses guarantees that we find the
12298 * innermost clause.
12300 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12302 MonoBasicBlock *dont_throw;
12307 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12310 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12312 NEW_BBLOCK (cfg, dont_throw);
12315 * Currently, we always rethrow the abort exception, despite the
12316 * fact that this is not correct. See thread6.cs for an example.
12317 * But propagating the abort exception is more important than
12318 * getting the sematics right.
12320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12322 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12324 MONO_START_BB (cfg, dont_throw);
12329 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12332 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12334 MonoExceptionClause *clause;
12336 for (tmp = handlers; tmp; tmp = tmp->next) {
12337 clause = (MonoExceptionClause *)tmp->data;
12338 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12340 link_bblock (cfg, cfg->cbb, tblock);
12341 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12342 ins->inst_target_bb = tblock;
12343 ins->inst_eh_block = clause;
12344 MONO_ADD_INS (cfg->cbb, ins);
12345 cfg->cbb->has_call_handler = 1;
12346 if (COMPILE_LLVM (cfg)) {
12347 MonoBasicBlock *target_bb;
12350 * Link the finally bblock with the target, since it will
12351 * conceptually branch there.
12352 * FIXME: Have to link the bblock containing the endfinally.
12354 GET_BBLOCK (cfg, target_bb, target);
12355 link_bblock (cfg, tblock, target_bb);
12358 g_list_free (handlers);
12361 MONO_INST_NEW (cfg, ins, OP_BR);
12362 MONO_ADD_INS (cfg->cbb, ins);
12363 GET_BBLOCK (cfg, tblock, target);
12364 link_bblock (cfg, cfg->cbb, tblock);
12365 ins->inst_target_bb = tblock;
12367 start_new_bblock = 1;
12369 if (*ip == CEE_LEAVE)
12378 * Mono specific opcodes
12380 case MONO_CUSTOM_PREFIX: {
12382 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12386 case CEE_MONO_ICALL: {
12388 MonoJitICallInfo *info;
12390 token = read32 (ip + 2);
12391 func = mono_method_get_wrapper_data (method, token);
12392 info = mono_find_jit_icall_by_addr (func);
12394 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12397 CHECK_STACK (info->sig->param_count);
12398 sp -= info->sig->param_count;
12400 ins = mono_emit_jit_icall (cfg, info->func, sp);
12401 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12405 inline_costs += 10 * num_calls++;
12409 case CEE_MONO_LDPTR_CARD_TABLE:
12410 case CEE_MONO_LDPTR_NURSERY_START:
12411 case CEE_MONO_LDPTR_NURSERY_BITS:
12412 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12413 CHECK_STACK_OVF (1);
12416 case CEE_MONO_LDPTR_CARD_TABLE:
12417 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12419 case CEE_MONO_LDPTR_NURSERY_START:
12420 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12422 case CEE_MONO_LDPTR_NURSERY_BITS:
12423 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12425 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12426 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12432 inline_costs += 10 * num_calls++;
12435 case CEE_MONO_LDPTR: {
12438 CHECK_STACK_OVF (1);
12440 token = read32 (ip + 2);
12442 ptr = mono_method_get_wrapper_data (method, token);
12443 EMIT_NEW_PCONST (cfg, ins, ptr);
12446 inline_costs += 10 * num_calls++;
12447 /* Can't embed random pointers into AOT code */
12451 case CEE_MONO_JIT_ICALL_ADDR: {
12452 MonoJitICallInfo *callinfo;
12455 CHECK_STACK_OVF (1);
12457 token = read32 (ip + 2);
12459 ptr = mono_method_get_wrapper_data (method, token);
12460 callinfo = mono_find_jit_icall_by_addr (ptr);
12461 g_assert (callinfo);
12462 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12465 inline_costs += 10 * num_calls++;
12468 case CEE_MONO_ICALL_ADDR: {
12469 MonoMethod *cmethod;
12472 CHECK_STACK_OVF (1);
12474 token = read32 (ip + 2);
12476 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12478 if (cfg->compile_aot) {
12479 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12481 ptr = mono_lookup_internal_call (cmethod);
12483 EMIT_NEW_PCONST (cfg, ins, ptr);
12489 case CEE_MONO_VTADDR: {
12490 MonoInst *src_var, *src;
12496 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12497 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12502 case CEE_MONO_NEWOBJ: {
12503 MonoInst *iargs [2];
12505 CHECK_STACK_OVF (1);
12507 token = read32 (ip + 2);
12508 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12509 mono_class_init (klass);
12510 NEW_DOMAINCONST (cfg, iargs [0]);
12511 MONO_ADD_INS (cfg->cbb, iargs [0]);
12512 NEW_CLASSCONST (cfg, iargs [1], klass);
12513 MONO_ADD_INS (cfg->cbb, iargs [1]);
12514 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12516 inline_costs += 10 * num_calls++;
12519 case CEE_MONO_OBJADDR:
12522 MONO_INST_NEW (cfg, ins, OP_MOVE);
12523 ins->dreg = alloc_ireg_mp (cfg);
12524 ins->sreg1 = sp [0]->dreg;
12525 ins->type = STACK_MP;
12526 MONO_ADD_INS (cfg->cbb, ins);
12530 case CEE_MONO_LDNATIVEOBJ:
12532 * Similar to LDOBJ, but instead load the unmanaged
12533 * representation of the vtype to the stack.
12538 token = read32 (ip + 2);
12539 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12540 g_assert (klass->valuetype);
12541 mono_class_init (klass);
12544 MonoInst *src, *dest, *temp;
12547 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12548 temp->backend.is_pinvoke = 1;
12549 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12550 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12552 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12553 dest->type = STACK_VTYPE;
12554 dest->klass = klass;
12560 case CEE_MONO_RETOBJ: {
12562 * Same as RET, but return the native representation of a vtype
12565 g_assert (cfg->ret);
12566 g_assert (mono_method_signature (method)->pinvoke);
12571 token = read32 (ip + 2);
12572 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12574 if (!cfg->vret_addr) {
12575 g_assert (cfg->ret_var_is_local);
12577 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12579 EMIT_NEW_RETLOADA (cfg, ins);
12581 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12583 if (sp != stack_start)
12586 MONO_INST_NEW (cfg, ins, OP_BR);
12587 ins->inst_target_bb = end_bblock;
12588 MONO_ADD_INS (cfg->cbb, ins);
12589 link_bblock (cfg, cfg->cbb, end_bblock);
12590 start_new_bblock = 1;
12594 case CEE_MONO_CISINST:
12595 case CEE_MONO_CCASTCLASS: {
12600 token = read32 (ip + 2);
12601 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12602 if (ip [1] == CEE_MONO_CISINST)
12603 ins = handle_cisinst (cfg, klass, sp [0]);
12605 ins = handle_ccastclass (cfg, klass, sp [0]);
12610 case CEE_MONO_SAVE_LMF:
12611 case CEE_MONO_RESTORE_LMF:
12614 case CEE_MONO_CLASSCONST:
12615 CHECK_STACK_OVF (1);
12617 token = read32 (ip + 2);
12618 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12621 inline_costs += 10 * num_calls++;
12623 case CEE_MONO_NOT_TAKEN:
12624 cfg->cbb->out_of_line = TRUE;
12627 case CEE_MONO_TLS: {
12630 CHECK_STACK_OVF (1);
12632 key = (MonoTlsKey)read32 (ip + 2);
12633 g_assert (key < TLS_KEY_NUM);
12635 ins = mono_create_tls_get (cfg, key);
12637 if (cfg->compile_aot) {
12639 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12640 ins->dreg = alloc_preg (cfg);
12641 ins->type = STACK_PTR;
12643 g_assert_not_reached ();
12646 ins->type = STACK_PTR;
12647 MONO_ADD_INS (cfg->cbb, ins);
12652 case CEE_MONO_DYN_CALL: {
12653 MonoCallInst *call;
12655 /* It would be easier to call a trampoline, but that would put an
12656 * extra frame on the stack, confusing exception handling. So
12657 * implement it inline using an opcode for now.
12660 if (!cfg->dyn_call_var) {
12661 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12662 /* prevent it from being register allocated */
12663 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12666 /* Has to use a call inst since it local regalloc expects it */
12667 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12668 ins = (MonoInst*)call;
12670 ins->sreg1 = sp [0]->dreg;
12671 ins->sreg2 = sp [1]->dreg;
12672 MONO_ADD_INS (cfg->cbb, ins);
12674 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12677 inline_costs += 10 * num_calls++;
12681 case CEE_MONO_MEMORY_BARRIER: {
12683 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12687 case CEE_MONO_JIT_ATTACH: {
12688 MonoInst *args [16], *domain_ins;
12689 MonoInst *ad_ins, *jit_tls_ins;
12690 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12692 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12694 EMIT_NEW_PCONST (cfg, ins, NULL);
12695 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12697 ad_ins = mono_get_domain_intrinsic (cfg);
12698 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12700 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12701 NEW_BBLOCK (cfg, next_bb);
12702 NEW_BBLOCK (cfg, call_bb);
12704 if (cfg->compile_aot) {
12705 /* AOT code is only used in the root domain */
12706 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12708 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12710 MONO_ADD_INS (cfg->cbb, ad_ins);
12711 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12714 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12716 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12719 MONO_START_BB (cfg, call_bb);
12722 if (cfg->compile_aot) {
12723 /* AOT code is only used in the root domain */
12724 EMIT_NEW_PCONST (cfg, args [0], NULL);
12726 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12728 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12729 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12732 MONO_START_BB (cfg, next_bb);
12736 case CEE_MONO_JIT_DETACH: {
12737 MonoInst *args [16];
12739 /* Restore the original domain */
12740 dreg = alloc_ireg (cfg);
12741 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12742 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12747 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12753 case CEE_PREFIX1: {
12756 case CEE_ARGLIST: {
12757 /* somewhat similar to LDTOKEN */
12758 MonoInst *addr, *vtvar;
12759 CHECK_STACK_OVF (1);
12760 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12762 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12763 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12765 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12766 ins->type = STACK_VTYPE;
12767 ins->klass = mono_defaults.argumenthandle_class;
12777 MonoInst *cmp, *arg1, *arg2;
12785 * The following transforms:
12786 * CEE_CEQ into OP_CEQ
12787 * CEE_CGT into OP_CGT
12788 * CEE_CGT_UN into OP_CGT_UN
12789 * CEE_CLT into OP_CLT
12790 * CEE_CLT_UN into OP_CLT_UN
12792 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12794 MONO_INST_NEW (cfg, ins, cmp->opcode);
12795 cmp->sreg1 = arg1->dreg;
12796 cmp->sreg2 = arg2->dreg;
12797 type_from_op (cfg, cmp, arg1, arg2);
12799 add_widen_op (cfg, cmp, &arg1, &arg2);
12800 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12801 cmp->opcode = OP_LCOMPARE;
12802 else if (arg1->type == STACK_R4)
12803 cmp->opcode = OP_RCOMPARE;
12804 else if (arg1->type == STACK_R8)
12805 cmp->opcode = OP_FCOMPARE;
12807 cmp->opcode = OP_ICOMPARE;
12808 MONO_ADD_INS (cfg->cbb, cmp);
12809 ins->type = STACK_I4;
12810 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12811 type_from_op (cfg, ins, arg1, arg2);
12813 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12815 * The backends expect the fceq opcodes to do the
12818 ins->sreg1 = cmp->sreg1;
12819 ins->sreg2 = cmp->sreg2;
12822 MONO_ADD_INS (cfg->cbb, ins);
12828 MonoInst *argconst;
12829 MonoMethod *cil_method;
12831 CHECK_STACK_OVF (1);
12833 n = read32 (ip + 2);
12834 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12835 if (!cmethod || mono_loader_get_last_error ())
12837 mono_class_init (cmethod->klass);
12839 mono_save_token_info (cfg, image, n, cmethod);
12841 context_used = mini_method_check_context_used (cfg, cmethod);
12843 cil_method = cmethod;
12844 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12845 METHOD_ACCESS_FAILURE (method, cil_method);
12847 if (mono_security_core_clr_enabled ())
12848 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12851 * Optimize the common case of ldftn+delegate creation
12853 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12854 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12855 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12856 MonoInst *target_ins, *handle_ins;
12857 MonoMethod *invoke;
12858 int invoke_context_used;
12860 invoke = mono_get_delegate_invoke (ctor_method->klass);
12861 if (!invoke || !mono_method_signature (invoke))
12864 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12866 target_ins = sp [-1];
12868 if (mono_security_core_clr_enabled ())
12869 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12871 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12872 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12873 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12874 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12875 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12879 /* FIXME: SGEN support */
12880 if (invoke_context_used == 0 || cfg->llvm_only) {
12882 if (cfg->verbose_level > 3)
12883 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12884 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12887 CHECK_CFG_EXCEPTION;
12897 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12898 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12902 inline_costs += 10 * num_calls++;
12905 case CEE_LDVIRTFTN: {
12906 MonoInst *args [2];
12910 n = read32 (ip + 2);
12911 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12912 if (!cmethod || mono_loader_get_last_error ())
12914 mono_class_init (cmethod->klass);
12916 context_used = mini_method_check_context_used (cfg, cmethod);
12918 if (mono_security_core_clr_enabled ())
12919 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12922 * Optimize the common case of ldvirtftn+delegate creation
12924 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12925 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12926 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12927 MonoInst *target_ins, *handle_ins;
12928 MonoMethod *invoke;
12929 int invoke_context_used;
12930 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12932 invoke = mono_get_delegate_invoke (ctor_method->klass);
12933 if (!invoke || !mono_method_signature (invoke))
12936 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12938 target_ins = sp [-1];
12940 if (mono_security_core_clr_enabled ())
12941 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12943 /* FIXME: SGEN support */
12944 if (invoke_context_used == 0 || cfg->llvm_only) {
12946 if (cfg->verbose_level > 3)
12947 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12948 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12951 CHECK_CFG_EXCEPTION;
12964 args [1] = emit_get_rgctx_method (cfg, context_used,
12965 cmethod, MONO_RGCTX_INFO_METHOD);
12968 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12970 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12973 inline_costs += 10 * num_calls++;
12977 CHECK_STACK_OVF (1);
12979 n = read16 (ip + 2);
12981 EMIT_NEW_ARGLOAD (cfg, ins, n);
12986 CHECK_STACK_OVF (1);
12988 n = read16 (ip + 2);
12990 NEW_ARGLOADA (cfg, ins, n);
12991 MONO_ADD_INS (cfg->cbb, ins);
12999 n = read16 (ip + 2);
13001 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13003 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13007 CHECK_STACK_OVF (1);
13009 n = read16 (ip + 2);
13011 EMIT_NEW_LOCLOAD (cfg, ins, n);
13016 unsigned char *tmp_ip;
13017 CHECK_STACK_OVF (1);
13019 n = read16 (ip + 2);
13022 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13028 EMIT_NEW_LOCLOADA (cfg, ins, n);
13037 n = read16 (ip + 2);
13039 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13041 emit_stloc_ir (cfg, sp, header, n);
13048 if (sp != stack_start)
13050 if (cfg->method != method)
13052 * Inlining this into a loop in a parent could lead to
13053 * stack overflows which is different behavior than the
13054 * non-inlined case, thus disable inlining in this case.
13056 INLINE_FAILURE("localloc");
13058 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13059 ins->dreg = alloc_preg (cfg);
13060 ins->sreg1 = sp [0]->dreg;
13061 ins->type = STACK_PTR;
13062 MONO_ADD_INS (cfg->cbb, ins);
13064 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13066 ins->flags |= MONO_INST_INIT;
13071 case CEE_ENDFILTER: {
13072 MonoExceptionClause *clause, *nearest;
13077 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13079 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13080 ins->sreg1 = (*sp)->dreg;
13081 MONO_ADD_INS (cfg->cbb, ins);
13082 start_new_bblock = 1;
13086 for (cc = 0; cc < header->num_clauses; ++cc) {
13087 clause = &header->clauses [cc];
13088 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13089 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13090 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13093 g_assert (nearest);
13094 if ((ip - header->code) != nearest->handler_offset)
13099 case CEE_UNALIGNED_:
13100 ins_flag |= MONO_INST_UNALIGNED;
13101 /* FIXME: record alignment? we can assume 1 for now */
13105 case CEE_VOLATILE_:
13106 ins_flag |= MONO_INST_VOLATILE;
13110 ins_flag |= MONO_INST_TAILCALL;
13111 cfg->flags |= MONO_CFG_HAS_TAIL;
13112 /* Can't inline tail calls at this time */
13113 inline_costs += 100000;
13120 token = read32 (ip + 2);
13121 klass = mini_get_class (method, token, generic_context);
13122 CHECK_TYPELOAD (klass);
13123 if (generic_class_is_reference_type (cfg, klass))
13124 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13126 mini_emit_initobj (cfg, *sp, NULL, klass);
13130 case CEE_CONSTRAINED_:
13132 token = read32 (ip + 2);
13133 constrained_class = mini_get_class (method, token, generic_context);
13134 CHECK_TYPELOAD (constrained_class);
13138 case CEE_INITBLK: {
13139 MonoInst *iargs [3];
13143 /* Skip optimized paths for volatile operations. */
13144 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13145 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13146 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13147 /* emit_memset only works when val == 0 */
13148 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13151 iargs [0] = sp [0];
13152 iargs [1] = sp [1];
13153 iargs [2] = sp [2];
13154 if (ip [1] == CEE_CPBLK) {
13156 * FIXME: It's unclear whether we should be emitting both the acquire
13157 * and release barriers for cpblk. It is technically both a load and
13158 * store operation, so it seems like that's the sensible thing to do.
13160 * FIXME: We emit full barriers on both sides of the operation for
13161 * simplicity. We should have a separate atomic memcpy method instead.
13163 MonoMethod *memcpy_method = get_memcpy_method ();
13165 if (ins_flag & MONO_INST_VOLATILE)
13166 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13168 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13169 call->flags |= ins_flag;
13171 if (ins_flag & MONO_INST_VOLATILE)
13172 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13174 MonoMethod *memset_method = get_memset_method ();
13175 if (ins_flag & MONO_INST_VOLATILE) {
13176 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13177 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13179 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13180 call->flags |= ins_flag;
13191 ins_flag |= MONO_INST_NOTYPECHECK;
13193 ins_flag |= MONO_INST_NORANGECHECK;
13194 /* we ignore the no-nullcheck for now since we
13195 * really do it explicitly only when doing callvirt->call
13199 case CEE_RETHROW: {
13201 int handler_offset = -1;
13203 for (i = 0; i < header->num_clauses; ++i) {
13204 MonoExceptionClause *clause = &header->clauses [i];
13205 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13206 handler_offset = clause->handler_offset;
13211 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13213 if (handler_offset == -1)
13216 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13217 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13218 ins->sreg1 = load->dreg;
13219 MONO_ADD_INS (cfg->cbb, ins);
13221 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13222 MONO_ADD_INS (cfg->cbb, ins);
13225 link_bblock (cfg, cfg->cbb, end_bblock);
13226 start_new_bblock = 1;
13234 CHECK_STACK_OVF (1);
13236 token = read32 (ip + 2);
13237 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13238 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13241 val = mono_type_size (type, &ialign);
13243 MonoClass *klass = mini_get_class (method, token, generic_context);
13244 CHECK_TYPELOAD (klass);
13246 val = mono_type_size (&klass->byval_arg, &ialign);
13248 if (mini_is_gsharedvt_klass (klass))
13249 GSHAREDVT_FAILURE (*ip);
13251 EMIT_NEW_ICONST (cfg, ins, val);
13256 case CEE_REFANYTYPE: {
13257 MonoInst *src_var, *src;
13259 GSHAREDVT_FAILURE (*ip);
13265 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13267 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13268 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13269 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13274 case CEE_READONLY_:
13287 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13297 g_warning ("opcode 0x%02x not handled", *ip);
13301 if (start_new_bblock != 1)
13304 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13305 if (cfg->cbb->next_bb) {
13306 /* This could already be set because of inlining, #693905 */
13307 MonoBasicBlock *bb = cfg->cbb;
13309 while (bb->next_bb)
13311 bb->next_bb = end_bblock;
13313 cfg->cbb->next_bb = end_bblock;
13316 if (cfg->method == method && cfg->domainvar) {
13318 MonoInst *get_domain;
13320 cfg->cbb = init_localsbb;
13322 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13323 MONO_ADD_INS (cfg->cbb, get_domain);
13325 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13327 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13328 MONO_ADD_INS (cfg->cbb, store);
13331 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13332 if (cfg->compile_aot)
13333 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13334 mono_get_got_var (cfg);
13337 if (cfg->method == method && cfg->got_var)
13338 mono_emit_load_got_addr (cfg);
13340 if (init_localsbb) {
13341 cfg->cbb = init_localsbb;
13343 for (i = 0; i < header->num_locals; ++i) {
13344 emit_init_local (cfg, i, header->locals [i], init_locals);
13348 if (cfg->init_ref_vars && cfg->method == method) {
13349 /* Emit initialization for ref vars */
13350 // FIXME: Avoid duplication initialization for IL locals.
13351 for (i = 0; i < cfg->num_varinfo; ++i) {
13352 MonoInst *ins = cfg->varinfo [i];
13354 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13355 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13359 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13360 cfg->cbb = init_localsbb;
13361 emit_push_lmf (cfg);
13364 cfg->cbb = init_localsbb;
13365 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13368 MonoBasicBlock *bb;
13371 * Make seq points at backward branch targets interruptable.
13373 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13374 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13375 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13378 /* Add a sequence point for method entry/exit events */
13379 if (seq_points && cfg->gen_sdb_seq_points) {
13380 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13381 MONO_ADD_INS (init_localsbb, ins);
13382 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13383 MONO_ADD_INS (cfg->bb_exit, ins);
13387 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13388 * the code they refer to was dead (#11880).
13390 if (sym_seq_points) {
13391 for (i = 0; i < header->code_size; ++i) {
13392 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13395 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13396 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13403 if (cfg->method == method) {
13404 MonoBasicBlock *bb;
13405 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13406 bb->region = mono_find_block_region (cfg, bb->real_offset);
13408 mono_create_spvar_for_region (cfg, bb->region);
13409 if (cfg->verbose_level > 2)
13410 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13414 if (inline_costs < 0) {
13417 /* Method is too large */
13418 mname = mono_method_full_name (method, TRUE);
13419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13420 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13424 if ((cfg->verbose_level > 2) && (cfg->method == method))
13425 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13430 g_assert (!mono_error_ok (&cfg->error));
13434 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13438 set_exception_type_from_invalid_il (cfg, method, ip);
13442 g_slist_free (class_inits);
13443 mono_basic_block_free (original_bb);
13444 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13445 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13446 if (cfg->exception_type)
13449 return inline_costs;
13453 store_membase_reg_to_store_membase_imm (int opcode)
13456 case OP_STORE_MEMBASE_REG:
13457 return OP_STORE_MEMBASE_IMM;
13458 case OP_STOREI1_MEMBASE_REG:
13459 return OP_STOREI1_MEMBASE_IMM;
13460 case OP_STOREI2_MEMBASE_REG:
13461 return OP_STOREI2_MEMBASE_IMM;
13462 case OP_STOREI4_MEMBASE_REG:
13463 return OP_STOREI4_MEMBASE_IMM;
13464 case OP_STOREI8_MEMBASE_REG:
13465 return OP_STOREI8_MEMBASE_IMM;
13467 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to the variant taking an immediate second
 * operand (e.g. int add -> OP_IADD_IMM), allowing constant operands to be
 * folded directly into the instruction.
 */
13474 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU / shift opcodes */
13478 return OP_IADD_IMM;
13480 return OP_ISUB_IMM;
13482 return OP_IDIV_IMM;
13484 return OP_IDIV_UN_IMM;
13486 return OP_IREM_IMM;
13488 return OP_IREM_UN_IMM;
13490 return OP_IMUL_IMM;
13492 return OP_IAND_IMM;
13496 return OP_IXOR_IMM;
13498 return OP_ISHL_IMM;
13500 return OP_ISHR_IMM;
13502 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU / shift opcodes */
13505 return OP_LADD_IMM;
13507 return OP_LSUB_IMM;
13509 return OP_LAND_IMM;
13513 return OP_LXOR_IMM;
13515 return OP_LSHL_IMM;
13517 return OP_LSHR_IMM;
13519 return OP_LSHR_UN_IMM;
/* Long rem has an imm form only when it is not emulated, i.e. on 64 bit hosts */
13520 #if SIZEOF_REGISTER == 8
13522 return OP_LREM_IMM;
/* Compares */
13526 return OP_COMPARE_IMM;
13528 return OP_ICOMPARE_IMM;
13530 return OP_LCOMPARE_IMM;
/* Stores: same mapping as store_membase_reg_to_store_membase_imm () */
13532 case OP_STORE_MEMBASE_REG:
13533 return OP_STORE_MEMBASE_IMM;
13534 case OP_STOREI1_MEMBASE_REG:
13535 return OP_STOREI1_MEMBASE_IMM;
13536 case OP_STOREI2_MEMBASE_REG:
13537 return OP_STOREI2_MEMBASE_IMM;
13538 case OP_STOREI4_MEMBASE_REG:
13539 return OP_STOREI4_MEMBASE_IMM;
/* Target specific opcodes */
13541 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13543 return OP_X86_PUSH_IMM;
13544 case OP_X86_COMPARE_MEMBASE_REG:
13545 return OP_X86_COMPARE_MEMBASE_IMM;
13547 #if defined(TARGET_AMD64)
13548 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13549 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a constant address becomes a direct call */
13551 case OP_VOIDCALL_REG:
13552 return OP_VOIDCALL;
13560 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * low-level OP_LOAD*_MEMBASE opcode. Aborts on anything else.
 */
13567 ldind_to_load_membase (int opcode)
13571 return OP_LOADI1_MEMBASE;
13573 return OP_LOADU1_MEMBASE;
13575 return OP_LOADI2_MEMBASE;
13577 return OP_LOADU2_MEMBASE;
13579 return OP_LOADI4_MEMBASE;
13581 return OP_LOADU4_MEMBASE;
13583 return OP_LOAD_MEMBASE;
/* Object references load as a full pointer-sized word */
13584 case CEE_LDIND_REF:
13585 return OP_LOAD_MEMBASE;
13587 return OP_LOADI8_MEMBASE;
13589 return OP_LOADR4_MEMBASE;
13591 return OP_LOADR8_MEMBASE;
13593 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * low-level OP_STORE*_MEMBASE_REG opcode. Aborts on anything else.
 */
13600 stind_to_store_membase (int opcode)
13604 return OP_STOREI1_MEMBASE_REG;
13606 return OP_STOREI2_MEMBASE_REG;
13608 return OP_STOREI4_MEMBASE_REG;
/* Object references store as a full pointer-sized word */
13610 case CEE_STIND_REF:
13611 return OP_STORE_MEMBASE_REG;
13613 return OP_STOREI8_MEMBASE_REG;
13615 return OP_STORER4_MEMBASE_REG;
13617 return OP_STORER8_MEMBASE_REG;
13619 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load opcode to the variant loading from an absolute
 * address (OP_*_MEM). Only supported on x86/amd64; elsewhere no mapping is
 * performed.
 */
13626 mono_load_membase_to_load_mem (int opcode)
13628 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13629 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13631 case OP_LOAD_MEMBASE:
13632 return OP_LOAD_MEM;
13633 case OP_LOADU1_MEMBASE:
13634 return OP_LOADU1_MEM;
13635 case OP_LOADU2_MEMBASE:
13636 return OP_LOADU2_MEM;
13637 case OP_LOADI4_MEMBASE:
13638 return OP_LOADI4_MEM;
13639 case OP_LOADU4_MEMBASE:
13640 return OP_LOADU4_MEM;
/* 64 bit loads are only available as a single instruction on 64 bit hosts */
13641 #if SIZEOF_REGISTER == 8
13642 case OP_LOADI8_MEMBASE:
13643 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return the x86/amd64 read-modify-write opcode which fuses OPCODE with a
 * following store of the result described by STORE_OPCODE (e.g. add + store
 * becomes OP_X86_ADD_MEMBASE_REG), or -1 when no fusion is possible.
 * Callers test for -1 (see mono_spill_global_vars ()).
 */
13652 op_to_op_dest_membase (int store_opcode, int opcode)
13654 #if defined(TARGET_X86)
/* Only full-word / 32 bit stores can be fused on x86 */
13655 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13660 return OP_X86_ADD_MEMBASE_REG;
13662 return OP_X86_SUB_MEMBASE_REG;
13664 return OP_X86_AND_MEMBASE_REG;
13666 return OP_X86_OR_MEMBASE_REG;
13668 return OP_X86_XOR_MEMBASE_REG;
13671 return OP_X86_ADD_MEMBASE_IMM;
13674 return OP_X86_SUB_MEMBASE_IMM;
13677 return OP_X86_AND_MEMBASE_IMM;
13680 return OP_X86_OR_MEMBASE_IMM;
13683 return OP_X86_XOR_MEMBASE_IMM;
13689 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 64 bit stores */
13690 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13695 return OP_X86_ADD_MEMBASE_REG;
13697 return OP_X86_SUB_MEMBASE_REG;
13699 return OP_X86_AND_MEMBASE_REG;
13701 return OP_X86_OR_MEMBASE_REG;
13703 return OP_X86_XOR_MEMBASE_REG;
13705 return OP_X86_ADD_MEMBASE_IMM;
13707 return OP_X86_SUB_MEMBASE_IMM;
13709 return OP_X86_AND_MEMBASE_IMM;
13711 return OP_X86_OR_MEMBASE_IMM;
13713 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants */
13715 return OP_AMD64_ADD_MEMBASE_REG;
13717 return OP_AMD64_SUB_MEMBASE_REG;
13719 return OP_AMD64_AND_MEMBASE_REG;
13721 return OP_AMD64_OR_MEMBASE_REG;
13723 return OP_AMD64_XOR_MEMBASE_REG;
13726 return OP_AMD64_ADD_MEMBASE_IMM;
13729 return OP_AMD64_SUB_MEMBASE_IMM;
13732 return OP_AMD64_AND_MEMBASE_IMM;
13735 return OP_AMD64_OR_MEMBASE_IMM;
13738 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte store into an x86
 * SETcc-to-memory opcode. Returns -1 when no fusion is possible; callers
 * test for -1 (see mono_spill_global_vars ()).
 */
13748 op_to_op_store_membase (int store_opcode, int opcode)
13750 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only an I1 store can be fused */
13753 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13754 return OP_X86_SETEQ_MEMBASE;
13756 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13757 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load feeding OPCODE's first source register into a _MEMBASE
 * variant of OPCODE, so the memory operand is read directly by the
 * instruction. Returns -1 when no fusion is possible; callers test for -1
 * (see mono_spill_global_vars ()).
 */
13765 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13768 /* FIXME: This has sign extension issues */
13770 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13771 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer-sized / 32 bit loads can be fused below */
13774 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13779 return OP_X86_PUSH_MEMBASE;
13780 case OP_COMPARE_IMM:
13781 case OP_ICOMPARE_IMM:
13782 return OP_X86_COMPARE_MEMBASE_IMM;
13785 return OP_X86_COMPARE_MEMBASE_REG;
13789 #ifdef TARGET_AMD64
13790 /* FIXME: This has sign extension issues */
13792 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13793 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32), OP_LOAD_MEMBASE is a 32 bit load, hence the extra checks */
13798 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13799 return OP_X86_PUSH_MEMBASE;
13801 /* FIXME: This only works for 32 bit immediates
13802 case OP_COMPARE_IMM:
13803 case OP_LCOMPARE_IMM:
13804 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13805 return OP_AMD64_COMPARE_MEMBASE_IMM;
13807 case OP_ICOMPARE_IMM:
13808 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13809 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13813 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13814 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13815 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13816 return OP_AMD64_COMPARE_MEMBASE_REG;
13819 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13820 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load feeding OPCODE's second source register into a _MEMBASE
 * variant of OPCODE (reg OP [base+offset] forms). Returns -1 when no
 * fusion is possible; callers test for -1 (see mono_spill_global_vars ()).
 */
13829 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13832 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13838 return OP_X86_COMPARE_REG_MEMBASE;
13840 return OP_X86_ADD_REG_MEMBASE;
13842 return OP_X86_SUB_REG_MEMBASE;
13844 return OP_X86_AND_REG_MEMBASE;
13846 return OP_X86_OR_REG_MEMBASE;
13848 return OP_X86_XOR_REG_MEMBASE;
13852 #ifdef TARGET_AMD64
/* 32 bit operand sizes (including OP_LOAD_MEMBASE under ILP32/x32) */
13853 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13856 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13858 return OP_X86_ADD_REG_MEMBASE;
13860 return OP_X86_SUB_REG_MEMBASE;
13862 return OP_X86_AND_REG_MEMBASE;
13864 return OP_X86_OR_REG_MEMBASE;
13866 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operand sizes */
13868 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13872 return OP_AMD64_COMPARE_REG_MEMBASE;
13874 return OP_AMD64_ADD_REG_MEMBASE;
13876 return OP_AMD64_SUB_REG_MEMBASE;
13878 return OP_AMD64_AND_REG_MEMBASE;
13880 return OP_AMD64_OR_REG_MEMBASE;
13882 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which are
 * emulated in software on the current architecture (long shifts on 32 bit
 * hosts, mul/div/rem where MONO_ARCH_EMULATE_* is defined), since the
 * emulation helpers take register arguments.
 */
13891 mono_op_to_op_imm_noemul (int opcode)
13894 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13900 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13907 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13912 return mono_op_to_op_imm (opcode);
13917 * mono_handle_global_vregs:
13919 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13923 mono_handle_global_vregs (MonoCompile *cfg)
13925 gint32 *vreg_to_bb;
13926 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num + 1 = seen only in that
 * bb, -1 = seen in multiple bbs.
 * NOTE(review): the element size is sizeof (gint32*) for a gint32 array —
 * over-allocates on 64 bit — and the "+ 1" sits outside the multiply;
 * presumably "(cfg->next_vreg + 1) * sizeof (gint32)" was intended. Confirm.
 */
13929 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13931 #ifdef MONO_ARCH_SIMD_INTRINSICS
13932 if (cfg->uses_simd_intrinsics)
13933 mono_simd_simplify_indirection (cfg);
13936 /* Find local vregs used in more than one bb */
13937 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13938 MonoInst *ins = bb->code;
13939 int block_num = bb->block_num;
13941 if (cfg->verbose_level > 2)
13942 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13945 for (; ins; ins = ins->next) {
13946 const char *spec = INS_INFO (ins->opcode);
13947 int regtype = 0, regindex;
13950 if (G_UNLIKELY (cfg->verbose_level > 2))
13951 mono_print_ins (ins);
/* Only low-level (non-CIL) opcodes should remain at this point */
13953 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit each of the up to 4 registers (dest, src1-3) named by the ins spec */
13955 for (regindex = 0; regindex < 4; regindex ++) {
13958 if (regindex == 0) {
13959 regtype = spec [MONO_INST_DEST];
13960 if (regtype == ' ')
13963 } else if (regindex == 1) {
13964 regtype = spec [MONO_INST_SRC1];
13965 if (regtype == ' ')
13968 } else if (regindex == 2) {
13969 regtype = spec [MONO_INST_SRC2];
13970 if (regtype == ' ')
13973 } else if (regindex == 3) {
13974 regtype = spec [MONO_INST_SRC3];
13975 if (regtype == ' ')
13980 #if SIZEOF_REGISTER == 4
13981 /* In the LLVM case, the long opcodes are not decomposed */
13982 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13984 * Since some instructions reference the original long vreg,
13985 * and some reference the two component vregs, it is quite hard
13986 * to determine when it needs to be global. So be conservative.
13988 if (!get_vreg_to_inst (cfg, vreg)) {
13989 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13991 if (cfg->verbose_level > 2)
13992 printf ("LONG VREG R%d made global.\n", vreg);
13996 * Make the component vregs volatile since the optimizations can
13997 * get confused otherwise.
13999 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
14000 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
14004 g_assert (vreg != -1);
14006 prev_bb = vreg_to_bb [vreg];
14007 if (prev_bb == 0) {
14008 /* 0 is a valid block num */
14009 vreg_to_bb [vreg] = block_num + 1;
14010 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers never become variables */
14011 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14014 if (!get_vreg_to_inst (cfg, vreg)) {
14015 if (G_UNLIKELY (cfg->verbose_level > 2))
14016 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the register class */
14020 if (vreg_is_ref (cfg, vreg))
14021 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14023 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14026 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14029 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14032 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14035 g_assert_not_reached ();
14039 /* Flag as having been used in more than one bb */
14040 vreg_to_bb [vreg] = -1;
14046 /* If a variable is used in only one bblock, convert it into a local vreg */
14047 for (i = 0; i < cfg->num_varinfo; i++) {
14048 MonoInst *var = cfg->varinfo [i];
14049 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14051 switch (var->type) {
14057 #if SIZEOF_REGISTER == 8
14060 #if !defined(TARGET_X86)
14061 /* Enabling this screws up the fp stack on x86 */
14064 if (mono_arch_is_soft_float ())
14068 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14072 /* Arguments are implicitly global */
14073 /* Putting R4 vars into registers doesn't work currently */
14074 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14075 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14077 * Make that the variable's liveness interval doesn't contain a call, since
14078 * that would cause the lvreg to be spilled, making the whole optimization
14081 /* This is too slow for JIT compilation */
14083 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14085 int def_index, call_index, ins_index;
14086 gboolean spilled = FALSE;
14091 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14092 const char *spec = INS_INFO (ins->opcode);
14094 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14095 def_index = ins_index;
/*
 * NOTE(review): both clauses of this || test SRC1/sreg1; the second was
 * presumably meant to test SRC2/sreg2, so uses through the second source
 * register may be missed here. Confirm against upstream history.
 */
14097 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14098 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14099 if (call_index > def_index) {
14105 if (MONO_IS_CALL (ins))
14106 call_index = ins_index;
14116 if (G_UNLIKELY (cfg->verbose_level > 2))
14117 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Retire the variable: the vreg is now purely block-local */
14118 var->flags |= MONO_INST_IS_DEAD;
14119 cfg->vreg_to_inst [var->dreg] = NULL;
14126 * Compress the varinfo and vars tables so the liveness computation is faster and
14127 * takes up less space.
14130 for (i = 0; i < cfg->num_varinfo; ++i) {
14131 MonoInst *var = cfg->varinfo [i];
14132 if (pos < i && cfg->locals_start == i)
14133 cfg->locals_start = pos;
14134 if (!(var->flags & MONO_INST_IS_DEAD)) {
14136 cfg->varinfo [pos] = cfg->varinfo [i];
14137 cfg->varinfo [pos]->inst_c0 = pos;
14138 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14139 cfg->vars [pos].idx = pos;
14140 #if SIZEOF_REGISTER == 4
14141 if (cfg->varinfo [pos]->type == STACK_I8) {
14142 /* Modify the two component vars too */
14145 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
14146 var1->inst_c0 = pos;
14147 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
14148 var1->inst_c0 = pos;
14155 cfg->num_varinfo = pos;
14156 if (cfg->locals_start > cfg->num_varinfo)
14157 cfg->locals_start = cfg->num_varinfo;
14161 * mono_allocate_gsharedvt_vars:
14163 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14164 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14167 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
14171 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14173 for (i = 0; i < cfg->num_varinfo; ++i) {
14174 MonoInst *ins = cfg->varinfo [i];
14177 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals record slot idx + 1 (0 means "no entry"); args are marked -1 */
14178 if (i >= cfg->locals_start) {
14180 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14181 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14182 ins->opcode = OP_GSHAREDVT_LOCAL;
14183 ins->inst_imm = idx;
/* Arguments are accessed by reference; see the OP_LDADDR handling in mono_spill_global_vars () */
14186 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14187 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14194 * mono_spill_global_vars:
14196 * Generate spill code for variables which are not allocated to registers,
14197 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14198 * code is generated which could be optimized by the local optimization passes.
14201 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14203 MonoBasicBlock *bb;
14205 int orig_next_vreg;
14206 guint32 *vreg_to_lvreg;
14208 guint32 i, lvregs_len;
14209 gboolean dest_has_lvreg = FALSE;
14210 MonoStackType stacktypes [128];
14211 MonoInst **live_range_start, **live_range_end;
14212 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14214 *need_local_opts = FALSE;
/* spec2 is a scratch ins spec built when a store's dreg really acts as a source (base reg) */
14216 memset (spec2, 0, sizeof (spec2));
14218 /* FIXME: Move this function to mini.c */
/* Map the ins-spec register class characters to stack types for alloc_dreg () */
14219 stacktypes ['i'] = STACK_PTR;
14220 stacktypes ['l'] = STACK_I8;
14221 stacktypes ['f'] = STACK_R8;
14222 #ifdef MONO_ARCH_SIMD_INTRINSICS
14223 stacktypes ['x'] = STACK_VTYPE;
14226 #if SIZEOF_REGISTER == 4
14227 /* Create MonoInsts for longs */
14228 for (i = 0; i < cfg->num_varinfo; i++) {
14229 MonoInst *ins = cfg->varinfo [i];
14231 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14232 switch (ins->type) {
14237 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14240 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32 bit component vregs stack slots within the 64 bit slot */
14242 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
14244 tree->opcode = OP_REGOFFSET;
14245 tree->inst_basereg = ins->inst_basereg;
14246 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14248 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
14250 tree->opcode = OP_REGOFFSET;
14251 tree->inst_basereg = ins->inst_basereg;
14252 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14262 if (cfg->compute_gc_maps) {
14263 /* registers need liveness info even for !non refs */
14264 for (i = 0; i < cfg->num_varinfo; i++) {
14265 MonoInst *ins = cfg->varinfo [i];
14267 if (ins->opcode == OP_REGVAR)
14268 ins->flags |= MONO_INST_GC_TRACK;
14272 /* FIXME: widening and truncation */
14275 * As an optimization, when a variable allocated to the stack is first loaded into
14276 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14277 * the variable again.
14279 orig_next_vreg = cfg->next_vreg;
14280 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14281 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14285 * These arrays contain the first and last instructions accessing a given
14287 * Since we emit bblocks in the same order we process them here, and we
14288 * don't split live ranges, these will precisely describe the live range of
14289 * the variable, i.e. the instruction range where a valid value can be found
14290 * in the variables location.
14291 * The live range is computed using the liveness info computed by the liveness pass.
14292 * We can't use vmv->range, since that is an abstract live range, and we need
14293 * one which is instruction precise.
14294 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14296 /* FIXME: Only do this if debugging info is requested */
14297 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
14298 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14299 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14300 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14302 /* Add spill loads/stores */
14303 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14306 if (cfg->verbose_level > 2)
14307 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14309 /* Clear vreg_to_lvreg array */
14310 for (i = 0; i < lvregs_len; i++)
14311 vreg_to_lvreg [lvregs [i]] = 0;
14315 MONO_BB_FOR_EACH_INS (bb, ins) {
14316 const char *spec = INS_INFO (ins->opcode);
14317 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14318 gboolean store, no_lvreg;
14319 int sregs [MONO_MAX_SRC_REGS];
14321 if (G_UNLIKELY (cfg->verbose_level > 2))
14322 mono_print_ins (ins);
14324 if (ins->opcode == OP_NOP)
14328 * We handle LDADDR here as well, since it can only be decomposed
14329 * when variable addresses are known.
14331 if (ins->opcode == OP_LDADDR) {
14332 MonoInst *var = (MonoInst *)ins->inst_p0;
14334 if (var->opcode == OP_VTARG_ADDR) {
14335 /* Happens on SPARC/S390 where vtypes are passed by reference */
14336 MonoInst *vtaddr = var->inst_left;
14337 if (vtaddr->opcode == OP_REGVAR) {
14338 ins->opcode = OP_MOVE;
14339 ins->sreg1 = vtaddr->dreg;
14341 else if (var->inst_left->opcode == OP_REGOFFSET) {
14342 ins->opcode = OP_LOAD_MEMBASE;
14343 ins->inst_basereg = vtaddr->inst_basereg;
14344 ins->inst_offset = vtaddr->inst_offset;
14347 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14348 /* gsharedvt arg passed by ref */
14349 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14351 ins->opcode = OP_LOAD_MEMBASE;
14352 ins->inst_basereg = var->inst_basereg;
14353 ins->inst_offset = var->inst_offset;
14354 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14355 MonoInst *load, *load2, *load3;
14356 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14357 int reg1, reg2, reg3;
14358 MonoInst *info_var = cfg->gsharedvt_info_var;
14359 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14363 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14366 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14368 g_assert (info_var);
14369 g_assert (locals_var);
14371 /* Mark the instruction used to compute the locals var as used */
14372 cfg->gsharedvt_locals_var_ins = NULL;
14374 /* Load the offset */
14375 if (info_var->opcode == OP_REGOFFSET) {
14376 reg1 = alloc_ireg (cfg);
14377 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14378 } else if (info_var->opcode == OP_REGVAR) {
14380 reg1 = info_var->dreg;
14382 g_assert_not_reached ();
14384 reg2 = alloc_ireg (cfg);
14385 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14386 /* Load the locals area address */
14387 reg3 = alloc_ireg (cfg);
14388 if (locals_var->opcode == OP_REGOFFSET) {
14389 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14390 } else if (locals_var->opcode == OP_REGVAR) {
14391 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14393 g_assert_not_reached ();
14395 /* Compute the address */
14396 ins->opcode = OP_PADD;
14400 mono_bblock_insert_before_ins (bb, ins, load3);
14401 mono_bblock_insert_before_ins (bb, load3, load2);
14403 mono_bblock_insert_before_ins (bb, load2, load);
14405 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is basereg + offset */
14407 ins->opcode = OP_ADD_IMM;
14408 ins->sreg1 = var->inst_basereg;
14409 ins->inst_imm = var->inst_offset;
14412 *need_local_opts = TRUE;
14413 spec = INS_INFO (ins->opcode);
/* CIL opcodes must all have been lowered by now */
14416 if (ins->opcode < MONO_CEE_LAST) {
14417 mono_print_ins (ins);
14418 g_assert_not_reached ();
14422 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14426 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so dreg holds the stored value; swapped back below */
14427 tmp_reg = ins->dreg;
14428 ins->dreg = ins->sreg2;
14429 ins->sreg2 = tmp_reg;
14432 spec2 [MONO_INST_DEST] = ' ';
14433 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14434 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14435 spec2 [MONO_INST_SRC3] = ' ';
14437 } else if (MONO_IS_STORE_MEMINDEX (ins))
14438 g_assert_not_reached ();
14443 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14444 printf ("\t %.3s %d", spec, ins->dreg);
14445 num_sregs = mono_inst_get_src_registers (ins, sregs);
14446 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14447 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
14454 regtype = spec [MONO_INST_DEST];
14455 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14458 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14459 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14460 MonoInst *store_ins;
14462 MonoInst *def_ins = ins;
14463 int dreg = ins->dreg; /* The original vreg */
14465 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14467 if (var->opcode == OP_REGVAR) {
14468 ins->dreg = var->dreg;
14469 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14471 * Instead of emitting a load+store, use a _membase opcode.
14473 g_assert (var->opcode == OP_REGOFFSET);
14474 if (ins->opcode == OP_MOVE) {
14478 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14479 ins->inst_basereg = var->inst_basereg;
14480 ins->inst_offset = var->inst_offset;
14483 spec = INS_INFO (ins->opcode);
14487 g_assert (var->opcode == OP_REGOFFSET);
14489 prev_dreg = ins->dreg;
14491 /* Invalidate any previous lvreg for this vreg */
14492 vreg_to_lvreg [ins->dreg] = 0;
/* Soft float keeps R8 values in integer registers */
14496 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14498 store_opcode = OP_STOREI8_MEMBASE_REG;
14501 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14503 #if SIZEOF_REGISTER != 8
14504 if (regtype == 'l') {
/* Spill the two 32 bit halves separately on 32 bit hosts */
14505 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14506 mono_bblock_insert_after_ins (bb, ins, store_ins);
14507 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14508 mono_bblock_insert_after_ins (bb, ins, store_ins);
14509 def_ins = store_ins;
14514 g_assert (store_opcode != OP_STOREV_MEMBASE);
14516 /* Try to fuse the store into the instruction itself */
14517 /* FIXME: Add more instructions */
14518 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14519 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14520 ins->inst_imm = ins->inst_c0;
14521 ins->inst_destbasereg = var->inst_basereg;
14522 ins->inst_offset = var->inst_offset;
14523 spec = INS_INFO (ins->opcode);
14524 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A move into a spilled var becomes the store itself */
14525 ins->opcode = store_opcode;
14526 ins->inst_destbasereg = var->inst_basereg;
14527 ins->inst_offset = var->inst_offset;
14531 tmp_reg = ins->dreg;
14532 ins->dreg = ins->sreg2;
14533 ins->sreg2 = tmp_reg;
14536 spec2 [MONO_INST_DEST] = ' ';
14537 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14538 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14539 spec2 [MONO_INST_SRC3] = ' ';
14541 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14542 // FIXME: The backends expect the base reg to be in inst_basereg
14543 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14545 ins->inst_basereg = var->inst_basereg;
14546 ins->inst_offset = var->inst_offset;
14547 spec = INS_INFO (ins->opcode);
14549 /* printf ("INS: "); mono_print_ins (ins); */
14550 /* Create a store instruction */
14551 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14553 /* Insert it after the instruction */
14554 mono_bblock_insert_after_ins (bb, ins, store_ins);
14556 def_ins = store_ins;
14559 * We can't assign ins->dreg to var->dreg here, since the
14560 * sregs could use it. So set a flag, and do it after
14563 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14564 dest_has_lvreg = TRUE;
14569 if (def_ins && !live_range_start [dreg]) {
14570 live_range_start [dreg] = def_ins;
14571 live_range_start_bb [dreg] = bb;
14574 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14577 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14578 tmp->inst_c1 = dreg;
14579 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
14586 num_sregs = mono_inst_get_src_registers (ins, sregs);
14587 for (srcindex = 0; srcindex < 3; ++srcindex) {
14588 regtype = spec [MONO_INST_SRC1 + srcindex];
14589 sreg = sregs [srcindex];
14591 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14592 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14593 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14594 MonoInst *use_ins = ins;
14595 MonoInst *load_ins;
14596 guint32 load_opcode;
14598 if (var->opcode == OP_REGVAR) {
14599 sregs [srcindex] = var->dreg;
14600 //mono_inst_set_src_registers (ins, sregs);
14601 live_range_end [sreg] = use_ins;
14602 live_range_end_bb [sreg] = bb;
14604 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14607 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14608 /* var->dreg is a hreg */
14609 tmp->inst_c1 = sreg;
14610 mono_bblock_insert_after_ins (bb, ins, tmp);
14616 g_assert (var->opcode == OP_REGOFFSET);
14618 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14620 g_assert (load_opcode != OP_LOADV_MEMBASE);
14622 if (vreg_to_lvreg [sreg]) {
14623 g_assert (vreg_to_lvreg [sreg] != -1);
14625 /* The variable is already loaded to an lvreg */
14626 if (G_UNLIKELY (cfg->verbose_level > 2))
14627 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14628 sregs [srcindex] = vreg_to_lvreg [sreg];
14629 //mono_inst_set_src_registers (ins, sregs);
14633 /* Try to fuse the load into the instruction */
14634 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14635 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14636 sregs [0] = var->inst_basereg;
14637 //mono_inst_set_src_registers (ins, sregs);
14638 ins->inst_offset = var->inst_offset;
14639 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14640 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14641 sregs [1] = var->inst_basereg;
14642 //mono_inst_set_src_registers (ins, sregs);
14643 ins->inst_offset = var->inst_offset;
14645 if (MONO_IS_REAL_MOVE (ins)) {
14646 ins->opcode = OP_NOP;
14649 //printf ("%d ", srcindex); mono_print_ins (ins);
14651 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Remember the lvreg so later uses of the variable can reuse it */
14653 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14654 if (var->dreg == prev_dreg) {
14656 * sreg refers to the value loaded by the load
14657 * emitted below, but we need to use ins->dreg
14658 * since it refers to the store emitted earlier.
14662 g_assert (sreg != -1);
14663 vreg_to_lvreg [var->dreg] = sreg;
14664 g_assert (lvregs_len < 1024);
14665 lvregs [lvregs_len ++] = var->dreg;
14669 sregs [srcindex] = sreg;
14670 //mono_inst_set_src_registers (ins, sregs);
14672 #if SIZEOF_REGISTER != 8
14673 if (regtype == 'l') {
/* Reload the two 32 bit halves separately on 32 bit hosts */
14674 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14675 mono_bblock_insert_before_ins (bb, ins, load_ins);
14676 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14677 mono_bblock_insert_before_ins (bb, ins, load_ins);
14678 use_ins = load_ins;
14683 #if SIZEOF_REGISTER == 4
14684 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14686 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14687 mono_bblock_insert_before_ins (bb, ins, load_ins);
14688 use_ins = load_ins;
14692 if (var->dreg < orig_next_vreg) {
14693 live_range_end [var->dreg] = use_ins;
14694 live_range_end_bb [var->dreg] = bb;
14697 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14700 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14701 tmp->inst_c1 = var->dreg;
14702 mono_bblock_insert_after_ins (bb, ins, tmp);
14706 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG handling above: record the lvreg now that sregs are done */
14708 if (dest_has_lvreg) {
14709 g_assert (ins->dreg != -1);
14710 vreg_to_lvreg [prev_dreg] = ins->dreg;
14711 g_assert (lvregs_len < 1024);
14712 lvregs [lvregs_len ++] = prev_dreg;
14713 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above */
14717 tmp_reg = ins->dreg;
14718 ins->dreg = ins->sreg2;
14719 ins->sreg2 = tmp_reg;
/* Calls clobber the cached lvregs */
14722 if (MONO_IS_CALL (ins)) {
14723 /* Clear vreg_to_lvreg array */
14724 for (i = 0; i < lvregs_len; i++)
14725 vreg_to_lvreg [lvregs [i]] = 0;
14727 } else if (ins->opcode == OP_NOP) {
14729 MONO_INST_NULLIFY_SREGS (ins);
14732 if (cfg->verbose_level > 2)
14733 mono_print_ins_index (1, ins);
14736 /* Extend the live range based on the liveness info */
14737 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14738 for (i = 0; i < cfg->num_varinfo; i ++) {
14739 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14741 if (vreg_is_volatile (cfg, vi->vreg))
14742 /* The liveness info is incomplete */
14745 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14746 /* Live from at least the first ins of this bb */
14747 live_range_start [vi->vreg] = bb->code;
14748 live_range_start_bb [vi->vreg] = bb;
14751 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14752 /* Live at least until the last ins of this bb */
14753 live_range_end [vi->vreg] = bb->last_ins;
14754 live_range_end_bb [vi->vreg] = bb;
14761 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14762 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14764 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14765 for (i = 0; i < cfg->num_varinfo; ++i) {
14766 int vreg = MONO_VARINFO (cfg, i)->vreg;
14769 if (live_range_start [vreg]) {
14770 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14772 ins->inst_c1 = vreg;
14773 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14775 if (live_range_end [vreg]) {
14776 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14778 ins->inst_c1 = vreg;
14779 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14780 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14782 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14787 if (cfg->gsharedvt_locals_var_ins) {
14788 /* Nullify if unused */
14789 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14790 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14793 g_free (live_range_start);
14794 g_free (live_range_end);
14795 g_free (live_range_start_bb);
14796 g_free (live_range_end_bb);
14801 * - use 'iadd' instead of 'int_add'
14802 * - handling ovf opcodes: decompose in method_to_ir.
14803 * - unify iregs/fregs
14804 * -> partly done, the missing parts are:
14805 * - a more complete unification would involve unifying the hregs as well, so
14806 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14807 * would no longer map to the machine hregs, so the code generators would need to
14808 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14809 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14810 * fp/non-fp branches speeds it up by about 15%.
14811 * - use sext/zext opcodes instead of shifts
14813 * - get rid of TEMPLOADs if possible and use vregs instead
14814 * - clean up usage of OP_P/OP_ opcodes
14815 * - cleanup usage of DUMMY_USE
14816 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14818 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14819 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14820 * - make sure handle_stack_args () is called before the branch is emitted
14821 * - when the new IR is done, get rid of all unused stuff
14822 * - COMPARE/BEQ as separate instructions or unify them ?
14823 * - keeping them separate allows specialized compare instructions like
14824 * compare_imm, compare_membase
14825 * - most back ends unify fp compare+branch, fp compare+ceq
14826 * - integrate mono_save_args into inline_method
14827 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14828 * - handle long shift opts on 32 bit platforms somehow: they require
14829 * 3 sregs (2 for arg1 and 1 for arg2)
14830 * - make byref a 'normal' type.
14831 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14832 * variable if needed.
14833 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14834 * like inline_method.
14835 * - remove inlining restrictions
14836 * - fix LNEG and enable cfold of INEG
14837 * - generalize x86 optimizations like ldelema as a peephole optimization
14838 * - add store_mem_imm for amd64
14839 * - optimize the loading of the interruption flag in the managed->native wrappers
14840 * - avoid special handling of OP_NOP in passes
14841 * - move code inserting instructions into one function/macro.
14842 * - try a coalescing phase after liveness analysis
14843 * - add float -> vreg conversion + local optimizations on !x86
14844 * - figure out how to handle decomposed branches during optimizations, ie.
14845 * compare+branch, op_jump_table+op_br etc.
14846 * - promote RuntimeXHandles to vregs
14847 * - vtype cleanups:
14848 * - add a NEW_VARLOADA_VREG macro
14849 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14850 * accessing vtype fields.
14851 * - get rid of I8CONST on 64 bit platforms
14852 * - dealing with the increase in code size due to branches created during opcode
14854 * - use extended basic blocks
14855 * - all parts of the JIT
14856 * - handle_global_vregs () && local regalloc
14857 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14858 * - sources of increase in code size:
14861 * - isinst and castclass
14862 * - lvregs not allocated to global registers even if used multiple times
14863 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14865 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14866 * - add all micro optimizations from the old JIT
14867 * - put tree optimizations into the deadce pass
14868 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14869 * specific function.
14870 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14871 * fcompare + branchCC.
14872 * - create a helper function for allocating a stack slot, taking into account
14873 * MONO_CFG_HAS_SPILLUP.
14875 * - merge the ia64 switch changes.
14876 * - optimize mono_regstate2_alloc_int/float.
14877 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14878 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14879 * parts of the tree could be separated by other instructions, killing the tree
14880 * arguments, or stores killing loads etc. Also, should we fold loads into other
14881 * instructions if the result of the load is used multiple times ?
14882 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14883 * - LAST MERGE: 108395.
14884 * - when returning vtypes in registers, generate IR and append it to the end of the
14885 * last bb instead of doing it in the epilog.
14886 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14894 - When to decompose opcodes:
14895 - earlier: this makes some optimizations hard to implement, since the low level IR
14896 no longer contains the necessary information. But it is easier to do.
14897 - later: harder to implement, enables more optimizations.
14898 - Branches inside bblocks:
14899 - created when decomposing complex opcodes.
14900 - branches to another bblock: harmless, but not tracked by the branch
14901 optimizations, so need to branch to a label at the start of the bblock.
14902 - branches to inside the same bblock: very problematic, trips up the local
14903 reg allocator. Can be fixed by splitting the current bblock, but that is a
14904 complex operation, since some local vregs can become global vregs etc.
14905 - Local/global vregs:
14906 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14907 local register allocator.
14908 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14909 structure, created by mono_create_var (). Assigned to hregs or the stack by
14910 the global register allocator.
14911 - When to do optimizations like alu->alu_imm:
14912 - earlier -> saves work later on since the IR will be smaller/simpler
14913 - later -> can work on more instructions
14914 - Handling of valuetypes:
14915 - When a vtype is pushed on the stack, a new temporary is created, an
14916 instruction computing its address (LDADDR) is emitted and pushed on
14917 the stack. Need to optimize cases when the vtype is used immediately as in
14918 argument passing, stloc etc.
14919 - Instead of the to_end stuff in the old JIT, simply call the function handling
14920 the values on the stack before emitting the last instruction of the bb.
14923 #endif /* DISABLE_JIT */