/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
72 #include "aot-compiler.h"
73 #include "mini-llvm.h"
/* Estimated cost of a branch, relative to an average instruction (inlining heuristics) */
#define BRANCH_COST 10
/* Methods whose IL body is longer than this many bytes are not considered for inlining */
#define INLINE_LENGTH_LIMIT 20

/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
	} \
	} while (0)
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE) \
			goto exception_exit; \
	} while (0)
#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
		method_access_failure ((cfg), (method), (cmethod)); \
		goto exception_exit; \
	} while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
		field_access_failure ((cfg), (method), (field)); \
		goto exception_exit; \
	} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->gshared) { \
			gshared_failure (cfg, opcode, __FILE__, __LINE__); \
			goto exception_exit; \
		} \
	} while (0)
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
	} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
		goto exception_exit; \
	} while (0)
#define DISABLE_AOT(cfg) do { \
		if ((cfg)->verbose_level >= 2) \
			printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
		(cfg)->disable_aot = TRUE; \
	} while (0)
#define LOAD_ERROR do { \
		break_on_unverified (); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
		goto exception_exit; \
	} while (0)

#define TYPE_LOAD_ERROR(klass) do { \
		cfg->exception_ptr = klass; \
		LOAD_ERROR; \
	} while (0)

#define CHECK_CFG_ERROR do {\
		if (!mono_error_ok (&cfg->error)) { \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
			goto mono_error_exit; \
		} \
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
139 static int ldind_to_load_membase (int opcode);
140 static int stind_to_store_membase (int opcode);
142 int mono_op_to_op_imm (int opcode);
143 int mono_op_to_op_imm_noemul (int opcode);
145 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
147 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
148 guchar *ip, guint real_offset, gboolean inline_always);
150 /* helper methods signatures */
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
155 * Instruction metadata
163 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
164 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
170 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
175 /* keep in sync with the enum in mini.h */
178 #include "mini-ops.h"
183 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
184 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
186 * This should contain the index of the last sreg + 1. This is not the same
187 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
189 const gint8 ins_sreg_counts[] = {
190 #include "mini-ops.h"
195 #define MONO_INIT_VARINFO(vi,id) do { \
196 (vi)->range.first_use.pos.bid = 0xffff; \
202 mono_alloc_ireg (MonoCompile *cfg)
204 return alloc_ireg (cfg);
208 mono_alloc_lreg (MonoCompile *cfg)
210 return alloc_lreg (cfg);
214 mono_alloc_freg (MonoCompile *cfg)
216 return alloc_freg (cfg);
220 mono_alloc_preg (MonoCompile *cfg)
222 return alloc_preg (cfg);
226 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
228 return alloc_dreg (cfg, stack_type);
232 * mono_alloc_ireg_ref:
234 * Allocate an IREG, and mark it as holding a GC ref.
237 mono_alloc_ireg_ref (MonoCompile *cfg)
239 return alloc_ireg_ref (cfg);
243 * mono_alloc_ireg_mp:
245 * Allocate an IREG, and mark it as holding a managed pointer.
248 mono_alloc_ireg_mp (MonoCompile *cfg)
250 return alloc_ireg_mp (cfg);
254 * mono_alloc_ireg_copy:
256 * Allocate an IREG with the same GC type as VREG.
259 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
261 if (vreg_is_ref (cfg, vreg))
262 return alloc_ireg_ref (cfg);
263 else if (vreg_is_mp (cfg, vreg))
264 return alloc_ireg_mp (cfg);
266 return alloc_ireg (cfg);
270 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
275 type = mini_get_underlying_type (type);
277 switch (type->type) {
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
306 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
309 case MONO_TYPE_VALUETYPE:
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
324 g_assert (cfg->gshared);
325 if (mini_type_var_is_vt (type))
328 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
359 static MONO_NEVER_INLINE void
360 break_on_unverified (void)
362 if (mini_get_debug_options ()->break_on_unverified)
366 static MONO_NEVER_INLINE void
367 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
369 char *method_fname = mono_method_full_name (method, TRUE);
370 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
371 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
372 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
373 g_free (method_fname);
374 g_free (cil_method_fname);
377 static MONO_NEVER_INLINE void
378 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *field_fname = mono_field_full_name (field);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
383 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
384 g_free (method_fname);
385 g_free (field_fname);
388 static MONO_NEVER_INLINE void
389 inline_failure (MonoCompile *cfg, const char *msg)
391 if (cfg->verbose_level >= 2)
392 printf ("inline failed: %s\n", msg);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
396 static MONO_NEVER_INLINE void
397 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
399 if (cfg->verbose_level > 2) \
400 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
404 static MONO_NEVER_INLINE void
405 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
407 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
408 if (cfg->verbose_level >= 2)
409 printf ("%s\n", cfg->exception_message);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	break_on_unverified (); \
	goto unverified; \
} while (0)

/* Look up (or lazily create and register) the bblock starting at IL offset IP */
#define GET_BBLOCK(cfg,tblock,ip) do { \
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) { \
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock)); \
			(tblock)->cil_code = (ip); \
			ADD_BBLOCK (cfg, (tblock)); \
		} \
	} while (0)

#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dreg = sr1 + (sr2 << shift) + imm */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
450 /* Emit conversions so both operands of a binary opcode are of the same type */
452 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
454 MonoInst *arg1 = *arg1_ref;
455 MonoInst *arg2 = *arg2_ref;
458 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
459 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
462 /* Mixing r4/r8 is allowed by the spec */
463 if (arg1->type == STACK_R4) {
464 int dreg = alloc_freg (cfg);
466 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
467 conv->type = STACK_R8;
471 if (arg2->type == STACK_R4) {
472 int dreg = alloc_freg (cfg);
474 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
475 conv->type = STACK_R8;
481 #if SIZEOF_REGISTER == 8
482 /* FIXME: Need to add many more cases */
483 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
486 int dr = alloc_preg (cfg);
487 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
488 (ins)->sreg2 = widen->dreg;
/* Pop two values, emit a typed binary op and push the result */
#define ADD_BINOP(op) do { \
		MONO_INST_NEW (cfg, ins, (op)); \
		sp -= 2; \
		ins->sreg1 = sp [0]->dreg; \
		ins->sreg2 = sp [1]->dreg; \
		type_from_op (cfg, ins, sp [0], sp [1]); \
		CHECK_TYPE (ins); \
		/* Have to insert a widening op */ \
		add_widen_op (cfg, ins, &sp [0], &sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)

/* Pop one value, emit a typed unary op and push the result */
#define ADD_UNOP(op) do { \
		MONO_INST_NEW (cfg, ins, (op)); \
		sp--; \
		ins->sreg1 = sp [0]->dreg; \
		type_from_op (cfg, ins, sp [0], NULL); \
		CHECK_TYPE (ins); \
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \
	} while (0)

/* Pop two values, emit a compare + conditional branch, and wire up both CFG edges */
#define ADD_BINCOND(next_block) do { \
		MonoInst *cmp; \
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
		cmp->sreg1 = sp [0]->dreg; \
		cmp->sreg2 = sp [1]->dreg; \
		type_from_op (cfg, cmp, sp [0], sp [1]); \
		CHECK_TYPE (cmp); \
		add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
		type_from_op (cfg, ins, sp [0], sp [1]); \
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
		GET_BBLOCK (cfg, tblock, target); \
		link_bblock (cfg, cfg->cbb, tblock); \
		ins->inst_true_bb = tblock; \
		if ((next_block)) { \
			link_bblock (cfg, cfg->cbb, (next_block)); \
			ins->inst_false_bb = (next_block); \
			start_new_bblock = 1; \
		} else { \
			GET_BBLOCK (cfg, tblock, ip); \
			link_bblock (cfg, cfg->cbb, tblock); \
			ins->inst_false_bb = tblock; \
			start_new_bblock = 2; \
		} \
		if (sp != stack_start) { \
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (cfg->cbb, cmp); \
		MONO_ADD_INS (cfg->cbb, ins); \
	} while (0)
551 * link_bblock: Links two basic blocks
553 * links two basic blocks in the control flow graph, the 'from'
554 * argument is the starting block and the 'to' argument is the block
555 * the control flow ends to after 'from'.
558 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
560 MonoBasicBlock **newa;
564 if (from->cil_code) {
566 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
568 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
571 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
573 printf ("edge from entry to exit\n");
578 for (i = 0; i < from->out_count; ++i) {
579 if (to == from->out_bb [i]) {
585 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
586 for (i = 0; i < from->out_count; ++i) {
587 newa [i] = from->out_bb [i];
595 for (i = 0; i < to->in_count; ++i) {
596 if (from == to->in_bb [i]) {
602 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
603 for (i = 0; i < to->in_count; ++i) {
604 newa [i] = to->in_bb [i];
613 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
615 link_bblock (cfg, from, to);
619 * mono_find_block_region:
621 * We mark each basic block with a region ID. We use that to avoid BB
622 * optimizations when blocks are in different regions.
625 * A region token that encodes where this region is, and information
626 * about the clause owner for this block.
628 * The region encodes the try/catch/filter clause that owns this block
629 * as well as the type. -1 is a special value that represents a block
630 * that is in none of try/catch/filter.
633 mono_find_block_region (MonoCompile *cfg, int offset)
635 MonoMethodHeader *header = cfg->header;
636 MonoExceptionClause *clause;
639 for (i = 0; i < header->num_clauses; ++i) {
640 clause = &header->clauses [i];
641 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
642 (offset < (clause->handler_offset)))
643 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
645 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
646 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
647 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
648 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
649 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
651 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
654 for (i = 0; i < header->num_clauses; ++i) {
655 clause = &header->clauses [i];
657 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
658 return ((i + 1) << 8) | clause->flags;
665 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
667 MonoMethodHeader *header = cfg->header;
668 MonoExceptionClause *clause;
672 for (i = 0; i < header->num_clauses; ++i) {
673 clause = &header->clauses [i];
674 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
675 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
676 if (clause->flags == type)
677 res = g_list_append (res, clause);
684 mono_create_spvar_for_region (MonoCompile *cfg, int region)
688 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
692 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
693 /* prevent it from being register allocated */
694 var->flags |= MONO_INST_VOLATILE;
696 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
700 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
702 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
706 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
710 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
714 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
715 /* prevent it from being register allocated */
716 var->flags |= MONO_INST_VOLATILE;
718 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
724 * Returns the type used in the eval stack when @type is loaded.
725 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
728 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
732 type = mini_get_underlying_type (type);
733 inst->klass = klass = mono_class_from_mono_type (type);
735 inst->type = STACK_MP;
740 switch (type->type) {
742 inst->type = STACK_INV;
750 inst->type = STACK_I4;
755 case MONO_TYPE_FNPTR:
756 inst->type = STACK_PTR;
758 case MONO_TYPE_CLASS:
759 case MONO_TYPE_STRING:
760 case MONO_TYPE_OBJECT:
761 case MONO_TYPE_SZARRAY:
762 case MONO_TYPE_ARRAY:
763 inst->type = STACK_OBJ;
767 inst->type = STACK_I8;
770 inst->type = cfg->r4_stack_type;
773 inst->type = STACK_R8;
775 case MONO_TYPE_VALUETYPE:
776 if (type->data.klass->enumtype) {
777 type = mono_class_enum_basetype (type->data.klass);
781 inst->type = STACK_VTYPE;
784 case MONO_TYPE_TYPEDBYREF:
785 inst->klass = mono_defaults.typed_reference_class;
786 inst->type = STACK_VTYPE;
788 case MONO_TYPE_GENERICINST:
789 type = &type->data.generic_class->container_class->byval_arg;
793 g_assert (cfg->gshared);
794 if (mini_is_gsharedvt_type (type)) {
795 g_assert (cfg->gsharedvt);
796 inst->type = STACK_VTYPE;
798 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
802 g_error ("unknown type 0x%02x in eval stack type", type->type);
807 * The following tables are used to quickly validate the IL code in type_from_op ().
810 bin_num_table [STACK_MAX] [STACK_MAX] = {
811 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
816 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
824 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
827 /* reduce the size of this table */
829 bin_int_table [STACK_MAX] [STACK_MAX] = {
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
841 bin_comp_table [STACK_MAX] [STACK_MAX] = {
842 /* Inv i L p F & O vt r4 */
844 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
845 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
846 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
847 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
848 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
849 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
850 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
851 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
854 /* reduce the size of this table */
856 shift_table [STACK_MAX] [STACK_MAX] = {
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
868 * Tables to map from the non-specific opcode to the matching
869 * type-specific opcode.
871 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
873 binops_op_map [STACK_MAX] = {
874 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
877 /* handles from CEE_NEG to CEE_CONV_U8 */
879 unops_op_map [STACK_MAX] = {
880 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
883 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
885 ovfops_op_map [STACK_MAX] = {
886 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
889 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
891 ovf2ops_op_map [STACK_MAX] = {
892 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
895 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
897 ovf3ops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
901 /* handles from CEE_BEQ to CEE_BLT_UN */
903 beqops_op_map [STACK_MAX] = {
904 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
907 /* handles from CEE_CEQ to CEE_CLT_UN */
909 ceqops_op_map [STACK_MAX] = {
910 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
914 * Sets ins->type (the type on the eval stack) according to the
915 * type of the opcode and the arguments to it.
916 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
918 * FIXME: this function sets ins->type unconditionally in some cases, but
919 * it should set it to invalid for some types (a conv.x on an object)
922 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
924 switch (ins->opcode) {
931 /* FIXME: check unverifiable args for STACK_MP */
932 ins->type = bin_num_table [src1->type] [src2->type];
933 ins->opcode += binops_op_map [ins->type];
940 ins->type = bin_int_table [src1->type] [src2->type];
941 ins->opcode += binops_op_map [ins->type];
946 ins->type = shift_table [src1->type] [src2->type];
947 ins->opcode += binops_op_map [ins->type];
952 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
953 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
954 ins->opcode = OP_LCOMPARE;
955 else if (src1->type == STACK_R4)
956 ins->opcode = OP_RCOMPARE;
957 else if (src1->type == STACK_R8)
958 ins->opcode = OP_FCOMPARE;
960 ins->opcode = OP_ICOMPARE;
962 case OP_ICOMPARE_IMM:
963 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE_IMM;
977 ins->opcode += beqops_op_map [src1->type];
980 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
981 ins->opcode += ceqops_op_map [src1->type];
987 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
992 ins->type = neg_table [src1->type];
993 ins->opcode += unops_op_map [ins->type];
996 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
997 ins->type = src1->type;
999 ins->type = STACK_INV;
1000 ins->opcode += unops_op_map [ins->type];
1006 ins->type = STACK_I4;
1007 ins->opcode += unops_op_map [src1->type];
1010 ins->type = STACK_R8;
1011 switch (src1->type) {
1014 ins->opcode = OP_ICONV_TO_R_UN;
1017 ins->opcode = OP_LCONV_TO_R_UN;
1021 case CEE_CONV_OVF_I1:
1022 case CEE_CONV_OVF_U1:
1023 case CEE_CONV_OVF_I2:
1024 case CEE_CONV_OVF_U2:
1025 case CEE_CONV_OVF_I4:
1026 case CEE_CONV_OVF_U4:
1027 ins->type = STACK_I4;
1028 ins->opcode += ovf3ops_op_map [src1->type];
1030 case CEE_CONV_OVF_I_UN:
1031 case CEE_CONV_OVF_U_UN:
1032 ins->type = STACK_PTR;
1033 ins->opcode += ovf2ops_op_map [src1->type];
1035 case CEE_CONV_OVF_I1_UN:
1036 case CEE_CONV_OVF_I2_UN:
1037 case CEE_CONV_OVF_I4_UN:
1038 case CEE_CONV_OVF_U1_UN:
1039 case CEE_CONV_OVF_U2_UN:
1040 case CEE_CONV_OVF_U4_UN:
1041 ins->type = STACK_I4;
1042 ins->opcode += ovf2ops_op_map [src1->type];
1045 ins->type = STACK_PTR;
1046 switch (src1->type) {
1048 ins->opcode = OP_ICONV_TO_U;
1052 #if SIZEOF_VOID_P == 8
1053 ins->opcode = OP_LCONV_TO_U;
1055 ins->opcode = OP_MOVE;
1059 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_FCONV_TO_U;
1068 ins->type = STACK_I8;
1069 ins->opcode += unops_op_map [src1->type];
1071 case CEE_CONV_OVF_I8:
1072 case CEE_CONV_OVF_U8:
1073 ins->type = STACK_I8;
1074 ins->opcode += ovf3ops_op_map [src1->type];
1076 case CEE_CONV_OVF_U8_UN:
1077 case CEE_CONV_OVF_I8_UN:
1078 ins->type = STACK_I8;
1079 ins->opcode += ovf2ops_op_map [src1->type];
1082 ins->type = cfg->r4_stack_type;
1083 ins->opcode += unops_op_map [src1->type];
1086 ins->type = STACK_R8;
1087 ins->opcode += unops_op_map [src1->type];
1090 ins->type = STACK_R8;
1094 ins->type = STACK_I4;
1095 ins->opcode += ovfops_op_map [src1->type];
1098 case CEE_CONV_OVF_I:
1099 case CEE_CONV_OVF_U:
1100 ins->type = STACK_PTR;
1101 ins->opcode += ovfops_op_map [src1->type];
1104 case CEE_ADD_OVF_UN:
1106 case CEE_MUL_OVF_UN:
1108 case CEE_SUB_OVF_UN:
1109 ins->type = bin_num_table [src1->type] [src2->type];
1110 ins->opcode += ovfops_op_map [src1->type];
1111 if (ins->type == STACK_R8)
1112 ins->type = STACK_INV;
1114 case OP_LOAD_MEMBASE:
1115 ins->type = STACK_PTR;
1117 case OP_LOADI1_MEMBASE:
1118 case OP_LOADU1_MEMBASE:
1119 case OP_LOADI2_MEMBASE:
1120 case OP_LOADU2_MEMBASE:
1121 case OP_LOADI4_MEMBASE:
1122 case OP_LOADU4_MEMBASE:
1123 ins->type = STACK_PTR;
1125 case OP_LOADI8_MEMBASE:
1126 ins->type = STACK_I8;
1128 case OP_LOADR4_MEMBASE:
1129 ins->type = cfg->r4_stack_type;
1131 case OP_LOADR8_MEMBASE:
1132 ins->type = STACK_R8;
1135 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1139 if (ins->type == STACK_MP)
1140 ins->klass = mono_defaults.object_class;
1145 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1151 param_table [STACK_MAX] [STACK_MAX] = {
1156 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1161 switch (args->type) {
1171 for (i = 0; i < sig->param_count; ++i) {
1172 switch (args [i].type) {
1176 if (!sig->params [i]->byref)
1180 if (sig->params [i]->byref)
1182 switch (sig->params [i]->type) {
1183 case MONO_TYPE_CLASS:
1184 case MONO_TYPE_STRING:
1185 case MONO_TYPE_OBJECT:
1186 case MONO_TYPE_SZARRAY:
1187 case MONO_TYPE_ARRAY:
1194 if (sig->params [i]->byref)
1196 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1205 /*if (!param_table [args [i].type] [sig->params [i]->type])
1213 * When we need a pointer to the current domain many times in a method, we
1214 * call mono_domain_get() once and we store the result in a local variable.
1215 * This function returns the variable that represents the MonoDomain*.
1217 inline static MonoInst *
1218 mono_get_domainvar (MonoCompile *cfg)
1220 if (!cfg->domainvar)
1221 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1222 return cfg->domainvar;
1226 * The got_var contains the address of the Global Offset Table when AOT
1230 mono_get_got_var (MonoCompile *cfg)
1232 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1234 if (!cfg->got_var) {
1235 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1237 return cfg->got_var;
1241 mono_get_vtable_var (MonoCompile *cfg)
1243 g_assert (cfg->gshared);
1245 if (!cfg->rgctx_var) {
1246 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 /* force the var to be stack allocated */
1248 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1251 return cfg->rgctx_var;
1255 type_from_stack_type (MonoInst *ins) {
1256 switch (ins->type) {
1257 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1258 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1259 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1260 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1261 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1263 return &ins->klass->this_arg;
1264 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1265 case STACK_VTYPE: return &ins->klass->byval_arg;
1267 g_error ("stack type %d to monotype not handled\n", ins->type);
1272 static G_GNUC_UNUSED int
1273 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1275 t = mono_type_get_underlying_type (t);
1287 case MONO_TYPE_FNPTR:
1289 case MONO_TYPE_CLASS:
1290 case MONO_TYPE_STRING:
1291 case MONO_TYPE_OBJECT:
1292 case MONO_TYPE_SZARRAY:
1293 case MONO_TYPE_ARRAY:
1299 return cfg->r4_stack_type;
1302 case MONO_TYPE_VALUETYPE:
1303 case MONO_TYPE_TYPEDBYREF:
1305 case MONO_TYPE_GENERICINST:
1306 if (mono_type_generic_inst_is_valuetype (t))
1312 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 * Return the element MonoClass accessed by a CEE_LDELEM_*/CEE_STELEM_*
 * opcode (the case labels for most arms are elided in this listing).
 */
1319 array_access_to_klass (int opcode)
1323 return mono_defaults.byte_class;
1325 return mono_defaults.uint16_class;
1328 return mono_defaults.int_class;
1331 return mono_defaults.sbyte_class;
1334 return mono_defaults.int16_class;
1337 return mono_defaults.int32_class;
1339 return mono_defaults.uint32_class;
1342 return mono_defaults.int64_class;
1345 return mono_defaults.single_class;
1348 return mono_defaults.double_class;
1349 case CEE_LDELEM_REF:
1350 case CEE_STELEM_REF:
1351 return mono_defaults.object_class;
1353 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 * Return a variable for stack slot SLOT holding a value of INS's stack type,
 * reusing a previously created one from cfg->intvars when possible so join
 * points share variables.
 */
1359 * We try to share variables when possible
1362 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1367 /* inlining can result in deeper stacks */
1368 if (slot >= cfg->header->max_stack)
1369 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* index into the intvars cache by (stack type, slot) */
1371 pos = ins->type - 1 + slot * STACK_MAX;
1373 switch (ins->type) {
1380 if ((vnum = cfg->intvars [pos]))
1381 return cfg->varinfo [vnum];
1382 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1383 cfg->intvars [pos] = res->inst_c0;
1386 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 * When compiling AOT, remember the (image, token) pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve KEY back
 * to a metadata token.
 */
1392 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1395 * Don't use this if a generic_context is set, since that means AOT can't
1396 * look up the method using just the image+token.
1397 * table == 0 means this is a reference made from a wrapper.
1399 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1400 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1401 jump_info_token->image = image;
1402 jump_info_token->token = token;
1403 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1408 * This function is called to handle items that are left on the evaluation stack
1409 * at basic block boundaries. What happens is that we save the values to local variables
1410 * and we reload them later when first entering the target basic block (with the
1411 * handle_loaded_temps () function).
1412 * A single join point will use the same variables (stored in the array bb->out_stack or
1413 * bb->in_stack, if the basic block is before or after the join point).
1415 * This function needs to be called _before_ emitting the last instruction of
1416 * the bb (i.e. before emitting a branch).
1417 * If the stack merge fails at a join point, cfg->unverifiable is set.
1420 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1423 MonoBasicBlock *bb = cfg->cbb;
1424 MonoBasicBlock *outb;
1425 MonoInst *inst, **locals;
1430 if (cfg->verbose_level > 3)
1431 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick/create the variables used to spill the stack. */
1432 if (!bb->out_scount) {
1433 bb->out_scount = count;
1434 //printf ("bblock %d has out:", bb->block_num);
/* Try to reuse an existing in_stack from one of the successor blocks. */
1436 for (i = 0; i < bb->out_count; ++i) {
1437 outb = bb->out_bb [i];
1438 /* exception handlers are linked, but they should not be considered for stack args */
1439 if (outb->flags & BB_EXCEPTION_HANDLER)
1441 //printf (" %d", outb->block_num);
1442 if (outb->in_stack) {
1444 bb->out_stack = outb->in_stack;
1450 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1451 for (i = 0; i < count; ++i) {
1453 * try to reuse temps already allocated for this purpose, if they occupy the same
1454 * stack slot and if they are of the same type.
1455 * This won't cause conflicts since if 'local' is used to
1456 * store one of the values in the in_stack of a bblock, then
1457 * the same variable will be used for the same outgoing stack
1459 * This doesn't work when inlining methods, since the bblocks
1460 * in the inlined methods do not inherit their in_stack from
1461 * the bblock they are inlined to. See bug #58863 for an
1464 if (cfg->inlined_method)
1465 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1467 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack to successors that don't have an in_stack yet;
 * mismatched in_scount at a join point means unverifiable IL. */
1472 for (i = 0; i < bb->out_count; ++i) {
1473 outb = bb->out_bb [i];
1474 /* exception handlers are linked, but they should not be considered for stack args */
1475 if (outb->flags & BB_EXCEPTION_HANDLER)
1477 if (outb->in_scount) {
1478 if (outb->in_scount != bb->out_scount) {
1479 cfg->unverifiable = TRUE;
1482 continue; /* check they are the same locals */
1484 outb->in_scount = count;
1485 outb->in_stack = bb->out_stack;
1488 locals = bb->out_stack;
/* Emit the actual stores of the stack values into the spill variables. */
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1499 * It is possible that the out bblocks already have in_stack assigned, and
1500 * the in_stacks differ. In this case, we will store to all the different
1507 /* Find a bblock which has a different in_stack */
1509 while (bindex < bb->out_count) {
1510 outb = bb->out_bb [bindex];
1511 /* exception handlers are linked, but they should not be considered for stack args */
1512 if (outb->flags & BB_EXCEPTION_HANDLER) {
1516 if (outb->in_stack != locals) {
1517 for (i = 0; i < count; ++i) {
1518 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1519 inst->cil_code = sp [i]->cil_code;
1520 sp [i] = locals [i];
1521 if (cfg->verbose_level > 3)
1522 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1524 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *
 * Emit an instruction producing the runtime constant identified by
 * (PATCH_TYPE, DATA): an AOTCONST patch when compiling AOT, otherwise the
 * value is resolved immediately and emitted as a PCONST.
 */
1534 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1538 if (cfg->compile_aot) {
1539 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1544 ji.type = patch_type;
1545 ji.data.target = data;
1546 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
1548 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit IR which loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap found at [BASE_REG + OFFSET] has the bit for KLASS's interface id
 * set. With COMPRESSED_INTERFACE_BITMAP the test is done via the
 * mono_class_interface_match icall; otherwise the byte/bit is computed
 * inline (via AOT consts when compiling AOT).
 */
1554 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1556 int ibitmap_reg = alloc_preg (cfg);
1557 #ifdef COMPRESSED_INTERFACE_BITMAP
1559 MonoInst *res, *ins;
1560 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1561 MONO_ADD_INS (cfg->cbb, ins);
1563 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1564 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1565 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1567 int ibitmap_byte_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1571 if (cfg->compile_aot) {
/* iid is only known at runtime: compute byte index (iid >> 3) and bit (1 << (iid & 7)) in IR */
1572 int iid_reg = alloc_preg (cfg);
1573 int shifted_iid_reg = alloc_preg (cfg);
1574 int ibitmap_byte_address_reg = alloc_preg (cfg);
1575 int masked_iid_reg = alloc_preg (cfg);
1576 int iid_one_bit_reg = alloc_preg (cfg);
1577 int iid_bit_reg = alloc_preg (cfg);
1578 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1580 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1583 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1584 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1585 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT case: iid is a compile-time constant, fold byte index and mask */
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1594 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1595 * stored in "klass_reg" implements the interface "klass".
1598 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* thin wrapper: check MonoClass's interface_bitmap field */
1600 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1604 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1605 * stored in "vtable_reg" implements the interface "klass".
1608 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* thin wrapper: check MonoVTable's interface_bitmap field */
1610 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1614 * Emit code which checks whether the interface id of @klass is smaller than
1615 * the value given by max_iid_reg.
/* On failure: branch to FALSE_TARGET when given, else throw InvalidCastException. */
1618 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1619 MonoBasicBlock *false_target)
1621 if (cfg->compile_aot) {
1622 int iid_reg = alloc_preg (cfg);
1623 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1624 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1629 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1631 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1634 /* Same as above, but obtains max_iid from a vtable */
1636 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1637 MonoBasicBlock *false_target)
1639 int max_iid_reg = alloc_preg (cfg);
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1642 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1645 /* Same as above, but obtains max_iid from a klass */
1647 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1648 MonoBasicBlock *false_target)
1650 int max_iid_reg = alloc_preg (cfg);
1652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1653 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 * Emit an isinst-style subtype check on the class in KLASS_REG against KLASS
 * using the supertypes array: branch to TRUE_TARGET when the entry at
 * (idepth - 1) matches, to FALSE_TARGET when the idepth is too small.
 * The class to compare against comes from KLASS_INS if given, an AOT class
 * const when compiling AOT, or KLASS as an immediate otherwise.
 */
1657 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1659 int idepth_reg = alloc_preg (cfg);
1660 int stypes_reg = alloc_preg (cfg);
1661 int stype = alloc_preg (cfg);
1663 mono_class_setup_supertypes (klass);
/* idepth only needs an explicit check when it exceeds the preallocated supertable */
1665 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1668 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1673 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1674 } else if (cfg->compile_aot) {
1675 int const_reg = alloc_preg (cfg);
1676 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1677 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1681 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check without an explicit klass instruction. */
1685 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1687 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 * Emit an interface cast check against the vtable in VTABLE_REG: max-iid
 * range check, then interface bitmap test. On success branch to TRUE_TARGET
 * when given; on failure throw InvalidCastException (or use FALSE_TARGET).
 */
1691 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1693 int intf_reg = alloc_preg (cfg);
1695 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1696 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1701 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1705 * Variant of the above that takes a register to the class, not the vtable.
1708 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1710 int intf_bit_reg = alloc_preg (cfg);
1712 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1713 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1716 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1718 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 * Emit an exact class-equality check of KLASS_REG against KLASS (or against
 * KLASS_INST's value when given), throwing InvalidCastException on mismatch.
 */
1722 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1727 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1728 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1730 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit klass instruction. */
1734 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1736 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 * Compare KLASS_REG against KLASS (class const under AOT, immediate
 * otherwise) and branch to TARGET using BRANCH_OP.
 */
1740 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1742 if (cfg->compile_aot) {
1743 int const_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1745 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* forward declaration: mini_emit_castclass_inst below recurses via this wrapper */
1753 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 * Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. The visible array branch compares rank,
 * then recursively checks the element (cast_class); enum element classes get
 * special-cased so enums match their underlying type's arrays. For SZARRAY
 * casts it also verifies the object is a vector (bounds == NULL); obj_reg ==
 * -1 skips that check (used for arrays of arrays). The non-array path does a
 * supertypes-table check like mini_emit_isninst_cast_inst (), but throwing
 * instead of branching. (NOTE: some original lines are elided here.)
 */
1756 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1759 int rank_reg = alloc_preg (cfg);
1760 int eclass_reg = alloc_preg (cfg);
1762 g_assert (!klass_inst);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1765 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1766 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1768 if (klass->cast_class == mono_defaults.object_class) {
1769 int parent_reg = alloc_preg (cfg);
1770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1771 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1772 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1773 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1774 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1775 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1776 } else if (klass->cast_class == mono_defaults.enum_class) {
1777 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1778 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1779 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1781 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1782 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1785 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1786 /* Check that the object is a vector too */
1787 int bounds_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1790 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1793 int idepth_reg = alloc_preg (cfg);
1794 int stypes_reg = alloc_preg (cfg);
1795 int stype = alloc_preg (cfg);
1797 mono_class_setup_supertypes (klass);
1799 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1802 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1806 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit klass instruction. */
1811 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1813 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 * Emit inline IR which sets SIZE bytes at [DESTREG + OFFSET] to VAL
 * (asserted to be 0), honoring ALIGN. Small aligned sizes use a single
 * immediate store; larger sizes store from a zeroed register in descending
 * power-of-two chunks. (NOTE: some original lines are elided here.)
 */
1817 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1821 g_assert (val == 0);
1826 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1829 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1832 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1837 #if SIZEOF_REGISTER == 8
1839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1845 val_reg = alloc_preg (cfg);
1847 if (SIZEOF_REGISTER == 8)
1848 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1850 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned destination: fall back to byte stores */
1853 /* This could be optimized further if necessary */
1855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1862 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit inline IR copying SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET], honoring ALIGN: byte copies when unaligned, otherwise
 * descending power-of-two chunks (8/4/2/1, with 8-byte chunks only when the
 * backend allows unaligned access on 64-bit). SIZE is asserted < 10000 to
 * bound code expansion. (NOTE: some original lines are elided here.)
 */
1893 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1900 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1901 g_assert (size < 10000);
/* unaligned source/destination: fall back to byte copies */
1904 /* This could be optimized further if necessary */
1906 cur_reg = alloc_preg (cfg);
1907 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1908 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1915 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1943 cur_reg = alloc_preg (cfg);
1944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 * Emit IR storing SREG1 into the TLS slot TLS_KEY: under AOT the offset is
 * a runtime constant (OP_TLS_SET_REG); the JIT path bakes in the offset
 * via mini_get_tls_offset () (OP_TLS_SET).
 */
1953 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1957 if (cfg->compile_aot) {
1958 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1959 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1961 ins->sreg2 = c->dreg;
1962 MONO_ADD_INS (cfg->cbb, ins);
1964 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1966 ins->inst_offset = mini_get_tls_offset (tls_key);
1967 MONO_ADD_INS (cfg->cbb, ins);
1974 * Emit IR to push the current LMF onto the LMF stack.
1977 emit_push_lmf (MonoCompile *cfg)
1980 * Emit IR to push the LMF:
1981 * lmf_addr = <lmf_addr from tls>
1982 * lmf->lmf_addr = lmf_addr
1983 * lmf->prev_lmf = *lmf_addr
1986 int lmf_reg, prev_lmf_reg;
1987 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so push is just load + store + tls set. */
1992 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1993 /* Load current lmf */
1994 lmf_ins = mono_get_lmf_intrinsic (cfg);
1996 MONO_ADD_INS (cfg->cbb, lmf_ins);
1997 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1998 lmf_reg = ins->dreg;
1999 /* Save previous_lmf */
2000 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2002 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2005 * Store lmf_addr in a variable, so it can be allocated to a global register.
2007 if (!cfg->lmf_addr_var)
2008 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several platform-dependent ways to obtain lmf_addr follow; the #ifdef
 * structure is elided in this listing — verify against the full source. */
2011 ins = mono_get_jit_tls_intrinsic (cfg);
2013 int jit_tls_dreg = ins->dreg;
2015 MONO_ADD_INS (cfg->cbb, ins);
2016 lmf_reg = alloc_preg (cfg);
2017 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2019 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2022 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2024 MONO_ADD_INS (cfg->cbb, lmf_ins);
2027 MonoInst *args [16], *jit_tls_ins, *ins;
2029 /* Inline mono_get_lmf_addr () */
2030 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2032 /* Load mono_jit_tls_id */
2033 if (cfg->compile_aot)
2034 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2036 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2037 /* call pthread_getspecific () */
2038 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2039 /* lmf_addr = &jit_tls->lmf */
2040 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2043 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2047 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2049 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2050 lmf_reg = ins->dreg;
/* Link the new LMF into the chain: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf */
2052 prev_lmf_reg = alloc_preg (cfg);
2053 /* Save previous_lmf */
2054 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2055 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2057 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2064 * Emit IR to pop the current LMF from the LMF stack.
2067 emit_pop_lmf (MonoCompile *cfg)
2069 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2075 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2076 lmf_reg = ins->dreg;
/* Fast path: LMF in TLS — restore previous_lmf straight into the TLS slot. */
2078 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2079 /* Load previous_lmf */
2080 prev_lmf_reg = alloc_preg (cfg);
2081 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2083 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2086 * Emit IR to pop the LMF:
2087 * *(lmf->lmf_addr) = lmf->prev_lmf
2089 /* This could be called before emit_push_lmf () */
2090 if (!cfg->lmf_addr_var)
2091 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2092 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2094 prev_lmf_reg = alloc_preg (cfg);
2095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2096 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 * Emit a profiler enter/leave icall to FUNC passing cfg->method, but only
 * when enter/leave profiling is enabled and we are not inside an inlined
 * method (inlined frames would distort the profile).
 */
2101 emit_instrumentation_call (MonoCompile *cfg, void *func)
2103 MonoInst *iargs [1];
2106 * Avoid instrumenting inlined methods since it can
2107 * distort profiling results.
2109 if (cfg->method != cfg->current_method)
2112 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2113 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2114 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 * Select the call opcode family for a call returning TYPE: plain, _REG
 * (indirect via CALLI) or _MEMBASE (virtual) variants of VOIDCALL/CALL/
 * LCALL/RCALL/FCALL/VCALL. Enums and generic insts recurse into their
 * underlying type. (NOTE: several case labels are elided in this listing.)
 */
2119 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2122 type = mini_get_underlying_type (type);
2123 switch (type->type) {
2124 case MONO_TYPE_VOID:
2125 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2132 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2136 case MONO_TYPE_FNPTR:
2137 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2138 case MONO_TYPE_CLASS:
2139 case MONO_TYPE_STRING:
2140 case MONO_TYPE_OBJECT:
2141 case MONO_TYPE_SZARRAY:
2142 case MONO_TYPE_ARRAY:
2143 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2146 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2149 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2151 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2153 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2154 case MONO_TYPE_VALUETYPE:
2155 if (type->data.klass->enumtype) {
2156 type = mono_class_enum_basetype (type->data.klass);
2159 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2160 case MONO_TYPE_TYPEDBYREF:
2161 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2162 case MONO_TYPE_GENERICINST:
2163 type = &type->data.generic_class->container_class->byval_arg;
2166 case MONO_TYPE_MVAR:
2168 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2170 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2176 * target_type_is_incompatible:
2177 * @cfg: MonoCompile context
2179 * Check that the item @arg on the evaluation stack can be stored
2180 * in the target type (can be a local, or field, etc).
2181 * The cfg arg can be used to check if we need verification or just
2184 * Returns: non-0 value if arg can't be stored on a target.
2187 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2189 MonoType *simple_type;
2192 if (target->byref) {
2193 /* FIXME: check that the pointed to types match */
2194 if (arg->type == STACK_MP)
2195 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2196 if (arg->type == STACK_PTR)
/* (NOTE: some case labels / return lines are elided in this listing) */
2201 simple_type = mini_get_underlying_type (target);
2202 switch (simple_type->type) {
2203 case MONO_TYPE_VOID:
2211 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2215 /* STACK_MP is needed when setting pinned locals */
2216 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2221 case MONO_TYPE_FNPTR:
2223 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2224 * in native int. (#688008).
2226 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2229 case MONO_TYPE_CLASS:
2230 case MONO_TYPE_STRING:
2231 case MONO_TYPE_OBJECT:
2232 case MONO_TYPE_SZARRAY:
2233 case MONO_TYPE_ARRAY:
2234 if (arg->type != STACK_OBJ)
2236 /* FIXME: check type compatibility */
2240 if (arg->type != STACK_I8)
2244 if (arg->type != cfg->r4_stack_type)
2248 if (arg->type != STACK_R8)
2251 case MONO_TYPE_VALUETYPE:
2252 if (arg->type != STACK_VTYPE)
2254 klass = mono_class_from_mono_type (simple_type);
2255 if (klass != arg->klass)
2258 case MONO_TYPE_TYPEDBYREF:
2259 if (arg->type != STACK_VTYPE)
2261 klass = mono_class_from_mono_type (simple_type);
2262 if (klass != arg->klass)
2265 case MONO_TYPE_GENERICINST:
2266 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2267 if (arg->type != STACK_VTYPE)
2269 klass = mono_class_from_mono_type (simple_type);
2270 /* The second case is needed when doing partial sharing */
2271 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2275 if (arg->type != STACK_OBJ)
2277 /* FIXME: check type compatibility */
2281 case MONO_TYPE_MVAR:
2282 g_assert (cfg->gshared);
2283 if (mini_type_var_is_vt (simple_type)) {
2284 if (arg->type != STACK_VTYPE)
2287 if (arg->type != STACK_OBJ)
2292 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2298 * Prepare arguments for passing to a function call.
2299 * Return a non-zero value if the arguments can't be passed to the given
2301 * The type checks are not yet complete and some conversions may need
2302 * casts on 32 or 64 bit architectures.
2304 * FIXME: implement this using target_type_is_incompatible ()
2307 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2309 MonoType *simple_type;
/* Implicit 'this' (args [0]) must be an object, managed pointer or native pointer. */
2313 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2317 for (i = 0; i < sig->param_count; ++i) {
2318 if (sig->params [i]->byref) {
2319 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2323 simple_type = mini_get_underlying_type (sig->params [i]);
/* (NOTE: several case labels / return lines are elided in this listing) */
2325 switch (simple_type->type) {
2326 case MONO_TYPE_VOID:
2335 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2341 case MONO_TYPE_FNPTR:
2342 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2345 case MONO_TYPE_CLASS:
2346 case MONO_TYPE_STRING:
2347 case MONO_TYPE_OBJECT:
2348 case MONO_TYPE_SZARRAY:
2349 case MONO_TYPE_ARRAY:
2350 if (args [i]->type != STACK_OBJ)
2355 if (args [i]->type != STACK_I8)
2359 if (args [i]->type != cfg->r4_stack_type)
2363 if (args [i]->type != STACK_R8)
2366 case MONO_TYPE_VALUETYPE:
2367 if (simple_type->data.klass->enumtype) {
2368 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2371 if (args [i]->type != STACK_VTYPE)
2374 case MONO_TYPE_TYPEDBYREF:
2375 if (args [i]->type != STACK_VTYPE)
2378 case MONO_TYPE_GENERICINST:
2379 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2382 case MONO_TYPE_MVAR:
2384 if (args [i]->type != STACK_VTYPE)
2388 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart
 * (the return lines for most arms are elided in this listing).
 */
2396 callvirt_to_call (int opcode)
2399 case OP_CALL_MEMBASE:
2401 case OP_VOIDCALL_MEMBASE:
2403 case OP_FCALL_MEMBASE:
2405 case OP_RCALL_MEMBASE:
2407 case OP_VCALL_MEMBASE:
2409 case OP_LCALL_MEMBASE:
2412 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 * Map a *_MEMBASE (virtual) call opcode to its indirect (_REG) counterpart.
 */
2419 callvirt_to_call_reg (int opcode)
2422 case OP_CALL_MEMBASE:
2424 case OP_VOIDCALL_MEMBASE:
2425 return OP_VOIDCALL_REG;
2426 case OP_FCALL_MEMBASE:
2427 return OP_FCALL_REG;
2428 case OP_RCALL_MEMBASE:
2429 return OP_RCALL_REG;
2430 case OP_VCALL_MEMBASE:
2431 return OP_VCALL_REG;
2432 case OP_LCALL_MEMBASE:
2433 return OP_LCALL_REG;
2435 g_assert_not_reached ();
2441 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 * Materialize the IMT argument (either the given IMT_ARG value or a
 * methodconst for METHOD) and attach it to CALL: via call->imt_arg_reg
 * under LLVM, via MONO_ARCH_IMT_REG otherwise.
 */
2443 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2447 if (COMPILE_LLVM (cfg)) {
2449 method_reg = alloc_preg (cfg);
2450 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2452 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2453 method_reg = ins->dreg;
2457 call->imt_arg_reg = method_reg;
2459 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2464 method_reg = alloc_preg (cfg);
2465 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2467 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2468 method_reg = ins->dreg;
2471 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP and fill in its type and target. */
2474 static MonoJumpInfo *
2475 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2477 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2481 ji->data.target = target;
/* cfg-aware wrapper around mono_class_check_context_used () (guard elided here). */
2487 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2490 return mono_class_check_context_used (klass);
/* cfg-aware wrapper around mono_method_check_context_used () (guard elided here). */
2496 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2499 return mono_method_check_context_used (method);
2505 * check_method_sharing:
2507 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2510 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2512 gboolean pass_vtable = FALSE;
2513 gboolean pass_mrgctx = FALSE;
/* vtable is needed for static/valuetype methods of generic classes */
2515 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2516 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2517 gboolean sharable = FALSE;
2519 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2523 * Pass vtable iff target method might
2524 * be shared, which means that sharing
2525 * is enabled for its class and its
2526 * context is sharable (and it's not a
2529 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx is needed for generic methods (those with a method_inst) */
2533 if (mini_method_get_context (cmethod) &&
2534 mini_method_get_context (cmethod)->method_inst) {
2535 g_assert (!pass_vtable);
2537 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2540 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* NULL out-params are allowed */
2545 if (out_pass_vtable)
2546 *out_pass_vtable = pass_vtable;
2547 if (out_pass_mrgctx)
2548 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Create (but do not emit) a MonoCallInst for a call with signature SIG
 * and arguments ARGS. Flags: CALLI = indirect call, VIRTUAL = has a
 * 'this' receiver, TAIL = tail call, RGCTX = an rgctx argument will be
 * passed, UNBOX_TRAMPOLINE = callee may need an unbox trampoline.
 * Handles valuetype returns (vret_var / OP_OUTARG_VTRETADDR), soft-float
 * argument conversion, and the arch-specific outgoing-arg lowering.
 * Returns the new call instruction; the caller adds it to a bblock.
 */
2551 inline static MonoCallInst *
2552 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2553 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2557 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls get the leave-method profiler event here, since the callee returns directly */
2565 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2567 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2569 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2572 call->signature = sig;
2573 call->rgctx_reg = rgctx;
2574 sig_ret = mini_get_underlying_type (sig->ret);
2576 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return: reuse the method's vret_addr when available... */
2579 if (mini_type_is_vtype (sig_ret)) {
2580 call->vret_var = cfg->vret_addr;
2581 //g_assert_not_reached ();
/* ...otherwise allocate a temp and pass its address via OP_OUTARG_VTRETADDR */
2583 } else if (mini_type_is_vtype (sig_ret)) {
2584 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2587 temp->backend.is_pinvoke = sig->pinvoke;
2590 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2591 * address of return value to increase optimization opportunities.
2592 * Before vtype decomposition, the dreg of the call ins itself represents the
2593 * fact the call modifies the return value. After decomposition, the call will
2594 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2595 * will be transformed into an LDADDR.
2597 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2598 loada->dreg = alloc_preg (cfg);
2599 loada->inst_p0 = temp;
2600 /* We reference the call too since call->dreg could change during optimization */
2601 loada->inst_p1 = call;
2602 MONO_ADD_INS (cfg->cbb, loada);
2604 call->inst.dreg = temp->dreg;
2606 call->vret_var = loada;
2607 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2608 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2610 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2611 if (COMPILE_SOFT_FLOAT (cfg)) {
2613 * If the call has a float argument, we would need to do an r8->r4 conversion using
2614 * an icall, but that cannot be done during the call sequence since it would clobber
2615 * the call registers + the stack. So we do it before emitting the call.
2617 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2619 MonoInst *in = call->args [i];
2621 if (i >= sig->hasthis)
2622 t = sig->params [i - sig->hasthis];
2624 t = &mono_defaults.int_class->byval_arg;
2625 t = mono_type_get_underlying_type (t);
2627 if (!t->byref && t->type == MONO_TYPE_R4) {
2628 MonoInst *iargs [1];
2632 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2634 /* The result will be in an int vreg */
2635 call->args [i] = conv;
2641 call->need_unbox_trampoline = unbox_trampoline;
2644 if (COMPILE_LLVM (cfg))
2645 mono_llvm_emit_call (cfg, call);
2647 mono_arch_emit_call (cfg, call);
2649 mono_arch_emit_call (cfg, call);
/* remember the max outgoing-arg area and mark the method as containing calls */
2652 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2653 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Bind RGCTX_REG (holding RGCTX_ARG) to the dedicated MONO_ARCH_RGCTX_REG
 * for CALL, and record on both cfg and the call that the rgctx register
 * is live across this call.
 */
2659 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2661 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2662 cfg->uses_rgctx_reg = TRUE;
2663 call->rgctx_reg = TRUE;
2665 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG. IMT_ARG and
 * RGCTX_ARG are optional hidden arguments. When pinvoke callconv
 * checking is enabled, brackets the call with OP_GET_SP/OP_SET_SP and
 * throws ExecutionEngineException if the native callee unbalanced the
 * stack. Returns the call instruction (as a MonoInst*).
 */
2669 inline static MonoInst*
2670 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2675 gboolean check_sp = FALSE;
2677 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2678 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2680 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value now; its vreg must survive until after the call */
2685 rgctx_reg = mono_alloc_preg (cfg);
2686 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2690 if (!cfg->stack_inbalance_var)
2691 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* snapshot SP before the call for the post-call balance check */
2693 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2694 ins->dreg = cfg->stack_inbalance_var->dreg;
2695 MONO_ADD_INS (cfg->cbb, ins);
2698 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2700 call->inst.sreg1 = addr->dreg;
2703 emit_imt_argument (cfg, call, NULL, imt_arg);
2705 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2710 sp_reg = mono_alloc_preg (cfg);
2712 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 MONO_ADD_INS (cfg->cbb, ins);
2716 /* Restore the stack so we don't crash when throwing the exception */
2717 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2718 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2719 MONO_ADD_INS (cfg->cbb, ins);
2721 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2722 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2726 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2728 return (MonoInst*)call;
2732 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2735 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2737 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual/tail) call to METHOD with arguments ARGS and
 * receiver THIS_INS. Handles: llvm-only interface/virtual dispatch via
 * runtime icalls, remoting wrappers, string ctor signature fixup,
 * delegate Invoke fast path, devirtualization of final/non-virtual
 * methods, and IMT/vtable-slot dispatch. IMT_ARG and RGCTX_ARG are
 * optional hidden arguments. Returns the call instruction.
 */
2740 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2741 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2743 #ifndef DISABLE_REMOTING
2744 gboolean might_be_remote = FALSE;
2746 gboolean virtual = this_ins != NULL;
2747 gboolean enable_for_aot = TRUE;
2750 MonoInst *call_target = NULL;
2752 gboolean need_unbox_trampoline;
2755 sig = mono_method_signature (method);
/* llvm-only: interface calls are resolved at runtime through an icall */
2757 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2758 MonoInst *icall_args [16];
2761 // FIXME: Optimize this
2763 guint32 imt_slot = mono_method_get_imt_slot (method);
2765 icall_args [0] = this_ins;
2766 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2768 icall_args [2] = imt_arg;
2770 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2771 icall_args [2] = ins;
2773 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2775 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2779 rgctx_reg = mono_alloc_preg (cfg);
2780 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2783 if (method->string_ctor) {
2784 /* Create the real signature */
2785 /* FIXME: Cache these */
2786 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2787 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2792 context_used = mini_method_check_context_used (cfg, method);
2794 #ifndef DISABLE_REMOTING
2795 might_be_remote = this_ins && sig->hasthis &&
2796 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2797 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2799 if (might_be_remote && context_used) {
2802 g_assert (cfg->gshared);
2804 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2806 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* llvm-only: virtual calls are resolved at runtime as well */
2810 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2811 // FIXME: Vcall optimizations below
2812 MonoInst *icall_args [16];
2815 if (sig->generic_param_count) {
2817 * Generic virtual call, pass the concrete method as the imt argument.
2819 imt_arg = emit_get_rgctx_method (cfg, context_used,
2820 method, MONO_RGCTX_INFO_METHOD);
2823 // FIXME: Optimize this
2825 int slot = mono_method_get_vtable_index (method);
2827 icall_args [0] = this_ins;
2828 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2830 icall_args [2] = imt_arg;
2832 EMIT_NEW_PCONST (cfg, ins, NULL);
2833 icall_args [2] = ins;
2835 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2838 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2840 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2842 #ifndef DISABLE_REMOTING
2843 if (might_be_remote)
2844 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2847 call->method = method;
2848 call->inst.flags |= MONO_INST_HAS_METHOD;
2849 call->inst.inst_left = this_ins;
2850 call->tail_call = tail;
2853 int vtable_reg, slot_reg, this_reg;
2856 this_reg = this_ins->dreg;
/* fast path for delegate Invoke: call through delegate->invoke_impl */
2858 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2859 MonoInst *dummy_use;
2861 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2863 /* Make a call to delegate->invoke_impl */
2864 call->inst.inst_basereg = this_reg;
2865 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2866 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2868 /* We must emit a dummy use here because the delegate trampoline will
2869 replace the 'this' argument with the delegate target making this activation
2870 no longer a root for the delegate.
2871 This is an issue for delegates that target collectible code such as dynamic
2872 methods of GC'able assemblies.
2874 For a test case look into #667921.
2876 FIXME: a dummy use is not the best way to do it as the local register allocator
2877 will put it on a caller save register and spill it around the call.
2878 Ideally, we would either put it on a callee save register or only do the store part.
2880 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2882 return (MonoInst*)call;
2885 if ((!cfg->compile_aot || enable_for_aot) &&
2886 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2887 (MONO_METHOD_IS_FINAL (method) &&
2888 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2889 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2891 * the method is not virtual, we just need to ensure this is not null
2892 * and then we can call the method directly.
2894 #ifndef DISABLE_REMOTING
2895 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2897 * The check above ensures method is not gshared, this is needed since
2898 * gshared methods can't have wrappers.
2900 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2904 if (!method->string_ctor)
2905 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2907 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2908 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2910 * the method is virtual, but we can statically dispatch since either
2911 * its class or the method itself are sealed.
2912 * But first we need to ensure it's not a null reference.
2914 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2916 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2917 } else if (call_target) {
2918 vtable_reg = alloc_preg (cfg);
2919 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2921 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2922 call->inst.sreg1 = call_target->dreg;
/* BUG(review): '!' should be '~' — '&= !MONO_INST_HAS_METHOD' evaluates to
 * '&= 0', clearing EVERY flag on the instruction instead of just
 * MONO_INST_HAS_METHOD. Fix in a follow-up change. */
2923 call->inst.flags &= !MONO_INST_HAS_METHOD;
/* generic slot-based dispatch: IMT slot for interfaces, vtable slot otherwise */
2925 vtable_reg = alloc_preg (cfg);
2926 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2927 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2928 guint32 imt_slot = mono_method_get_imt_slot (method);
2929 emit_imt_argument (cfg, call, call->method, imt_arg);
2930 slot_reg = vtable_reg;
/* IMT table sits *before* the vtable, hence the negative offset */
2931 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2933 slot_reg = vtable_reg;
2934 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2935 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2937 g_assert (mono_method_signature (method)->generic_param_count);
2938 emit_imt_argument (cfg, call, call->method, imt_arg);
2942 call->inst.sreg1 = slot_reg;
2943 call->inst.inst_offset = offset;
2944 call->is_virtual = TRUE;
2948 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2951 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2953 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no imt/rgctx hidden arguments.
 */
2957 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2959 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * No receiver, no tail call, no hidden arguments.
 */
2963 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2970 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2973 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2975 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Look up the registered JIT icall for FUNC and emit a call to its
 * (exception-safe) wrapper.
 */
2979 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2981 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2985 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2989 * mono_emit_abs_call:
2991 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2993 inline static MonoInst*
2994 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2995 MonoMethodSignature *sig, MonoInst **args)
2997 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
3001 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* register ji in abs_patches so the patch resolver can find it later */
3004 if (cfg->abs_patches == NULL)
3005 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3006 g_hash_table_insert (cfg->abs_patches, ji, ji);
3007 ins = mono_emit_native_call (cfg, ji, sig, args);
/* mark that the 'address' is really a patch-info pointer, not a function */
3008 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 * Whether icalls may be invoked directly (without their managed wrapper).
 * Disabled under LLVM on some targets, when emitting sdb sequence points,
 * or when explicitly turned off on the cfg.
 */
3013 direct_icalls_enabled (MonoCompile *cfg)
3015 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3017 if (cfg->compile_llvm)
3020 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call to the icall described by INFO. When the icall cannot
 * raise and direct icalls are enabled, inline the (lazily created)
 * icall wrapper instead of calling it; otherwise fall back to a normal
 * call through the wrapper.
 */
3026 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3029 * Call the jit icall without a wrapper if possible.
3030 * The wrapper is needed for the following reasons:
3031 * - to handle exceptions thrown using mono_raise_exceptions () from the
3032 * icall function. The EH code needs the lmf frame pushed by the
3033 * wrapper to be able to unwind back to managed code.
3034 * - to be able to do stack walks for asynchronously suspended
3035 * threads when debugging.
3037 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* create the wrapper lazily, once per icall */
3041 if (!info->wrapper_method) {
3042 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3043 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* publish wrapper_method to other threads before anyone inlines it */
3045 mono_memory_barrier ();
3049 * Inline the wrapper method, which is basically a call to the C icall, and
3050 * an exception check.
3052 costs = inline_method (cfg, info->wrapper_method, NULL,
3053 args, NULL, cfg->real_offset, TRUE);
3054 g_assert (costs > 0);
3055 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3059 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * After a pinvoke (or LLVM) call returning a small integer, emit an
 * explicit sign/zero extension of the result, since native code may
 * leave the upper bits of the register uninitialized.
 */
3064 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3066 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3067 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3071 * Native code might return non register sized integers
3072 * without initializing the upper bits.
/* pick the widening conversion matching the return type's load size */
3074 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3075 case OP_LOADI1_MEMBASE:
3076 widen_op = OP_ICONV_TO_I1;
3078 case OP_LOADU1_MEMBASE:
3079 widen_op = OP_ICONV_TO_U1;
3081 case OP_LOADI2_MEMBASE:
3082 widen_op = OP_ICONV_TO_I2;
3084 case OP_LOADU2_MEMBASE:
3085 widen_op = OP_ICONV_TO_U2;
3091 if (widen_op != -1) {
3092 int dreg = alloc_preg (cfg);
3095 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3096 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return (and cache) the corlib-internal String.memcpy (3 args) helper.
 * Aborts if corlib is too old to contain it.
 */
3106 get_memcpy_method (void)
3108 static MonoMethod *memcpy_method = NULL;
3109 if (!memcpy_method) {
3110 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3112 g_error ("Old corlib found. Install a new one");
3114 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively build a bitmap (one bit per pointer-sized word, starting
 * at OFFSET words) of which slots in KLASS hold managed references and
 * therefore need a GC write barrier when copied. Static fields are
 * skipped; nested valuetypes with references recurse with their offset.
 */
3118 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3120 MonoClassField *field;
3121 gpointer iter = NULL;
3123 while ((field = mono_class_get_fields (klass, &iter))) {
3126 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) object header; strip it */
3128 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3129 if (mini_type_is_reference (mono_field_get_type (field))) {
3130 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3131 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3133 MonoClass *field_class = mono_class_from_mono_type (field->type);
3134 if (field_class->has_references)
3135 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for storing VALUE through PTR. Prefers, in
 * order: an arch-specific OP_CARD_TABLE_WBARRIER, inline card-table
 * marking (JIT-only, no nursery check needed), or a call to the
 * generic write-barrier method. No-op if barriers are disabled.
 */
3141 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3143 int card_table_shift_bits;
3144 gpointer card_table_mask;
3146 MonoInst *dummy_use;
3147 int nursery_shift_bits;
3148 size_t nursery_size;
3150 if (!cfg->gen_write_barriers)
3153 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3155 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3157 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3160 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3161 wbarrier->sreg1 = ptr->dreg;
3162 wbarrier->sreg2 = value->dreg;
3163 MONO_ADD_INS (cfg->cbb, wbarrier);
3164 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3165 int offset_reg = alloc_preg (cfg);
/* card index = (ptr >> shift) & mask */
3169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3170 if (card_table_mask)
3171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3173 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3174 * IMM's larger than 32bits.
3176 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3177 card_reg = ins->dreg;
3179 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
/* mark the card dirty */
3180 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3182 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3183 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
3186 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Try to emit a write-barrier-aware copy of a valuetype of KLASS from
 * iargs[1] to iargs[0] (SIZE bytes, ALIGN alignment). Small copies are
 * unrolled word-by-word with barriers only on reference slots (per the
 * bitmap from create_write_barrier_bitmap); larger copies fall back to
 * the mono_gc_wbarrier_value_copy_bitmap icall. Presumably returns
 * FALSE when the intrinsic path cannot be used (return sites elided).
 */
3190 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3192 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3193 unsigned need_wb = 0;
3198 /*types with references can't have alignment smaller than sizeof(void*) */
3199 if (align < SIZEOF_VOID_P)
3202 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3203 if (size > 32 * SIZEOF_VOID_P)
3206 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3208 /* We don't unroll more than 5 stores to avoid code bloat. */
3209 if (size > 5 * SIZEOF_VOID_P) {
3210 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3211 size += (SIZEOF_VOID_P - 1);
3212 size &= ~(SIZEOF_VOID_P - 1);
3214 EMIT_NEW_ICONST (cfg, iargs [2], size);
3215 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3216 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3220 destreg = iargs [0]->dreg;
3221 srcreg = iargs [1]->dreg;
3224 dest_ptr_reg = alloc_preg (cfg);
3225 tmp_reg = alloc_preg (cfg);
3228 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* unrolled word-wise copy; barrier only where the bitmap says so */
3230 while (size >= SIZEOF_VOID_P) {
3231 MonoInst *load_inst;
3232 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3233 load_inst->dreg = tmp_reg;
3234 load_inst->inst_basereg = srcreg;
3235 load_inst->inst_offset = offset;
3236 MONO_ADD_INS (cfg->cbb, load_inst);
3238 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3241 emit_write_barrier (cfg, iargs [0], load_inst);
3243 offset += SIZEOF_VOID_P;
3244 size -= SIZEOF_VOID_P;
3247 /*tmp += sizeof (void*)*/
3248 if (size >= SIZEOF_VOID_P) {
3249 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3250 MONO_ADD_INS (cfg->cbb, iargs [0]);
3254 /* Those cannot be references since size < sizeof (void*) */
3256 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3257 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3263 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3264 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3270 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3271 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3280 * Emit code to copy a valuetype of type @klass whose address is stored in
3281 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native (marshalled) layout/size. For gsharedvt classes
 * the size and memcpy helper come from the rgctx. Copies that might hit
 * heap references go through the write-barrier-aware paths; plain data
 * uses inline memcpy or the corlib memcpy helper.
 */
3284 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3286 MonoInst *iargs [4];
3289 MonoMethod *memcpy_method;
3290 MonoInst *size_ins = NULL;
3291 MonoInst *memcpy_ins = NULL;
3295 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3298 * This check breaks with spilled vars... need to handle it during verification anyway.
3299 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size/memcpy are runtime values fetched from the rgctx */
3302 if (mini_is_gsharedvt_klass (klass)) {
3304 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3305 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3309 n = mono_class_native_size (klass, &align);
3311 n = mono_class_value_size (klass, &align);
3313 /* if native is true there should be no references in the struct */
3314 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3315 /* Avoid barriers when storing to the stack */
3316 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3317 (dest->opcode == OP_LDADDR))) {
3323 context_used = mini_class_check_context_used (cfg, klass);
3325 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3326 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3328 } else if (context_used) {
3329 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3331 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3332 if (!cfg->compile_aot)
3333 mono_class_compute_gc_descriptor (klass);
3337 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3339 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no references: plain memory copy */
3344 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3345 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3346 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3351 iargs [2] = size_ins;
3353 EMIT_NEW_ICONST (cfg, iargs [2], n);
3355 memcpy_method = get_memcpy_method ();
3357 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3359 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and cache) the corlib-internal String.memset (3 args) helper.
 * Aborts if corlib is too old to contain it.
 */
3364 get_memset_method (void)
3366 static MonoMethod *memset_method = NULL;
3367 if (!memset_method) {
3368 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3370 g_error ("Old corlib found. Install a new one");
3372 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code to zero-initialize a valuetype of KLASS at address
 * DEST->dreg (the CIL 'initobj' semantics). For gsharedvt classes the
 * size and a bzero helper come from the rgctx; otherwise small sizes
 * are inlined via mini_emit_memset and larger ones call corlib memset.
 */
3376 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3378 MonoInst *iargs [3];
3381 MonoMethod *memset_method;
3382 MonoInst *size_ins = NULL;
3383 MonoInst *bzero_ins = NULL;
3384 static MonoMethod *bzero_method;
3386 /* FIXME: Optimize this for the case when dest is an LDADDR */
3387 mono_class_init (klass);
3388 if (mini_is_gsharedvt_klass (klass)) {
3389 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3390 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3392 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3393 g_assert (bzero_method);
3395 iargs [1] = size_ins;
3396 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3400 n = mono_class_value_size (klass, &align);
/* small enough: inline the memset */
3402 if (n <= sizeof (gpointer) * 8) {
3403 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3406 memset_method = get_memset_method ();
3408 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3409 EMIT_NEW_ICONST (cfg, iargs [2], n);
3410 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3417 * Emit IR to return either the this pointer for instance method,
3418 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 * Load the runtime generic context source for METHOD: the 'this'
 * argument for ordinary instance methods, the mrgctx variable when the
 * method context is used, or the vtable variable for static/valuetype
 * methods. Requires generic sharing (cfg->gshared).
 */
3421 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3423 MonoInst *this_ins = NULL;
3425 g_assert (cfg->gshared);
3427 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3428 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3429 !method->klass->valuetype)
3430 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3432 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3433 MonoInst *mrgctx_loc, *mrgctx_var;
3435 g_assert (!this_ins);
3436 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3438 mrgctx_loc = mono_get_vtable_var (cfg);
3439 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3442 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3443 MonoInst *vtable_loc, *vtable_var;
3445 g_assert (!this_ins);
3447 vtable_loc = mono_get_vtable_var (cfg);
3448 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* the variable actually holds an mrgctx; load the vtable out of it */
3450 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3451 MonoInst *mrgctx_var = vtable_var;
3454 vtable_reg = alloc_preg (cfg);
3455 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3456 vtable_var->type = STACK_PTR;
/* instance method: fetch the vtable from the 'this' object */
3464 vtable_reg = alloc_preg (cfg);
3465 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from mempool MP) an rgctx-entry descriptor: which METHOD's
 * context it belongs to, whether it lives in an mrgctx, the embedded
 * patch info (PATCH_TYPE/PATCH_DATA), and the kind of value to fetch.
 */
3470 static MonoJumpInfoRgctxEntry *
3471 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3473 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3474 res->method = method;
3475 res->in_mrgctx = in_mrgctx;
3476 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3477 res->data->type = patch_type;
3478 res->data->data.target = patch_data;
3479 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 * Emit an inline fast path that walks the rgctx array chain and loads
 * the slot for ENTRY, falling back to the mono_fill_{method,class}_rgctx
 * icall when the rgctx, an intermediate array, or the slot itself is
 * still NULL (lazy initialization). RGCTX is the vtable or mrgctx
 * pointer produced by emit_get_rgctx ().
 */
3484 static inline MonoInst*
3485 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3487 MonoInst *args [16];
3490 // FIXME: No fastpath since the slot is not a compile time constant
3492 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3493 if (entry->in_mrgctx)
3494 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3496 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3500 * FIXME: This can be called during decompose, which is a problem since it creates
3502 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3504 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3506 MonoBasicBlock *is_null_bb, *end_bb;
3507 MonoInst *res, *ins, *call;
3510 slot = mini_get_rgctx_entry_slot (entry);
3512 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3513 index = MONO_RGCTX_SLOT_INDEX (slot);
3515 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* figure out how many array hops are needed to reach this index */
3516 for (depth = 0; ; ++depth) {
3517 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3519 if (index < size - 1)
3524 NEW_BBLOCK (cfg, end_bb);
3525 NEW_BBLOCK (cfg, is_null_bb);
3528 rgctx_reg = rgctx->dreg;
3530 rgctx_reg = alloc_preg (cfg);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3533 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3534 NEW_BBLOCK (cfg, is_null_bb);
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3540 for (i = 0; i < depth; ++i) {
3541 int array_reg = alloc_preg (cfg);
3543 /* load ptr to next array */
3544 if (mrgctx && i == 0)
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3548 rgctx_reg = array_reg;
3549 /* is the ptr null? */
3550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3551 /* if yes, jump to actual trampoline */
3552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3556 val_reg = alloc_preg (cfg);
/* +1: slot 0 of each array is the link to the next array */
3557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3558 /* is the slot null? */
3559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3560 /* if yes, jump to actual trampoline */
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3564 res_reg = alloc_preg (cfg);
3565 MONO_INST_NEW (cfg, ins, OP_MOVE);
3566 ins->dreg = res_reg;
3567 ins->sreg1 = val_reg;
3568 MONO_ADD_INS (cfg->cbb, ins);
3570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: ask the runtime to fill the slot */
3573 MONO_START_BB (cfg, is_null_bb);
3575 EMIT_NEW_ICONST (cfg, args [1], index);
3577 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3579 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3580 MONO_INST_NEW (cfg, ins, OP_MOVE);
3581 ins->dreg = res_reg;
3582 ins->sreg1 = call->dreg;
3583 MONO_ADD_INS (cfg->cbb, ins);
3584 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3586 MONO_START_BB (cfg, end_bb);
3595 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * emit_rgctx_fetch:
 * Dispatch to the inline fast-path version or to the lazy-fetch
 * trampoline (via an abs call), depending on compilation mode.
 */
3598 static inline MonoInst*
3599 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3602 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3604 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR that loads the RGCTX_TYPE info (e.g. vtable, size) for KLASS
 * from the runtime generic context of the current method.
 */
3608 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3609 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3611 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3612 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3614 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR that loads the RGCTX_TYPE info for signature SIG from the
 * runtime generic context of the current method.
 */
3618 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3619 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3621 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3622 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3624 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR that loads gsharedvt call info (for calling CMETHOD with
 * signature SIG) from the runtime generic context.
 */
3628 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3629 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3631 MonoJumpInfoGSharedVtCall *call_info;
3632 MonoJumpInfoRgctxEntry *entry;
3635 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3636 call_info->sig = sig;
3637 call_info->method = cmethod;
3639 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3640 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3642 return emit_rgctx_fetch (cfg, rgctx, entry);
3646 * emit_get_rgctx_virt_method:
3648 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/* Builds a mempool-allocated (klass, virt_method) pair and fetches the
 * corresponding RGCTX_TYPE item through the runtime generic context. */
3651 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3652 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3654 MonoJumpInfoVirtMethod *info;
3655 MonoJumpInfoRgctxEntry *entry;
/* NOTE(review): the declaration of 'rgctx' is elided in this view. */
3658 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3659 info->klass = klass;
3660 info->method = virt_method;
3662 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3663 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR that loads the gsharedvt per-method runtime info (INFO) for
 * CMETHOD from the runtime generic context. The info type is fixed to
 * MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO.
 */
3669 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3670 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3672 MonoJumpInfoRgctxEntry *entry;
/* NOTE(review): the declaration of 'rgctx' is elided in this view. */
3675 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3676 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3678 return emit_rgctx_fetch (cfg, rgctx, entry);
3682 * emit_get_rgctx_method:
3684 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3685 * normal constants, else emit a load from the rgctx.
3688 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3689 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Fast path: no generic sharing, the value is a compile-time constant. */
3691 if (!context_used) {
3694 switch (rgctx_type) {
3695 case MONO_RGCTX_INFO_METHOD:
3696 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3698 case MONO_RGCTX_INFO_METHOD_RGCTX:
3699 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two info types above can be materialized as constants. */
3702 g_assert_not_reached ();
/* Shared path: fetch the item from the runtime generic context. */
3705 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3706 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3708 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that loads the RGCTX_TYPE info item for FIELD from the runtime
 * generic context of the current method.
 */
3713 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3714 MonoClassField *field, MonoRgctxInfoType rgctx_type)
/* Same pattern as the other emit_get_rgctx_* helpers, keyed on a field. */
3716 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3717 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the gsharedvt info
 * template table of the current method, registering a new entry if none
 * exists yet. Existing entries are reused, except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries which always get a fresh slot.
 */
3723 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3725 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3726 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing entry matching (rgctx_type, data). */
3731 for (i = 0; i < info->num_entries; ++i) {
3732 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3734 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full; doubles capacity, starting at 16.
 * Mempool allocations are never freed, so the old array is simply abandoned. */
3738 if (info->num_entries == info->count_entries) {
3739 MonoRuntimeGenericContextInfoTemplate *new_entries;
3740 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3742 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3744 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3745 info->entries = new_entries;
3746 info->count_entries = new_count_entries;
/* Append the new entry and return its index. */
3749 idx = info->num_entries;
3750 template = &info->entries [idx];
3751 template->info_type = rgctx_type;
3752 template->data = data;
3754 info->num_entries ++;
3760 * emit_get_gsharedvt_info:
3762 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3765 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot for (rgctx_type, data) in the method's info table. */
3770 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3771 /* Load info->entries [idx] */
3772 dreg = alloc_preg (cfg);
/* The gsharedvt info var holds a MonoGSharedVtMethodRuntimeInfo*; index into its trailing 'entries' array. */
3773 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info for KLASS (passed as its by-value type). */
3779 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3781 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3785 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR that runs the static constructor / class initialization of KLASS
 * if it has not run yet. The vtable is obtained either from the rgctx (shared
 * code) or as a constant.
 */
3788 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3790 MonoInst *vtable_arg;
/* NOTE(review): the branch between the rgctx path and the constant path is elided in this view. */
3793 context_used = mini_class_check_context_used (cfg, klass);
3796 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3797 klass, MONO_RGCTX_INFO_VTABLE);
3799 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3803 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Backend-supported fast path: a single opcode performs the init check + call. */
3806 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3810 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3811 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3813 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3814 ins->sreg1 = vtable_arg->dreg;
3815 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback path: open-code the "is initialized" bit test on the vtable. */
3817 static int byte_offset = -1;
3818 static guint8 bitmask;
3819 int bits_reg, inited_reg;
3820 MonoBasicBlock *inited_bb;
3821 MonoInst *args [16];
/* Locate MonoVTable.initialized (a bitfield) once; cached in the statics above. */
3823 if (byte_offset < 0)
3824 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3826 bits_reg = alloc_ireg (cfg);
3827 inited_reg = alloc_ireg (cfg);
/* inited_reg = vtable->initialized-bit; skip the icall when already set. */
3829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3832 NEW_BBLOCK (cfg, inited_bb);
3834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3835 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
/* Slow path: call the runtime helper to run the class initializer. */
3837 args [0] = vtable_arg;
3838 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3840 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for the debugger/profiler, but only
 * when sequence-point generation is on and METHOD is the outermost method
 * being compiled (not an inlinee). INTR_LOC marks it as an interruption
 * location; NONEMPTY_STACK flags that the IL stack is not empty here.
 */
3845 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3849 if (cfg->gen_seq_points && cfg->method == method) {
3850 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3852 ins->flags |= MONO_INST_NONEMPTY_STACK;
3853 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   Debug aid for --debug=casts: before a checked cast, store the source class
 * (from OBJ_REG's vtable) and the target KLASS into the JIT TLS area so a
 * failing cast can report both types. NULL_CHECK guards the load when the
 * object may be null. No-op unless better_cast_details is enabled.
 */
3858 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3860 if (mini_get_debug_options ()->better_cast_details) {
3861 int vtable_reg = alloc_preg (cfg);
3862 int klass_reg = alloc_preg (cfg);
3863 MonoBasicBlock *is_null_bb = NULL;
3865 int to_klass_reg, context_used;
/* Skip the detail stores entirely for a null object. */
3868 NEW_BBLOCK (cfg, is_null_bb);
3870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3871 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* The details live in MonoJitTlsData; bail out if the platform has no TLS intrinsic. */
3874 tls_get = mono_get_jit_tls_intrinsic (cfg);
3876 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3880 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3882 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to = KLASS, loaded via the rgctx for shared code, else a constant. */
3886 context_used = mini_class_check_context_used (cfg, klass);
3888 MonoInst *class_ins;
3890 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3891 to_klass_reg = class_ins->dreg;
3893 to_klass_reg = alloc_preg (cfg);
3894 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3899 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: clear the saved cast info in the JIT TLS
 * area after a successful cast. No-op unless better_cast_details is enabled.
 */
3904 reset_cast_details (MonoCompile *cfg)
3906 /* Reset the variables holding the cast details */
3907 if (mini_get_debug_options ()->better_cast_details) {
3908 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3910 MONO_ADD_INS (cfg->cbb, tls_get);
3911 /* It is enough to reset the from field */
3912 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3917 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR verifying that OBJ's runtime type is exactly ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * The comparison strategy depends on the compilation mode: class pointers for
 * MONO_OPT_SHARED, rgctx vtable for shared-generic code, vtable constant for AOT,
 * and a raw vtable immediate for plain JIT.
 */
3920 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3922 int vtable_reg = alloc_preg (cfg);
3925 context_used = mini_class_check_context_used (cfg, array_class);
/* Record the cast details for --debug=casts before the faulting load. */
3927 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3929 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3931 if (cfg->opt & MONO_OPT_SHARED) {
3932 int class_reg = alloc_preg (cfg);
/* Compare klass pointers; the expected class is a runtime constant patch. */
3935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3936 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3937 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3938 } else if (context_used) {
3939 MonoInst *vtable_ins;
/* Shared-generic code: fetch the expected vtable from the rgctx. */
3941 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3944 if (cfg->compile_aot) {
/* AOT: the vtable must be representable as a patchable constant. */
3948 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3950 vt_reg = alloc_preg (cfg);
3951 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3952 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* Plain JIT: compare against the vtable address as an immediate. */
3955 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3957 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3961 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3963 reset_cast_details (cfg);
3967 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3968 * generic code is generated.
/* Calls the compiler-generated Nullable<T>.Unbox(object) helper; either
 * indirectly through an rgctx-provided code pointer (shared code) or as a
 * direct call, optionally passing the vtable when the helper needs it. */
3971 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3973 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3976 MonoInst *rgctx, *addr;
3978 /* FIXME: What if the class is shared? We might not
3979 have to get the address of the method from the
/* Shared path: obtain the method's code address from the rgctx and calli. */
3981 addr = emit_get_rgctx_method (cfg, context_used, method,
3982 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3984 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3986 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable if Unbox requires it. */
3988 gboolean pass_vtable, pass_mrgctx;
3989 MonoInst *rgctx_arg = NULL;
3991 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3992 g_assert (!pass_mrgctx);
3995 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3998 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4001 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the 'unbox' opcode: type-check SP[0] against KLASS (throwing
 * InvalidCastException on mismatch) and return the address of the value
 * payload inside the boxed object (object pointer + sizeof (MonoObject)).
 */
4006 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4010 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4011 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4012 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4013 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4015 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4016 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4017 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4019 /* FIXME: generics */
4020 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array klass. */
4023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4024 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Compare element classes: this makes the check enum-insensitive (an enum
 * and its underlying type share an element class). */
4026 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4027 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generic path: the expected element class comes from the rgctx. */
4030 MonoInst *element_class;
4032 /* This assertion is from the unboxcast insn */
4033 g_assert (klass->rank == 0);
4035 element_class = emit_get_rgctx_klass (cfg, context_used,
4036 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4038 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4039 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: check against the compile-time element class, with cast
 * details recorded for --debug=casts. */
4041 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4042 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4043 reset_cast_details (cfg);
/* Result: pointer to the unboxed payload, right after the object header. */
4046 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4047 MONO_ADD_INS (cfg->cbb, add);
4048 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR for unbox.any when KLASS is a gsharedvt type whose concrete kind
 * (reference / vtype / nullable) is only known at runtime. Branches on the
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE info: vtypes yield a pointer into the boxed
 * object, references are castclass'd and spilled to a temporary, nullables go
 * through the Nullable<T> unbox helper. Returns the loaded value.
 */
4055 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4057 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4058 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4062 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type-check + unbox via the runtime helper (also handles the cast failure). */
4068 args [1] = klass_inst;
4071 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
/* Three-way dispatch on the runtime box kind of KLASS. */
4073 NEW_BBLOCK (cfg, is_ref_bb);
4074 NEW_BBLOCK (cfg, is_nullable_bb);
4075 NEW_BBLOCK (cfg, end_bb);
4076 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4078 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4081 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4083 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4084 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload follows the MonoObject header. */
4088 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4089 MONO_ADD_INS (cfg->cbb, addr);
4091 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4094 MONO_START_BB (cfg, is_ref_bb);
4096 /* Save the ref to a temporary */
4097 dreg = alloc_ireg (cfg);
4098 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4099 addr->dreg = addr_reg;
4100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4101 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4104 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through an rgctx-provided code
 * pointer; the signature is built by hand since the method cannot be
 * constructed at JIT time for a gsharedvt instantiation. */
4107 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4108 MonoInst *unbox_call;
4109 MonoMethodSignature *unbox_sig;
4111 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4112 unbox_sig->ret = &klass->byval_arg;
4113 unbox_sig->param_count = 1;
4114 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4115 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4117 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4118 addr->dreg = addr_reg;
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4124 MONO_START_BB (cfg, end_bb);
/* All paths left an address in addr_reg; load the value from it. */
4127 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4133 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of KLASS. FOR_BOX selects the boxing
 * allocator variant. The strategy depends on compilation mode: shared-generic
 * code fetches klass/vtable from the rgctx, MONO_OPT_SHARED goes through
 * mono_object_new with an explicit domain, AOT out-of-line corlib allocations
 * use a token-based helper, and otherwise a (possibly managed/inlined) GC
 * allocator is called with the vtable.
 */
4136 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4138 MonoInst *iargs [2];
/* Shared-generic path (note: shadows the outer iargs on purpose in the full file). */
4144 MonoInst *iargs [2];
4145 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4147 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* mono_object_new takes a klass; the specific allocators take a vtable. */
4149 if (cfg->opt & MONO_OPT_SHARED)
4150 rgctx_info = MONO_RGCTX_INFO_KLASS;
4152 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4153 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4155 if (cfg->opt & MONO_OPT_SHARED) {
4156 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4158 alloc_ftn = mono_object_new;
4161 alloc_ftn = mono_object_new_specific;
/* Prefer the managed allocator when available and the size is known statically. */
4164 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4165 if (known_instance_size) {
4166 int size = mono_class_instance_size (klass);
/* Sanity check: every object is at least a MonoObject header. */
4167 if (size < sizeof (MonoObject))
4168 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4170 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4172 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4175 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4178 if (cfg->opt & MONO_OPT_SHARED) {
4179 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4180 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4182 alloc_ftn = mono_object_new;
4183 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4184 /* This happens often in argument checking code, eg. throw new FooException... */
4185 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4186 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4187 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4189 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4190 MonoMethod *managed_alloc = NULL;
/* A NULL vtable means KLASS failed to load; surface it as a TypeLoadException. */
4194 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4195 cfg->exception_ptr = klass;
4199 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4201 if (managed_alloc) {
4202 int size = mono_class_instance_size (klass);
4203 if (size < sizeof (MonoObject))
4204 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4206 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4207 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4208 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fallback: per-class allocation function; some variants take the size in
 * gpointer-sized words (pass_lw) as the first argument. */
4210 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4212 guint32 lw = vtable->klass->instance_size;
4213 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4214 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4215 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4218 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4222 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4226 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL of type KLASS and returns the resulting object
 * reference. Nullable<T> goes through its compiler-generated Box helper;
 * gsharedvt types branch at runtime on the box kind (ref types are returned
 * as-is, vtypes are allocated + copied, nullables call the helper indirectly);
 * plain value types are allocated and the payload stored after the header.
 */
4229 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4231 MonoInst *alloc, *ins;
4233 if (mono_class_is_nullable (klass)) {
4234 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4237 /* FIXME: What if the class is shared? We might not
4238 have to get the method address from the RGCTX. */
/* Shared path: call Nullable<T>.Box through an rgctx-provided code pointer. */
4239 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4240 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4241 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4243 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable when Box needs it. */
4245 gboolean pass_vtable, pass_mrgctx;
4246 MonoInst *rgctx_arg = NULL;
4248 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4249 g_assert (!pass_mrgctx);
4252 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4255 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4258 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the box kind of KLASS is only known at runtime. */
4262 if (mini_is_gsharedvt_klass (klass)) {
4263 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4264 MonoInst *res, *is_ref, *src_var, *addr;
4267 dreg = alloc_ireg (cfg);
4269 NEW_BBLOCK (cfg, is_ref_bb);
4270 NEW_BBLOCK (cfg, is_nullable_bb);
4271 NEW_BBLOCK (cfg, end_bb);
4272 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4273 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4274 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4277 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the payload after the header. */
4280 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4283 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4284 ins->opcode = OP_STOREV_MEMBASE;
4286 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4287 res->type = STACK_OBJ;
4289 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4292 MONO_START_BB (cfg, is_ref_bb);
4294 /* val is a vtype, so has to load the value manually */
4295 src_var = get_vreg_to_inst (cfg, val->dreg);
4297 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4298 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4299 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4300 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4303 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box through an rgctx code pointer. */
4306 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4307 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4309 MonoMethodSignature *box_sig;
4312 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4313 * construct that method at JIT time, so have to do things by hand.
4315 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4316 box_sig->ret = &mono_defaults.object_class->byval_arg;
4317 box_sig->param_count = 1;
4318 box_sig->params [0] = &klass->byval_arg;
4319 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4320 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4321 res->type = STACK_OBJ;
4325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4327 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate, store the payload after the header. */
4331 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4335 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instantiation (or, for shared code, an
 * open generic type) that has at least one covariant/contravariant type
 * parameter instantiated with a reference type. Such casts need the full
 * variance-aware cast helper instead of the fast inline checks.
 */
4341 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4344 MonoGenericContainer *container;
4345 MonoGenericInst *ginst;
4347 if (klass->generic_class) {
4348 container = klass->generic_class->container_class->generic_container;
4349 ginst = klass->generic_class->context.class_inst;
4350 } else if (klass->generic_container && context_used) {
4351 container = klass->generic_container;
4352 ginst = container->context.class_inst;
/* Scan the type arguments of each variant parameter for a reference type. */
4357 for (i = 0; i < container->type_argc; ++i) {
4359 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4361 type = ginst->type_argv [i];
4362 if (mini_type_is_reference (type))
4368 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD (an internal call) may be invoked directly instead
 * of through the usual wrapper. Only whitelisted corlib classes qualify, since
 * direct callees must never raise a managed exception.
 */
4371 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4373 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4374 if (!direct_icalls_enabled (cfg))
4378 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4379 * Whitelist a few icalls for now.
/* Lazily build the whitelist; the memory barrier publishes the fully built
 * table before the global pointer becomes visible to other threads. */
4381 if (!direct_icall_type_hash) {
4382 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4384 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4385 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4386 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4387 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4388 mono_memory_barrier ();
4389 direct_icall_type_hash = h;
4392 if (cmethod->klass == mono_defaults.math_class)
4394 /* No locking needed */
/* The table is read-only after publication, so lookups need no lock. */
4395 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
4400 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshalling wrapper for KLASS.
 * ARGS are the wrapper's arguments (object, klass, cache slot). Cast details
 * are recorded around the call for --debug=casts.
 */
4403 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4405 MonoMethod *mono_castclass;
4408 mono_castclass = mono_marshal_get_castclass_with_cache ();
4410 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4411 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4412 reset_cast_details (cfg);
/* Return a fresh cache index for a castclass call site, unique across the
 * whole compilation (method index in the high 16 bits, per-method counter
 * in the low 16). */
4418 get_castclass_cache_idx (MonoCompile *cfg)
4420 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4421 cfg->castclass_cache_index ++;
4422 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant: cast OBJ to KLASS through the cached castclass
 * wrapper, with the klass and the per-call-site cache slot emitted as
 * constants/patches rather than rgctx fetches.
 */
4426 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4435 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* The cache slot is identified by a unique per-call-site index patch. */
4438 idx = get_castclass_cache_idx (cfg);
4439 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4441 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4442 return emit_castclass_with_cache (cfg, klass, args);
4446 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode: check that SRC is null or an instance
 * of KLASS, throwing InvalidCastException otherwise, and return the (typed)
 * object. Strategy escalates with cast complexity: cached wrapper for variant
 * generic interfaces, inlined marshalling wrapper for interfaces/MBR classes,
 * and inline vtable/klass comparisons for the simple cases. INLINE_COSTS is
 * bumped by the cost of any inlined helper.
 */
4449 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4451 MonoBasicBlock *is_null_bb;
4452 int obj_reg = src->dreg;
4453 int vtable_reg = alloc_preg (cfg);
4455 MonoInst *klass_inst = NULL, *res;
4457 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic arguments defeat the simple checks: use the cached wrapper. */
4459 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4460 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4461 (*inline_costs) += 2;
4463 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4464 MonoMethod *mono_castclass;
4465 MonoInst *iargs [1];
/* Inline the per-class castclass wrapper at this call site. */
4468 mono_castclass = mono_marshal_get_castclass (klass);
4471 save_cast_details (cfg, klass, src->dreg, TRUE);
4472 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4473 iargs, ip, cfg->real_offset, TRUE);
4474 reset_cast_details (cfg);
4475 CHECK_CFG_EXCEPTION;
4476 g_assert (costs > 0);
/* NOTE(review): the +5 presumably accounts for the castclass opcode's IL size — confirm. */
4478 cfg->real_offset += 5;
4480 (*inline_costs) += costs;
/* Shared-generic path: complex casts go through the cached wrapper with the
 * (klass, cache) pair fetched from the rgctx. */
4488 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4489 MonoInst *cache_ins;
4491 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4496 /* klass - it's the second element of the cache entry*/
4497 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4500 args [2] = cache_ins;
4502 return emit_castclass_with_cache (cfg, klass, args);
4505 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: null passes; otherwise compare the runtime type. */
4508 NEW_BBLOCK (cfg, is_null_bb);
4510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4513 save_cast_details (cfg, klass, obj_reg, FALSE);
4515 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4517 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4519 int klass_reg = alloc_preg (cfg);
4521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single equality check would suffice, but the
 * vtable-compare variant is disabled (see FIXME below); compare klass instead. */
4523 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4524 /* the remoting code is broken, access the class for now */
4525 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4526 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4528 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4529 cfg->exception_ptr = klass;
4532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via the castclass helper IR. */
4539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4540 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4544 MONO_START_BB (cfg, is_null_bb);
4546 reset_cast_details (cfg);
4555 * Returns NULL and set the cfg exception on error.
4558 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4561 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4562 int obj_reg = src->dreg;
4563 int vtable_reg = alloc_preg (cfg);
4564 int res_reg = alloc_ireg_ref (cfg);
4565 MonoInst *klass_inst = NULL;
4570 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4571 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4572 MonoInst *cache_ins;
4574 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4579 /* klass - it's the second element of the cache entry*/
4580 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4583 args [2] = cache_ins;
4585 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4588 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4591 NEW_BBLOCK (cfg, is_null_bb);
4592 NEW_BBLOCK (cfg, false_bb);
4593 NEW_BBLOCK (cfg, end_bb);
4595 /* Do the assignment at the beginning, so the other assignment can be if converted */
4596 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4597 ins->type = STACK_OBJ;
4600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4605 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4606 g_assert (!context_used);
4607 /* the is_null_bb target simply copies the input register to the output */
4608 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4610 int klass_reg = alloc_preg (cfg);
4613 int rank_reg = alloc_preg (cfg);
4614 int eclass_reg = alloc_preg (cfg);
4616 g_assert (!context_used);
4617 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4622 if (klass->cast_class == mono_defaults.object_class) {
4623 int parent_reg = alloc_preg (cfg);
4624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4625 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4626 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4627 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4628 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4629 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4630 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4632 } else if (klass->cast_class == mono_defaults.enum_class) {
4633 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4634 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4635 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4636 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4638 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4639 /* Check that the object is a vector too */
4640 int bounds_reg = alloc_preg (cfg);
4641 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4646 /* the is_null_bb target simply copies the input register to the output */
4647 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4649 } else if (mono_class_is_nullable (klass)) {
4650 g_assert (!context_used);
4651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4652 /* the is_null_bb target simply copies the input register to the output */
4653 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4655 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4656 g_assert (!context_used);
4657 /* the remoting code is broken, access the class for now */
4658 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4659 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4661 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4662 cfg->exception_ptr = klass;
4665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4674 /* the is_null_bb target simply copies the input register to the output */
4675 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4680 MONO_START_BB (cfg, false_bb);
4682 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4685 MONO_START_BB (cfg, is_null_bb);
4687 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CISINST opcode: test object SRC against KLASS and
 *   materialize an integer result (0/1/2, see below) in a fresh ireg.
 *   NOTE(review): this view of the file is elided; the #else/#endif lines
 *   of the DISABLE_REMOTING regions are not visible here.
 */
4693 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4695 /* This opcode takes as input an object reference and a class, and returns:
4696 0) if the object is an instance of the class,
4697 1) if the object is not an instance of the class,
4698 2) if the object is a proxy whose type cannot be determined */
/* With remoting enabled, extra bblocks are needed for the proxy paths. */
4701 #ifndef DISABLE_REMOTING
4702 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4704 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4706 int obj_reg = src->dreg;
4707 int dreg = alloc_ireg (cfg);
4709 #ifndef DISABLE_REMOTING
4710 int klass_reg = alloc_preg (cfg);
4713 NEW_BBLOCK (cfg, true_bb);
4714 NEW_BBLOCK (cfg, false_bb);
4715 NEW_BBLOCK (cfg, end_bb);
4716 #ifndef DISABLE_REMOTING
4717 NEW_BBLOCK (cfg, false2_bb);
4718 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is "not an instance": branch straight to false_bb. */
4721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4722 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4724 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4725 #ifndef DISABLE_REMOTING
4726 NEW_BBLOCK (cfg, interface_fail_bb);
4729 tmp_reg = alloc_preg (cfg);
/* Load the vtable and run the interface check against it. */
4730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4731 #ifndef DISABLE_REMOTING
4732 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4733 MONO_START_BB (cfg, interface_fail_bb);
4734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still match. */
4736 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4738 tmp_reg = alloc_preg (cfg);
4739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* A proxy without custom type info yields result 2 (undeterminable). */
4740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4741 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4743 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4746 #ifndef DISABLE_REMOTING
/* Non-interface case: compare classes, with a special path for proxies. */
4747 tmp_reg = alloc_preg (cfg);
4748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4751 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4752 tmp_reg = alloc_preg (cfg);
/* For a proxy, test against the remote class' proxy_class instead. */
4753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4756 tmp_reg = alloc_preg (cfg);
4757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4761 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4764 MONO_START_BB (cfg, no_proxy_bb);
4766 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Remoting disabled: reaching the proxy path at JIT time is a bug. */
4768 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* false_bb: not an instance -> 1 */
4772 MONO_START_BB (cfg, false_bb);
4774 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4775 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4777 #ifndef DISABLE_REMOTING
/* false2_bb: proxy whose type cannot be determined -> 2 */
4778 MONO_START_BB (cfg, false2_bb);
4780 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4781 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: is an instance -> 0 */
4784 MONO_START_BB (cfg, true_bb);
4786 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4788 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4-typed instruction. */
4791 MONO_INST_NEW (cfg, ins, OP_ICONST);
4793 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CCASTCLASS opcode: cast object SRC to KLASS, producing
 *   an integer result (0/1, see below) or throwing InvalidCastException.
 */
4799 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4801 /* This opcode takes as input an object reference and a class, and returns:
4802 0) if the object is an instance of the class,
4803 1) if the object is a proxy whose type cannot be determined
4804 an InvalidCastException exception is thrown otherwise*/
4807 #ifndef DISABLE_REMOTING
4808 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4810 MonoBasicBlock *ok_result_bb;
4812 int obj_reg = src->dreg;
4813 int dreg = alloc_ireg (cfg);
4814 int tmp_reg = alloc_preg (cfg);
4816 #ifndef DISABLE_REMOTING
4817 int klass_reg = alloc_preg (cfg);
4818 NEW_BBLOCK (cfg, end_bb);
4821 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting null always succeeds. */
4823 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4824 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failed cast can report both types. */
4826 save_cast_details (cfg, klass, obj_reg, FALSE);
4828 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4829 #ifndef DISABLE_REMOTING
4830 NEW_BBLOCK (cfg, interface_fail_bb);
4832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4833 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4834 MONO_START_BB (cfg, interface_fail_bb);
4835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface not implemented: only a transparent proxy may still pass. */
4837 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4839 tmp_reg = alloc_preg (cfg);
4840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info -> InvalidCastException. */
4841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4842 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info -> result 1 ("cannot determine"). */
4844 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4845 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4847 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* NULL target bblocks: the iface cast throws on failure instead of branching. */
4848 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4852 #ifndef DISABLE_REMOTING
4853 NEW_BBLOCK (cfg, no_proxy_bb);
4855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4857 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, check against the remote class' proxy_class. */
4859 tmp_reg = alloc_preg (cfg);
4860 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4863 tmp_reg = alloc_preg (cfg);
4864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4868 NEW_BBLOCK (cfg, fail_1_bb);
4870 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy type could not be determined -> 1 */
4872 MONO_START_BB (cfg, fail_1_bb);
4874 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4877 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a failing castclass throws InvalidCastException. */
4879 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4881 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* ok_result_bb: cast succeeded -> 0 */
4885 MONO_START_BB (cfg, ok_result_bb);
4887 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4889 #ifndef DISABLE_REMOTING
4890 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4-typed instruction. */
4894 MONO_INST_NEW (cfg, ins, OP_ICONST);
4896 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline expansion of Enum.HasFlag () for enum class KLASS:
 *   load the enum value from ENUM_THIS, AND it with ENUM_FLAG's value and
 *   compare the result for equality with ENUM_FLAG ((value & flag) == flag).
 */
4901 static G_GNUC_UNUSED MonoInst*
4902 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4904 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4905 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Select 32 bit vs 64 bit ops from the enum's underlying type. */
4908 switch (enum_type->type) {
4911 #if SIZEOF_REGISTER == 8
4923 MonoInst *load, *and, *cmp, *ceq;
4924 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4925 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4926 int dest_reg = alloc_ireg (cfg);
4928 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4929 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4930 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4931 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4933 ceq->type = STACK_I4;
/* NOTE(review): the four ops are decomposed by hand here — presumably
 * because the regular decompose pass does not run over these late
 * instructions; confirm against the surrounding pass ordering. */
4936 load = mono_decompose_opcode (cfg, load);
4937 and = mono_decompose_opcode (cfg, and);
4938 cmp = mono_decompose_opcode (cfg, cmp);
4939 ceq = mono_decompose_opcode (cfg, ceq);
4947 * Returns NULL and sets the cfg exception on error.
4949 static G_GNUC_UNUSED MonoInst*
4950 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
/* Emit the IR equivalent of the delegate constructor: allocate the
 * delegate object and fill in target/method/invoke_impl/method_ptr. */
4954 gpointer trampoline;
4955 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke implementation; bail out if the
 * runtime cannot provide one for this invoke signature. */
4959 if (virtual && !cfg->llvm_only) {
4960 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4963 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4967 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4971 if (cfg->llvm_only) {
4972 MonoInst *args [16];
4975 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4976 * the address of a gshared method. So use a JIT icall.
4977 * FIXME: Optimize this.
4981 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD)
4982 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4987 /* Inline the contents of mono_delegate_ctor */
4989 /* Set target field */
4990 /* Optimize away setting of NULL target */
4991 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4992 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* delegate->target is a reference field: keep the GC informed. */
4993 if (cfg->gen_write_barriers) {
4994 dreg = alloc_preg (cfg);
4995 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4996 emit_write_barrier (cfg, ptr, target);
5000 /* Set method field */
5001 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5002 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5005 * To avoid looking up the compiled code belonging to the target method
5006 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5007 * store it, and we fill it after the method has been compiled.
5009 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5010 MonoInst *code_slot_ins;
5013 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
5015 domain = mono_domain_get ();
/* method_code_hash is lazily created/filled under the domain lock. */
5016 mono_domain_lock (domain);
5017 if (!domain_jit_info (domain)->method_code_hash)
5018 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5019 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5021 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5022 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5024 mono_domain_unlock (domain);
5026 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: the trampoline is resolved through a patch at load time. */
5031 if (cfg->compile_aot) {
5032 MonoDelegateClassMethodPair *del_tramp;
5034 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5035 del_tramp->klass = klass;
5036 del_tramp->method = context_used ? NULL : method;
5037 del_tramp->is_virtual = virtual;
5038 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5041 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5043 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5044 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5047 /* Set invoke_impl field */
5049 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Otherwise, read invoke_impl/method_ptr out of the tramp info struct. */
5051 dreg = alloc_preg (cfg);
5052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5053 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5055 dreg = alloc_preg (cfg);
5056 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5057 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate. */
5060 dreg = alloc_preg (cfg);
5061 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5062 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5064 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a RANK
 *   dimensional array; SP holds the length/bound arguments from the stack.
 */
5070 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5072 MonoJitICallInfo *info;
5074 /* Need to register the icall so it gets an icall wrapper */
5075 info = mono_get_array_new_va_icall (rank);
5077 cfg->flags |= MONO_CFG_HAS_VARARGS;
5079 /* mono_array_new_va () needs a vararg calling convention */
5080 cfg->disable_llvm = TRUE;
5082 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5083 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5087 * handle_constrained_gsharedvt_call:
5089 * Handle constrained calls where the receiver is a gsharedvt type.
5090 * Return the instruction representing the call. Set the cfg exception on failure.
5093 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5094 gboolean *ref_emit_widen)
5096 MonoInst *ins = NULL;
5097 gboolean emit_widen = *ref_emit_widen;
5100 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5101 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5102 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a restricted set of signatures is supported; see the comment below. */
5104 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5105 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5106 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5107 MonoInst *args [16];
5110 * This case handles calls to
5111 * - object:ToString()/Equals()/GetHashCode(),
5112 * - System.IComparable<T>:CompareTo()
5113 * - System.IEquatable<T>:Equals ()
5114 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (rgctx fetch when it is generic-shared). */
5118 if (mono_method_check_context_used (cmethod))
5119 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5121 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5122 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5124 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5125 if (fsig->hasthis && fsig->param_count) {
5126 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5127 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5128 ins->dreg = alloc_preg (cfg);
5129 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5130 MONO_ADD_INS (cfg->cbb, ins);
5133 if (mini_is_gsharedvt_type (fsig->params [0])) {
5134 int addr_reg, deref_arg_reg;
/* Tell the icall whether the argument needs to be dereferenced. */
5136 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5137 deref_arg_reg = alloc_preg (cfg);
5138 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5139 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5141 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5142 addr_reg = ins->dreg;
5143 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5145 EMIT_NEW_ICONST (cfg, args [3], 0);
5146 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5149 EMIT_NEW_ICONST (cfg, args [3], 0);
5150 EMIT_NEW_ICONST (cfg, args [4], 0);
5152 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/load it as needed. */
5155 if (mini_is_gsharedvt_type (fsig->ret)) {
5156 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5157 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the boxed value. */
5161 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5162 MONO_ADD_INS (cfg->cbb, add);
5164 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5165 MONO_ADD_INS (cfg->cbb, ins);
5166 /* ins represents the call result */
5169 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5172 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the start of the entry bblock to initialize
 *   the GOT variable, plus a dummy use in the exit bblock to keep it alive.
 *   Does nothing when there is no got_var or it was already allocated.
 */
5181 mono_emit_load_got_addr (MonoCompile *cfg)
5183 MonoInst *getaddr, *dummy_use;
5185 if (!cfg->got_var || cfg->got_var_allocated)
5188 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5189 getaddr->cil_code = cfg->header->code;
5190 getaddr->dreg = cfg->got_var->dreg;
5192 /* Add it to the start of the first bblock */
5193 if (cfg->bb_entry->code) {
5194 getaddr->next = cfg->bb_entry->code;
5195 cfg->bb_entry->code = getaddr;
5198 MONO_ADD_INS (cfg->bb_entry, getaddr);
5200 cfg->got_var_allocated = TRUE;
5203 * Add a dummy use to keep the got_var alive, since real uses might
5204 * only be generated by the back ends.
5205 * Add it to end_bblock, so the variable's lifetime covers the whole
5207 * It would be better to make the usage of the got var explicit in all
5208 * cases when the backend needs it (i.e. calls, throw etc.), so this
5209 * wouldn't be needed.
5211 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5212 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size eligible for inlining; overridable via MONO_INLINELIMIT. */
5215 static int inline_limit;
5216 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD can be inlined into the current compilation:
 *   checks flags, size limit, class-initialization constraints and (on
 *   soft-float targets) R4 usage.
 */
5219 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5221 MonoMethodHeaderSummary header;
5223 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5224 MonoMethodSignature *sig = mono_method_signature (method);
5228 if (cfg->disable_inline)
/* Cap the inline recursion depth. */
5233 if (cfg->inline_depth > 10)
5236 if (!mono_method_get_header_summary (method, &header))
5239 /*runtime, icall and pinvoke are checked by summary call*/
5240 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5241 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5242 (mono_class_is_marshalbyref (method->klass)) ||
5246 /* also consider num_locals? */
5247 /* Do the size check early to avoid creating vtables */
5248 if (!inline_limit_inited) {
5249 if (g_getenv ("MONO_INLINELIMIT"))
5250 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5252 inline_limit = INLINE_LENGTH_LIMIT;
5253 inline_limit_inited = TRUE;
/* The AggressiveInlining attribute overrides the size limit. */
5255 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5259 * if we can initialize the class of the method right away, we do,
5260 * otherwise we don't allow inlining if the class needs initialization,
5261 * since it would mean inserting a call to mono_runtime_class_init()
5262 * inside the inlined code
5264 if (!(cfg->opt & MONO_OPT_SHARED)) {
5265 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5266 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5267 vtable = mono_class_vtable (cfg->domain, method->klass);
5270 if (!cfg->compile_aot)
5271 mono_runtime_class_init (vtable);
5272 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5273 if (cfg->run_cctors && method->klass->has_cctor) {
5274 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5275 if (!method->klass->runtime_info)
5276 /* No vtable created yet */
5278 vtable = mono_class_vtable (cfg->domain, method->klass);
5281 /* This makes so that inline cannot trigger */
5282 /* .cctors: too many apps depend on them */
5283 /* running with a specific order... */
5284 if (! vtable->initialized)
5286 mono_runtime_class_init (vtable);
5288 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5289 if (!method->klass->runtime_info)
5290 /* No vtable created yet */
5292 vtable = mono_class_vtable (cfg->domain, method->klass);
5295 if (!vtable->initialized)
5300 * If we're compiling for shared code
5301 * the cctor will need to be run at aot method load time, for example,
5302 * or at the end of the compilation of the inlining method.
5304 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5308 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float fallback: refuse to inline methods taking or returning R4. */
5309 if (mono_arch_is_soft_float ()) {
5311 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5313 for (i = 0; i < sig->param_count; ++i)
5314 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compile blacklist. */
5319 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD on KLASS requires a
 *   class-initialization check to be emitted.
 */
5326 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5328 if (!cfg->compile_aot) {
/* JIT case: if the cctor already ran, no runtime check is needed. */
5330 if (vtable->initialized)
5334 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5335 if (cfg->method == method)
5339 if (!mono_class_needs_cctor_run (klass, method))
5342 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5343 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one dimensional array element
 *   ARR [INDEX]. Emits a bounds check when BCHECK is set. Returns the
 *   instruction producing the element address (type STACK_MP).
 */
5350 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5354 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt: the element size is only known at runtime (see the rgctx
 * path further down). */
5357 if (mini_is_gsharedvt_variable_klass (klass)) {
5360 mono_class_init (klass);
5361 size = mono_class_array_element_size (klass);
5364 mult_reg = alloc_preg (cfg);
5365 array_reg = arr->dreg;
5366 index_reg = index->dreg;
5368 #if SIZEOF_REGISTER == 8
5369 /* The array reg is 64 bits but the index reg is only 32 */
5370 if (COMPILE_LLVM (cfg)) {
5372 index2_reg = index_reg;
5374 index2_reg = alloc_preg (cfg);
5375 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5378 if (index->type == STACK_I8) {
5379 index2_reg = alloc_preg (cfg);
5380 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5382 index2_reg = index_reg;
5387 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5389 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can be computed with a single LEA. */
5390 if (size == 1 || size == 2 || size == 4 || size == 8) {
5391 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5393 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5394 ins->klass = mono_class_get_element_class (klass);
5395 ins->type = STACK_MP;
5401 add_reg = alloc_ireg_mp (cfg);
5404 MonoInst *rgctx_ins;
/* gsharedvt: fetch the element size from the runtime generic context. */
5407 g_assert (cfg->gshared);
5408 context_used = mini_class_check_context_used (cfg, klass);
5409 g_assert (context_used);
5410 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5411 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
5415 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5416 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5417 ins->klass = mono_class_get_element_class (klass);
5418 ins->type = STACK_MP;
5419 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation (with per-dimension range checks) for an
 *   element of a two dimensional array: ARR [INDEX_INS1, INDEX_INS2].
 */
5425 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5427 int bounds_reg = alloc_preg (cfg);
5428 int add_reg = alloc_ireg_mp (cfg);
5429 int mult_reg = alloc_preg (cfg);
5430 int mult2_reg = alloc_preg (cfg);
5431 int low1_reg = alloc_preg (cfg);
5432 int low2_reg = alloc_preg (cfg);
5433 int high1_reg = alloc_preg (cfg);
5434 int high2_reg = alloc_preg (cfg);
5435 int realidx1_reg = alloc_preg (cfg);
5436 int realidx2_reg = alloc_preg (cfg);
5437 int sum_reg = alloc_preg (cfg);
5438 int index1, index2, tmpreg;
5442 mono_class_init (klass);
5443 size = mono_class_array_element_size (klass);
5445 index1 = index_ins1->dreg;
5446 index2 = index_ins2->dreg;
5448 #if SIZEOF_REGISTER == 8
5449 /* The array reg is 64 bits but the index reg is only 32 */
5450 if (COMPILE_LLVM (cfg)) {
5453 tmpreg = alloc_preg (cfg);
5454 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5456 tmpreg = alloc_preg (cfg);
5457 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5461 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5465 /* range checking */
5466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5467 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound, checked against length. */
5469 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5470 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5471 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5472 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5473 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5474 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5475 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: its bounds entry is one MonoArrayBounds further along. */
5477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5478 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5479 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5480 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5481 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5482 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5483 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2) + realidx2) * size + vector offset */
5485 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5486 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5488 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5489 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5491 ins->type = STACK_MP;
5493 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the Get/Set/Address methods
 *   of an array type. Fast paths exist for rank 1 and (conditionally)
 *   rank 2; everything else calls the marshalled Address () wrapper.
 */
5499 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5503 MonoMethod *addr_method;
5505 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the last signature parameter is the value, not an index. */
5507 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5510 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5512 /* emit_ldelema_2 depends on OP_LMUL */
5513 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5514 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5517 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the wrapper that computes the element address. */
5520 element_size = mono_class_array_element_size (eclass);
5521 addr_method = mono_marshal_get_array_address (rank, element_size);
5522 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor breakpoint instructions. */
5527 static MonoBreakPolicy
5528 always_insert_breakpoint (MonoMethod *method)
5530 return MONO_BREAK_POLICY_ALWAYS;
/* Active policy callback; replaced through mono_set_break_policy (). */
5533 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5536 * mono_set_break_policy:
5537 * policy_callback: the new callback function
5539 * Allow embedders to decide whether to actually obey breakpoint instructions
5540 * (both break IL instructions and Debugger.Break () method calls), for example
5541 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5542 * untrusted or semi-trusted code.
5544 * @policy_callback will be called every time a break point instruction needs to
5545 * be inserted with the method argument being the method that calls Debugger.Break()
5546 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5547 * if it wants the breakpoint to not be effective in the given method.
5548 * #MONO_BREAK_POLICY_ALWAYS is the default.
5551 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5553 if (policy_callback)
5554 break_policy_func = policy_callback;
/* A NULL callback resets to the default "always" policy. */
5556 break_policy_func = always_insert_breakpoint;
/* Return whether a breakpoint should be emitted for METHOD, according to
 * the installed break policy. (The name keeps its historical misspelling
 * to avoid breaking callers.) */
5560 should_insert_brekpoint (MonoMethod *method) {
5561 switch (break_policy_func (method)) {
5562 case MONO_BREAK_POLICY_ALWAYS:
5564 case MONO_BREAK_POLICY_NEVER:
5566 case MONO_BREAK_POLICY_ON_DBG:
5567 g_warning ("mdb no longer supported");
5570 g_warning ("Incorrect value returned from break policy callback");
5575 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/* IS_SET selects between the Set (store into the array) and Get (load
 * from the array) variants; args = { array, index, value-or-dest }. */
5577 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5579 MonoInst *addr, *store, *load;
5580 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5582 /* the bounds check is already done by the callers */
5583 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: load the value through args [2] and store it into the slot; a
 * write barrier is needed when the element is a reference. */
5585 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5586 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5587 if (mini_type_is_reference (fsig->params [2]))
5588 emit_write_barrier (cfg, addr, load);
/* Get: load the element and store it through the destination pointer. */
5590 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5591 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5598 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5600 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into array SP [0] at index SP [1].
 * When SAFETY_CHECKS is set, bounds (and, for reference elements,
 * covariance) checks are emitted.
 *
 * NOTE(review): this excerpt appears to be missing several lines
 * (local declarations, braces, and at least one `else` arm) compared
 * to the full source; the visible statements were left untouched.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
	/* Reference-type stores need a covariance check, unless the value is a
	 * known null constant (OP_PCONST with inst_p0 == NULL): route through
	 * the virtual stelemref marshalling helper. */
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		/* helper->slot is only meaningful once the vtable has been built. */
		mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		/* NOTE(review): the bodies of these two stack-type guards are not
		 * visible in this excerpt — presumably assertions; confirm against
		 * the full source. */
		if (sp [0]->type != STACK_OBJ)
		if (sp [2]->type != STACK_OBJ)

		/* Virtual call performs the array covariance check for us. */
		return mono_emit_method_call (cfg, helper, iargs, sp [0]);

	/* gsharedvt: element size is unknown at JIT time, so always go through
	 * ldelema and a variable-sized value store. */
	if (mini_is_gsharedvt_variable_klass (klass)) {
		// FIXME-VT: OP_ICONST optimization
		addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		ins->opcode = OP_STOREV_MEMBASE;
	} else if (sp [1]->opcode == OP_ICONST) {
		/* Constant index: fold the element offset at compile time. */
		int array_reg = sp [0]->dreg;
		int index_reg = sp [1]->dreg;
		int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		/* NOTE(review): an `else` arm (general variable-index case) seems to
		 * be elided just above this point in this excerpt. */
		MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		/* Reference stores must notify the GC via a write barrier. */
		if (generic_class_is_reference_type (cfg, klass))
			emit_write_barrier (cfg, addr, sp [2]);
5655 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5660 eklass = mono_class_from_mono_type (fsig->params [2]);
5662 eklass = mono_class_from_mono_type (fsig->ret);
5665 return emit_array_store (cfg, eklass, args, FALSE);
5667 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5668 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5674 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5677 int param_size, return_size;
5679 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5680 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5682 if (cfg->verbose_level > 3)
5683 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5685 //Don't allow mixing reference types with value types
5686 if (param_klass->valuetype != return_klass->valuetype) {
5687 if (cfg->verbose_level > 3)
5688 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5692 if (!param_klass->valuetype) {
5693 if (cfg->verbose_level > 3)
5694 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5699 if (param_klass->has_references || return_klass->has_references)
5702 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5703 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5704 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5705 if (cfg->verbose_level > 3)
5706 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5710 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5711 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5712 if (cfg->verbose_level > 3)
5713 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5717 param_size = mono_class_value_size (param_klass, &align);
5718 return_size = mono_class_value_size (return_klass, &align);
5720 //We can do it if sizes match
5721 if (param_size == return_size) {
5722 if (cfg->verbose_level > 3)
5723 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5727 //No simple way to handle struct if sizes don't match
5728 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5729 if (cfg->verbose_level > 3)
5730 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5735 * Same reg size category.
5736 * A quick note on why we don't require widening here.
5737 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5739 * Since the source value comes from a function argument, the JIT will already have
5740 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5742 if (param_size <= 4 && return_size <= 4) {
5743 if (cfg->verbose_level > 3)
5744 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5752 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5754 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5755 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5757 if (mini_is_gsharedvt_variable_type (fsig->ret))
5760 //Valuetypes that are semantically equivalent or numbers than can be widened to
5761 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5764 //Arrays of valuetypes that are semantically equivalent
5765 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5772 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5774 #ifdef MONO_ARCH_SIMD_INTRINSICS
5775 MonoInst *ins = NULL;
5777 if (cfg->opt & MONO_OPT_SIMD) {
5778 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5784 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5788 emit_memory_barrier (MonoCompile *cfg, int kind)
5790 MonoInst *ins = NULL;
5791 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5792 MONO_ADD_INS (cfg->cbb, ins);
5793 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsics that only the LLVM backend supports: unary
 * System.Math functions (Sin/Cos/Sqrt/Abs on R8) and, when MONO_OPT_CMOV
 * is on, integer Min/Max.  `ins` stays NULL when no intrinsic matches.
 *
 * NOTE(review): this excerpt is missing several lines — the `opcode`
 * declaration, the per-branch opcode assignments (presumably OP_SIN,
 * OP_COS, OP_SQRT, OP_ABS, OP_IMIN, OP_LMIN, OP_IMAX, OP_LMAX), some
 * closing braces and the final returns.  Visible code left untouched.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* The LLVM backend supports these intrinsics */
	if (cmethod->klass == mono_defaults.math_class) {
		/* NOTE(review): opcode assignment bodies elided in this excerpt. */
		if (strcmp (cmethod->name, "Sin") == 0) {
		} else if (strcmp (cmethod->name, "Cos") == 0) {
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {

		/* Unary math intrinsic: one argument, R8 result in a fresh freg. */
		if (opcode && fsig->param_count == 1) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

		/* Min/Max only map to branchless opcodes when CMOV opt is enabled. */
		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				/* NOTE(review): the I4/I8 signed assignments (OP_IMIN/OP_LMIN,
				 * presumably) are elided in this excerpt. */
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;

		/* Binary Min/Max intrinsic: stack type follows the operand width. */
		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
5861 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5863 if (cmethod->klass == mono_defaults.array_class) {
5864 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5865 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5866 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5867 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5868 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5869 return emit_array_unsafe_mov (cfg, fsig, args);
5876 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5878 MonoInst *ins = NULL;
5880 static MonoClass *runtime_helpers_class = NULL;
5881 if (! runtime_helpers_class)
5882 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5883 "System.Runtime.CompilerServices", "RuntimeHelpers");
5885 if (cmethod->klass == mono_defaults.string_class) {
5886 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5887 int dreg = alloc_ireg (cfg);
5888 int index_reg = alloc_preg (cfg);
5889 int add_reg = alloc_preg (cfg);
5891 #if SIZEOF_REGISTER == 8
5892 /* The array reg is 64 bits but the index reg is only 32 */
5893 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5895 index_reg = args [1]->dreg;
5897 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5899 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5900 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5901 add_reg = ins->dreg;
5902 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5905 int mult_reg = alloc_preg (cfg);
5906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5907 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5908 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5909 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5911 type_from_op (cfg, ins, NULL, NULL);
5913 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5914 int dreg = alloc_ireg (cfg);
5915 /* Decompose later to allow more optimizations */
5916 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5917 ins->type = STACK_I4;
5918 ins->flags |= MONO_INST_FAULT;
5919 cfg->cbb->has_array_access = TRUE;
5920 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5925 } else if (cmethod->klass == mono_defaults.object_class) {
5927 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5928 int dreg = alloc_ireg_ref (cfg);
5929 int vt_reg = alloc_preg (cfg);
5930 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5931 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5932 type_from_op (cfg, ins, NULL, NULL);
5935 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5936 int dreg = alloc_ireg (cfg);
5937 int t1 = alloc_ireg (cfg);
5939 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5940 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5941 ins->type = STACK_I4;
5944 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5945 MONO_INST_NEW (cfg, ins, OP_NOP);
5946 MONO_ADD_INS (cfg->cbb, ins);
5950 } else if (cmethod->klass == mono_defaults.array_class) {
5951 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5952 return emit_array_generic_access (cfg, fsig, args, FALSE);
5953 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5954 return emit_array_generic_access (cfg, fsig, args, TRUE);
5956 #ifndef MONO_BIG_ARRAYS
5958 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5961 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5962 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5963 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5964 int dreg = alloc_ireg (cfg);
5965 int bounds_reg = alloc_ireg_mp (cfg);
5966 MonoBasicBlock *end_bb, *szarray_bb;
5967 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5969 NEW_BBLOCK (cfg, end_bb);
5970 NEW_BBLOCK (cfg, szarray_bb);
5972 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5973 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5974 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5975 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5976 /* Non-szarray case */
5978 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5979 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5981 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5982 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5983 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5984 MONO_START_BB (cfg, szarray_bb);
5987 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5988 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5990 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5991 MONO_START_BB (cfg, end_bb);
5993 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5994 ins->type = STACK_I4;
6000 if (cmethod->name [0] != 'g')
6003 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6004 int dreg = alloc_ireg (cfg);
6005 int vtable_reg = alloc_preg (cfg);
6006 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6007 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6008 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6009 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6010 type_from_op (cfg, ins, NULL, NULL);
6013 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6014 int dreg = alloc_ireg (cfg);
6016 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6017 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6018 type_from_op (cfg, ins, NULL, NULL);
6023 } else if (cmethod->klass == runtime_helpers_class) {
6025 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6026 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6030 } else if (cmethod->klass == mono_defaults.thread_class) {
6031 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6032 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6033 MONO_ADD_INS (cfg->cbb, ins);
6035 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6036 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6037 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6039 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6041 if (fsig->params [0]->type == MONO_TYPE_I1)
6042 opcode = OP_LOADI1_MEMBASE;
6043 else if (fsig->params [0]->type == MONO_TYPE_U1)
6044 opcode = OP_LOADU1_MEMBASE;
6045 else if (fsig->params [0]->type == MONO_TYPE_I2)
6046 opcode = OP_LOADI2_MEMBASE;
6047 else if (fsig->params [0]->type == MONO_TYPE_U2)
6048 opcode = OP_LOADU2_MEMBASE;
6049 else if (fsig->params [0]->type == MONO_TYPE_I4)
6050 opcode = OP_LOADI4_MEMBASE;
6051 else if (fsig->params [0]->type == MONO_TYPE_U4)
6052 opcode = OP_LOADU4_MEMBASE;
6053 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6054 opcode = OP_LOADI8_MEMBASE;
6055 else if (fsig->params [0]->type == MONO_TYPE_R4)
6056 opcode = OP_LOADR4_MEMBASE;
6057 else if (fsig->params [0]->type == MONO_TYPE_R8)
6058 opcode = OP_LOADR8_MEMBASE;
6059 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6060 opcode = OP_LOAD_MEMBASE;
6063 MONO_INST_NEW (cfg, ins, opcode);
6064 ins->inst_basereg = args [0]->dreg;
6065 ins->inst_offset = 0;
6066 MONO_ADD_INS (cfg->cbb, ins);
6068 switch (fsig->params [0]->type) {
6075 ins->dreg = mono_alloc_ireg (cfg);
6076 ins->type = STACK_I4;
6080 ins->dreg = mono_alloc_lreg (cfg);
6081 ins->type = STACK_I8;
6085 ins->dreg = mono_alloc_ireg (cfg);
6086 #if SIZEOF_REGISTER == 8
6087 ins->type = STACK_I8;
6089 ins->type = STACK_I4;
6094 ins->dreg = mono_alloc_freg (cfg);
6095 ins->type = STACK_R8;
6098 g_assert (mini_type_is_reference (fsig->params [0]));
6099 ins->dreg = mono_alloc_ireg_ref (cfg);
6100 ins->type = STACK_OBJ;
6104 if (opcode == OP_LOADI8_MEMBASE)
6105 ins = mono_decompose_opcode (cfg, ins);
6107 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6111 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6113 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6115 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6116 opcode = OP_STOREI1_MEMBASE_REG;
6117 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6118 opcode = OP_STOREI2_MEMBASE_REG;
6119 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6120 opcode = OP_STOREI4_MEMBASE_REG;
6121 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6122 opcode = OP_STOREI8_MEMBASE_REG;
6123 else if (fsig->params [0]->type == MONO_TYPE_R4)
6124 opcode = OP_STORER4_MEMBASE_REG;
6125 else if (fsig->params [0]->type == MONO_TYPE_R8)
6126 opcode = OP_STORER8_MEMBASE_REG;
6127 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6128 opcode = OP_STORE_MEMBASE_REG;
6131 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6133 MONO_INST_NEW (cfg, ins, opcode);
6134 ins->sreg1 = args [1]->dreg;
6135 ins->inst_destbasereg = args [0]->dreg;
6136 ins->inst_offset = 0;
6137 MONO_ADD_INS (cfg->cbb, ins);
6139 if (opcode == OP_STOREI8_MEMBASE_REG)
6140 ins = mono_decompose_opcode (cfg, ins);
6145 } else if (cmethod->klass->image == mono_defaults.corlib &&
6146 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6147 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6150 #if SIZEOF_REGISTER == 8
6151 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6152 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6153 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6154 ins->dreg = mono_alloc_preg (cfg);
6155 ins->sreg1 = args [0]->dreg;
6156 ins->type = STACK_I8;
6157 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6158 MONO_ADD_INS (cfg->cbb, ins);
6162 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6164 /* 64 bit reads are already atomic */
6165 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6166 load_ins->dreg = mono_alloc_preg (cfg);
6167 load_ins->inst_basereg = args [0]->dreg;
6168 load_ins->inst_offset = 0;
6169 load_ins->type = STACK_I8;
6170 MONO_ADD_INS (cfg->cbb, load_ins);
6172 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6179 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6180 MonoInst *ins_iconst;
6183 if (fsig->params [0]->type == MONO_TYPE_I4) {
6184 opcode = OP_ATOMIC_ADD_I4;
6185 cfg->has_atomic_add_i4 = TRUE;
6187 #if SIZEOF_REGISTER == 8
6188 else if (fsig->params [0]->type == MONO_TYPE_I8)
6189 opcode = OP_ATOMIC_ADD_I8;
6192 if (!mono_arch_opcode_supported (opcode))
6194 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6195 ins_iconst->inst_c0 = 1;
6196 ins_iconst->dreg = mono_alloc_ireg (cfg);
6197 MONO_ADD_INS (cfg->cbb, ins_iconst);
6199 MONO_INST_NEW (cfg, ins, opcode);
6200 ins->dreg = mono_alloc_ireg (cfg);
6201 ins->inst_basereg = args [0]->dreg;
6202 ins->inst_offset = 0;
6203 ins->sreg2 = ins_iconst->dreg;
6204 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6205 MONO_ADD_INS (cfg->cbb, ins);
6207 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6208 MonoInst *ins_iconst;
6211 if (fsig->params [0]->type == MONO_TYPE_I4) {
6212 opcode = OP_ATOMIC_ADD_I4;
6213 cfg->has_atomic_add_i4 = TRUE;
6215 #if SIZEOF_REGISTER == 8
6216 else if (fsig->params [0]->type == MONO_TYPE_I8)
6217 opcode = OP_ATOMIC_ADD_I8;
6220 if (!mono_arch_opcode_supported (opcode))
6222 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6223 ins_iconst->inst_c0 = -1;
6224 ins_iconst->dreg = mono_alloc_ireg (cfg);
6225 MONO_ADD_INS (cfg->cbb, ins_iconst);
6227 MONO_INST_NEW (cfg, ins, opcode);
6228 ins->dreg = mono_alloc_ireg (cfg);
6229 ins->inst_basereg = args [0]->dreg;
6230 ins->inst_offset = 0;
6231 ins->sreg2 = ins_iconst->dreg;
6232 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6233 MONO_ADD_INS (cfg->cbb, ins);
6235 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6238 if (fsig->params [0]->type == MONO_TYPE_I4) {
6239 opcode = OP_ATOMIC_ADD_I4;
6240 cfg->has_atomic_add_i4 = TRUE;
6242 #if SIZEOF_REGISTER == 8
6243 else if (fsig->params [0]->type == MONO_TYPE_I8)
6244 opcode = OP_ATOMIC_ADD_I8;
6247 if (!mono_arch_opcode_supported (opcode))
6249 MONO_INST_NEW (cfg, ins, opcode);
6250 ins->dreg = mono_alloc_ireg (cfg);
6251 ins->inst_basereg = args [0]->dreg;
6252 ins->inst_offset = 0;
6253 ins->sreg2 = args [1]->dreg;
6254 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6255 MONO_ADD_INS (cfg->cbb, ins);
6258 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6259 MonoInst *f2i = NULL, *i2f;
6260 guint32 opcode, f2i_opcode, i2f_opcode;
6261 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6262 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6264 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6265 fsig->params [0]->type == MONO_TYPE_R4) {
6266 opcode = OP_ATOMIC_EXCHANGE_I4;
6267 f2i_opcode = OP_MOVE_F_TO_I4;
6268 i2f_opcode = OP_MOVE_I4_TO_F;
6269 cfg->has_atomic_exchange_i4 = TRUE;
6271 #if SIZEOF_REGISTER == 8
6273 fsig->params [0]->type == MONO_TYPE_I8 ||
6274 fsig->params [0]->type == MONO_TYPE_R8 ||
6275 fsig->params [0]->type == MONO_TYPE_I) {
6276 opcode = OP_ATOMIC_EXCHANGE_I8;
6277 f2i_opcode = OP_MOVE_F_TO_I8;
6278 i2f_opcode = OP_MOVE_I8_TO_F;
6281 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6282 opcode = OP_ATOMIC_EXCHANGE_I4;
6283 cfg->has_atomic_exchange_i4 = TRUE;
6289 if (!mono_arch_opcode_supported (opcode))
6293 /* TODO: Decompose these opcodes instead of bailing here. */
6294 if (COMPILE_SOFT_FLOAT (cfg))
6297 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6298 f2i->dreg = mono_alloc_ireg (cfg);
6299 f2i->sreg1 = args [1]->dreg;
6300 if (f2i_opcode == OP_MOVE_F_TO_I4)
6301 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6302 MONO_ADD_INS (cfg->cbb, f2i);
6305 MONO_INST_NEW (cfg, ins, opcode);
6306 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6307 ins->inst_basereg = args [0]->dreg;
6308 ins->inst_offset = 0;
6309 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6310 MONO_ADD_INS (cfg->cbb, ins);
6312 switch (fsig->params [0]->type) {
6314 ins->type = STACK_I4;
6317 ins->type = STACK_I8;
6320 #if SIZEOF_REGISTER == 8
6321 ins->type = STACK_I8;
6323 ins->type = STACK_I4;
6328 ins->type = STACK_R8;
6331 g_assert (mini_type_is_reference (fsig->params [0]));
6332 ins->type = STACK_OBJ;
6337 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6338 i2f->dreg = mono_alloc_freg (cfg);
6339 i2f->sreg1 = ins->dreg;
6340 i2f->type = STACK_R8;
6341 if (i2f_opcode == OP_MOVE_I4_TO_F)
6342 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6343 MONO_ADD_INS (cfg->cbb, i2f);
6348 if (cfg->gen_write_barriers && is_ref)
6349 emit_write_barrier (cfg, args [0], args [1]);
6351 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6352 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6353 guint32 opcode, f2i_opcode, i2f_opcode;
6354 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6355 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6357 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6358 fsig->params [1]->type == MONO_TYPE_R4) {
6359 opcode = OP_ATOMIC_CAS_I4;
6360 f2i_opcode = OP_MOVE_F_TO_I4;
6361 i2f_opcode = OP_MOVE_I4_TO_F;
6362 cfg->has_atomic_cas_i4 = TRUE;
6364 #if SIZEOF_REGISTER == 8
6366 fsig->params [1]->type == MONO_TYPE_I8 ||
6367 fsig->params [1]->type == MONO_TYPE_R8 ||
6368 fsig->params [1]->type == MONO_TYPE_I) {
6369 opcode = OP_ATOMIC_CAS_I8;
6370 f2i_opcode = OP_MOVE_F_TO_I8;
6371 i2f_opcode = OP_MOVE_I8_TO_F;
6374 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6375 opcode = OP_ATOMIC_CAS_I4;
6376 cfg->has_atomic_cas_i4 = TRUE;
6382 if (!mono_arch_opcode_supported (opcode))
6386 /* TODO: Decompose these opcodes instead of bailing here. */
6387 if (COMPILE_SOFT_FLOAT (cfg))
6390 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6391 f2i_new->dreg = mono_alloc_ireg (cfg);
6392 f2i_new->sreg1 = args [1]->dreg;
6393 if (f2i_opcode == OP_MOVE_F_TO_I4)
6394 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6395 MONO_ADD_INS (cfg->cbb, f2i_new);
6397 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6398 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6399 f2i_cmp->sreg1 = args [2]->dreg;
6400 if (f2i_opcode == OP_MOVE_F_TO_I4)
6401 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6402 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6405 MONO_INST_NEW (cfg, ins, opcode);
6406 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6407 ins->sreg1 = args [0]->dreg;
6408 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6409 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6410 MONO_ADD_INS (cfg->cbb, ins);
6412 switch (fsig->params [1]->type) {
6414 ins->type = STACK_I4;
6417 ins->type = STACK_I8;
6420 #if SIZEOF_REGISTER == 8
6421 ins->type = STACK_I8;
6423 ins->type = STACK_I4;
6427 ins->type = cfg->r4_stack_type;
6430 ins->type = STACK_R8;
6433 g_assert (mini_type_is_reference (fsig->params [1]));
6434 ins->type = STACK_OBJ;
6439 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6440 i2f->dreg = mono_alloc_freg (cfg);
6441 i2f->sreg1 = ins->dreg;
6442 i2f->type = STACK_R8;
6443 if (i2f_opcode == OP_MOVE_I4_TO_F)
6444 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6445 MONO_ADD_INS (cfg->cbb, i2f);
6450 if (cfg->gen_write_barriers && is_ref)
6451 emit_write_barrier (cfg, args [0], args [1]);
6453 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6454 fsig->params [1]->type == MONO_TYPE_I4) {
6455 MonoInst *cmp, *ceq;
6457 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6460 /* int32 r = CAS (location, value, comparand); */
6461 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6462 ins->dreg = alloc_ireg (cfg);
6463 ins->sreg1 = args [0]->dreg;
6464 ins->sreg2 = args [1]->dreg;
6465 ins->sreg3 = args [2]->dreg;
6466 ins->type = STACK_I4;
6467 MONO_ADD_INS (cfg->cbb, ins);
6469 /* bool result = r == comparand; */
6470 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6471 cmp->sreg1 = ins->dreg;
6472 cmp->sreg2 = args [2]->dreg;
6473 cmp->type = STACK_I4;
6474 MONO_ADD_INS (cfg->cbb, cmp);
6476 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6477 ceq->dreg = alloc_ireg (cfg);
6478 ceq->type = STACK_I4;
6479 MONO_ADD_INS (cfg->cbb, ceq);
6481 /* *success = result; */
6482 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6484 cfg->has_atomic_cas_i4 = TRUE;
6486 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6487 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6491 } else if (cmethod->klass->image == mono_defaults.corlib &&
6492 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6493 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6496 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6498 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6499 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6501 if (fsig->params [0]->type == MONO_TYPE_I1)
6502 opcode = OP_ATOMIC_LOAD_I1;
6503 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6504 opcode = OP_ATOMIC_LOAD_U1;
6505 else if (fsig->params [0]->type == MONO_TYPE_I2)
6506 opcode = OP_ATOMIC_LOAD_I2;
6507 else if (fsig->params [0]->type == MONO_TYPE_U2)
6508 opcode = OP_ATOMIC_LOAD_U2;
6509 else if (fsig->params [0]->type == MONO_TYPE_I4)
6510 opcode = OP_ATOMIC_LOAD_I4;
6511 else if (fsig->params [0]->type == MONO_TYPE_U4)
6512 opcode = OP_ATOMIC_LOAD_U4;
6513 else if (fsig->params [0]->type == MONO_TYPE_R4)
6514 opcode = OP_ATOMIC_LOAD_R4;
6515 else if (fsig->params [0]->type == MONO_TYPE_R8)
6516 opcode = OP_ATOMIC_LOAD_R8;
6517 #if SIZEOF_REGISTER == 8
6518 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6519 opcode = OP_ATOMIC_LOAD_I8;
6520 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6521 opcode = OP_ATOMIC_LOAD_U8;
6523 else if (fsig->params [0]->type == MONO_TYPE_I)
6524 opcode = OP_ATOMIC_LOAD_I4;
6525 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6526 opcode = OP_ATOMIC_LOAD_U4;
6530 if (!mono_arch_opcode_supported (opcode))
6533 MONO_INST_NEW (cfg, ins, opcode);
6534 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6535 ins->sreg1 = args [0]->dreg;
6536 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6537 MONO_ADD_INS (cfg->cbb, ins);
6539 switch (fsig->params [0]->type) {
6540 case MONO_TYPE_BOOLEAN:
6547 ins->type = STACK_I4;
6551 ins->type = STACK_I8;
6555 #if SIZEOF_REGISTER == 8
6556 ins->type = STACK_I8;
6558 ins->type = STACK_I4;
6562 ins->type = cfg->r4_stack_type;
6565 ins->type = STACK_R8;
6568 g_assert (mini_type_is_reference (fsig->params [0]));
6569 ins->type = STACK_OBJ;
6575 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6577 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6579 if (fsig->params [0]->type == MONO_TYPE_I1)
6580 opcode = OP_ATOMIC_STORE_I1;
6581 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6582 opcode = OP_ATOMIC_STORE_U1;
6583 else if (fsig->params [0]->type == MONO_TYPE_I2)
6584 opcode = OP_ATOMIC_STORE_I2;
6585 else if (fsig->params [0]->type == MONO_TYPE_U2)
6586 opcode = OP_ATOMIC_STORE_U2;
6587 else if (fsig->params [0]->type == MONO_TYPE_I4)
6588 opcode = OP_ATOMIC_STORE_I4;
6589 else if (fsig->params [0]->type == MONO_TYPE_U4)
6590 opcode = OP_ATOMIC_STORE_U4;
6591 else if (fsig->params [0]->type == MONO_TYPE_R4)
6592 opcode = OP_ATOMIC_STORE_R4;
6593 else if (fsig->params [0]->type == MONO_TYPE_R8)
6594 opcode = OP_ATOMIC_STORE_R8;
6595 #if SIZEOF_REGISTER == 8
6596 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6597 opcode = OP_ATOMIC_STORE_I8;
6598 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6599 opcode = OP_ATOMIC_STORE_U8;
6601 else if (fsig->params [0]->type == MONO_TYPE_I)
6602 opcode = OP_ATOMIC_STORE_I4;
6603 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6604 opcode = OP_ATOMIC_STORE_U4;
6608 if (!mono_arch_opcode_supported (opcode))
6611 MONO_INST_NEW (cfg, ins, opcode);
6612 ins->dreg = args [0]->dreg;
6613 ins->sreg1 = args [1]->dreg;
6614 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6615 MONO_ADD_INS (cfg->cbb, ins);
6617 if (cfg->gen_write_barriers && is_ref)
6618 emit_write_barrier (cfg, args [0], args [1]);
6624 } else if (cmethod->klass->image == mono_defaults.corlib &&
6625 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6626 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6627 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6628 if (should_insert_brekpoint (cfg->method)) {
6629 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6631 MONO_INST_NEW (cfg, ins, OP_NOP);
6632 MONO_ADD_INS (cfg->cbb, ins);
6636 } else if (cmethod->klass->image == mono_defaults.corlib &&
6637 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6638 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6639 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6641 EMIT_NEW_ICONST (cfg, ins, 1);
6643 EMIT_NEW_ICONST (cfg, ins, 0);
6646 } else if (cmethod->klass->image == mono_defaults.corlib &&
6647 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6648 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6649 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6650 /* No stack walks are currently available, so implement this as an intrinsic */
6651 MonoInst *assembly_ins;
6653 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6654 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6657 } else if (cmethod->klass == mono_defaults.math_class) {
6659 * There is general branchless code for Min/Max, but it does not work for
6661 * http://everything2.com/?node_id=1051618
6663 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6664 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6665 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6666 !strcmp (cmethod->klass->name, "Selector")) ||
6667 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6668 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6669 !strcmp (cmethod->klass->name, "Selector"))
6671 if (cfg->backend->have_objc_get_selector &&
6672 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6673 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6676 MonoJumpInfoToken *ji;
6679 cfg->disable_llvm = TRUE;
6681 if (args [0]->opcode == OP_GOT_ENTRY) {
6682 pi = args [0]->inst_p1;
6683 g_assert (pi->opcode == OP_PATCH_INFO);
6684 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6687 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6688 ji = args [0]->inst_p0;
6691 NULLIFY_INS (args [0]);
6694 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6695 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6696 ins->dreg = mono_alloc_ireg (cfg);
6698 ins->inst_p0 = mono_string_to_utf8 (s);
6699 MONO_ADD_INS (cfg->cbb, ins);
6704 #ifdef MONO_ARCH_SIMD_INTRINSICS
6705 if (cfg->opt & MONO_OPT_SIMD) {
6706 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6712 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6716 if (COMPILE_LLVM (cfg)) {
6717 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6722 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6726 * This entry point could be used later for arbitrary method
6729 inline static MonoInst*
6730 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6731 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6733 if (method->klass == mono_defaults.string_class) {
6734 /* managed string allocation support */
6735 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6736 MonoInst *iargs [2];
6737 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6738 MonoMethod *managed_alloc = NULL;
6740 g_assert (vtable); /*Should not fail since it System.String*/
6741 #ifndef MONO_CROSS_COMPILE
6742 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6746 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6747 iargs [1] = args [0];
6748 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
6755 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6757 MonoInst *store, *temp;
6760 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6761 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6764 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6765 * would be different than the MonoInst's used to represent arguments, and
6766 * the ldelema implementation can't deal with that.
6767 * Solution: When ldelema is used on an inline argument, create a var for
6768 * it, emit ldelema on that var, and emit the saving code below in
6769 * inline_method () if needed.
6771 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6772 cfg->args [i] = temp;
6773 /* This uses cfg->args [i] which is set by the preceeding line */
6774 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6775 store->cil_code = sp [0]->cil_code;
6780 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6781 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6783 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6785 check_inline_called_method_name_limit (MonoMethod *called_method)
6788 static const char *limit = NULL;
6790 if (limit == NULL) {
6791 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6793 if (limit_string != NULL)
6794 limit = limit_string;
6799 if (limit [0] != '\0') {
6800 char *called_method_name = mono_method_full_name (called_method, TRUE);
6802 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6803 g_free (called_method_name);
6805 //return (strncmp_result <= 0);
6806 return (strncmp_result == 0);
6813 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6815 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6818 static const char *limit = NULL;
6820 if (limit == NULL) {
6821 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6822 if (limit_string != NULL) {
6823 limit = limit_string;
6829 if (limit [0] != '\0') {
6830 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6832 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6833 g_free (caller_method_name);
6835 //return (strncmp_result <= 0);
6836 return (strncmp_result == 0);
6844 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6846 static double r8_0 = 0.0;
6847 static float r4_0 = 0.0;
6851 rtype = mini_get_underlying_type (rtype);
6855 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6856 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6857 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6858 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6859 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6860 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6861 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6862 ins->type = STACK_R4;
6863 ins->inst_p0 = (void*)&r4_0;
6865 MONO_ADD_INS (cfg->cbb, ins);
6866 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6867 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6868 ins->type = STACK_R8;
6869 ins->inst_p0 = (void*)&r8_0;
6871 MONO_ADD_INS (cfg->cbb, ins);
6872 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6873 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6874 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6875 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6876 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6878 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6883 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6887 rtype = mini_get_underlying_type (rtype);
6891 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6892 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6893 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6894 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6895 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6896 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6897 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6898 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6899 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6900 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6901 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6902 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6903 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6904 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6906 emit_init_rvar (cfg, dreg, rtype);
6910 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6912 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6914 MonoInst *var = cfg->locals [local];
6915 if (COMPILE_SOFT_FLOAT (cfg)) {
6917 int reg = alloc_dreg (cfg, var->type);
6918 emit_init_rvar (cfg, reg, type);
6919 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6922 emit_init_rvar (cfg, var->dreg, type);
6924 emit_dummy_init_rvar (cfg, var->dreg, type);
6931 * Return the cost of inlining CMETHOD.
6934 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6935 guchar *ip, guint real_offset, gboolean inline_always)
6937 MonoInst *ins, *rvar = NULL;
6938 MonoMethodHeader *cheader;
6939 MonoBasicBlock *ebblock, *sbblock;
6941 MonoMethod *prev_inlined_method;
6942 MonoInst **prev_locals, **prev_args;
6943 MonoType **prev_arg_types;
6944 guint prev_real_offset;
6945 GHashTable *prev_cbb_hash;
6946 MonoBasicBlock **prev_cil_offset_to_bb;
6947 MonoBasicBlock *prev_cbb;
6948 unsigned char* prev_cil_start;
6949 guint32 prev_cil_offset_to_bb_len;
6950 MonoMethod *prev_current_method;
6951 MonoGenericContext *prev_generic_context;
6952 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6954 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6956 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6957 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6960 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6961 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6966 fsig = mono_method_signature (cmethod);
6968 if (cfg->verbose_level > 2)
6969 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6971 if (!cmethod->inline_info) {
6972 cfg->stat_inlineable_methods++;
6973 cmethod->inline_info = 1;
6976 /* allocate local variables */
6977 cheader = mono_method_get_header (cmethod);
6979 if (cheader == NULL || mono_loader_get_last_error ()) {
6980 MonoLoaderError *error = mono_loader_get_last_error ();
6983 mono_metadata_free_mh (cheader);
6984 if (inline_always && error)
6985 mono_cfg_set_exception (cfg, error->exception_type);
6987 mono_loader_clear_error ();
6991 /*Must verify before creating locals as it can cause the JIT to assert.*/
6992 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6993 mono_metadata_free_mh (cheader);
6997 /* allocate space to store the return value */
6998 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6999 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
7002 prev_locals = cfg->locals;
7003 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7004 for (i = 0; i < cheader->num_locals; ++i)
7005 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7007 /* allocate start and end blocks */
7008 /* This is needed so if the inline is aborted, we can clean up */
7009 NEW_BBLOCK (cfg, sbblock);
7010 sbblock->real_offset = real_offset;
7012 NEW_BBLOCK (cfg, ebblock);
7013 ebblock->block_num = cfg->num_bblocks++;
7014 ebblock->real_offset = real_offset;
7016 prev_args = cfg->args;
7017 prev_arg_types = cfg->arg_types;
7018 prev_inlined_method = cfg->inlined_method;
7019 cfg->inlined_method = cmethod;
7020 cfg->ret_var_set = FALSE;
7021 cfg->inline_depth ++;
7022 prev_real_offset = cfg->real_offset;
7023 prev_cbb_hash = cfg->cbb_hash;
7024 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7025 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7026 prev_cil_start = cfg->cil_start;
7027 prev_cbb = cfg->cbb;
7028 prev_current_method = cfg->current_method;
7029 prev_generic_context = cfg->generic_context;
7030 prev_ret_var_set = cfg->ret_var_set;
7031 prev_disable_inline = cfg->disable_inline;
7033 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7036 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
7038 ret_var_set = cfg->ret_var_set;
7040 cfg->inlined_method = prev_inlined_method;
7041 cfg->real_offset = prev_real_offset;
7042 cfg->cbb_hash = prev_cbb_hash;
7043 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7044 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7045 cfg->cil_start = prev_cil_start;
7046 cfg->locals = prev_locals;
7047 cfg->args = prev_args;
7048 cfg->arg_types = prev_arg_types;
7049 cfg->current_method = prev_current_method;
7050 cfg->generic_context = prev_generic_context;
7051 cfg->ret_var_set = prev_ret_var_set;
7052 cfg->disable_inline = prev_disable_inline;
7053 cfg->inline_depth --;
7055 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7056 if (cfg->verbose_level > 2)
7057 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7059 cfg->stat_inlined_methods++;
7061 /* always add some code to avoid block split failures */
7062 MONO_INST_NEW (cfg, ins, OP_NOP);
7063 MONO_ADD_INS (prev_cbb, ins);
7065 prev_cbb->next_bb = sbblock;
7066 link_bblock (cfg, prev_cbb, sbblock);
7069 * Get rid of the begin and end bblocks if possible to aid local
7072 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7074 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7075 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7077 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7078 MonoBasicBlock *prev = ebblock->in_bb [0];
7079 mono_merge_basic_blocks (cfg, prev, ebblock);
7081 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7082 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7083 cfg->cbb = prev_cbb;
7087 * Its possible that the rvar is set in some prev bblock, but not in others.
7093 for (i = 0; i < ebblock->in_count; ++i) {
7094 bb = ebblock->in_bb [i];
7096 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7099 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7109 * If the inlined method contains only a throw, then the ret var is not
7110 * set, so set it to a dummy value.
7113 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7115 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7118 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7121 if (cfg->verbose_level > 2)
7122 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7123 cfg->exception_type = MONO_EXCEPTION_NONE;
7124 mono_loader_clear_error ();
7126 /* This gets rid of the newly added bblocks */
7127 cfg->cbb = prev_cbb;
7129 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7134 * Some of these comments may well be out-of-date.
7135 * Design decisions: we do a single pass over the IL code (and we do bblock
7136 * splitting/merging in the few cases when it's required: a back jump to an IL
7137 * address that was not already seen as bblock starting point).
7138 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7139 * Complex operations are decomposed in simpler ones right away. We need to let the
7140 * arch-specific code peek and poke inside this process somehow (except when the
7141 * optimizations can take advantage of the full semantic info of coarse opcodes).
7142 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7143 * MonoInst->opcode initially is the IL opcode or some simplification of that
7144 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7145 * opcode with value bigger than OP_LAST.
7146 * At this point the IR can be handed over to an interpreter, a dumb code generator
7147 * or to the optimizing code generator that will translate it to SSA form.
7149 * Profiling directed optimizations.
7150 * We may compile by default with few or no optimizations and instrument the code
7151 * or the user may indicate what methods to optimize the most either in a config file
7152 * or through repeated runs where the compiler applies offline the optimizations to
7153 * each method and then decides if it was worth it.
7156 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7157 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7158 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7159 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7160 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7161 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7162 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7163 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7165 /* offset from br.s -> br like opcodes */
7166 #define BIG_BRANCH_OFFSET 13
7169 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7171 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7173 return b == NULL || b == bb;
7177 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7179 unsigned char *ip = start;
7180 unsigned char *target;
7183 MonoBasicBlock *bblock;
7184 const MonoOpcode *opcode;
7187 cli_addr = ip - start;
7188 i = mono_opcode_value ((const guint8 **)&ip, end);
7191 opcode = &mono_opcodes [i];
7192 switch (opcode->argument) {
7193 case MonoInlineNone:
7196 case MonoInlineString:
7197 case MonoInlineType:
7198 case MonoInlineField:
7199 case MonoInlineMethod:
7202 case MonoShortInlineR:
7209 case MonoShortInlineVar:
7210 case MonoShortInlineI:
7213 case MonoShortInlineBrTarget:
7214 target = start + cli_addr + 2 + (signed char)ip [1];
7215 GET_BBLOCK (cfg, bblock, target);
7218 GET_BBLOCK (cfg, bblock, ip);
7220 case MonoInlineBrTarget:
7221 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7222 GET_BBLOCK (cfg, bblock, target);
7225 GET_BBLOCK (cfg, bblock, ip);
7227 case MonoInlineSwitch: {
7228 guint32 n = read32 (ip + 1);
7231 cli_addr += 5 + 4 * n;
7232 target = start + cli_addr;
7233 GET_BBLOCK (cfg, bblock, target);
7235 for (j = 0; j < n; ++j) {
7236 target = start + cli_addr + (gint32)read32 (ip);
7237 GET_BBLOCK (cfg, bblock, target);
7247 g_assert_not_reached ();
7250 if (i == CEE_THROW) {
7251 unsigned char *bb_start = ip - 1;
7253 /* Find the start of the bblock containing the throw */
7255 while ((bb_start >= start) && !bblock) {
7256 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7260 bblock->out_of_line = 1;
7270 static inline MonoMethod *
7271 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7275 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7276 method = mono_method_get_wrapper_data (m, token);
7279 method = mono_class_inflate_generic_method_checked (method, context, &error);
7280 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7283 method = mono_get_method_full (m->klass->image, token, klass, context);
7289 static inline MonoMethod *
7290 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7292 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7294 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
7300 static inline MonoClass*
7301 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7306 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7307 klass = mono_method_get_wrapper_data (method, token);
7309 klass = mono_class_inflate_generic_class (klass, context);
7311 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7312 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7315 mono_class_init (klass);
7319 static inline MonoMethodSignature*
7320 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7322 MonoMethodSignature *fsig;
7324 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7325 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7327 fsig = mono_metadata_parse_signature (method->klass->image, token);
7331 fsig = mono_inflate_generic_signature(fsig, context, &error);
7333 g_assert(mono_error_ok(&error));
7339 throw_exception (void)
7341 static MonoMethod *method = NULL;
7344 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7345 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
7352 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7354 MonoMethod *thrower = throw_exception ();
7357 EMIT_NEW_PCONST (cfg, args [0], ex);
7358 mono_emit_method_call (cfg, thrower, args, NULL);
7362 * Return the original method is a wrapper is specified. We can only access
7363 * the custom attributes from the original method.
7366 get_original_method (MonoMethod *method)
7368 if (method->wrapper_type == MONO_WRAPPER_NONE)
7371 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7372 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7375 /* in other cases we need to find the original method */
7376 return mono_marshal_method_from_wrapper (method);
7380 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7382 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7383 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7385 emit_throw_exception (cfg, ex);
7389 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7391 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7392 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7394 emit_throw_exception (cfg, ex);
7398 * Check that the IL instructions at ip are the array initialization
7399 * sequence and return the pointer to the data and the size.
7402 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7405 * newarr[System.Int32]
7407 * ldtoken field valuetype ...
7408 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7410 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7412 guint32 token = read32 (ip + 7);
7413 guint32 field_token = read32 (ip + 2);
7414 guint32 field_index = field_token & 0xffffff;
7416 const char *data_ptr;
7418 MonoMethod *cmethod;
7419 MonoClass *dummy_class;
7420 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7424 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7428 *out_field_token = field_token;
7430 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
7433 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7435 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7436 case MONO_TYPE_BOOLEAN:
7440 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7441 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7442 case MONO_TYPE_CHAR:
7459 if (size > mono_type_size (field->type, &dummy_align))
7462 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7463 if (!image_is_dynamic (method->klass->image)) {
7464 field_index = read32 (ip + 2) & 0xffffff;
7465 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7466 data_ptr = mono_image_rva_map (method->klass->image, rva);
7467 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7468 /* for aot code we do the lookup on load */
7469 if (aot && data_ptr)
7470 return GUINT_TO_POINTER (rva);
7472 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7474 data_ptr = mono_field_get_data (field);
7482 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7484 char *method_fname = mono_method_full_name (method, TRUE);
7486 MonoMethodHeader *header = mono_method_get_header (method);
7488 if (header->code_size == 0)
7489 method_code = g_strdup ("method body is empty.");
7491 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7492 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7493 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7494 g_free (method_fname);
7495 g_free (method_code);
7496 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7500 set_exception_object (MonoCompile *cfg, MonoException *exception)
7502 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7503 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7504 cfg->exception_ptr = exception;
7508 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7511 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7512 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7513 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7514 /* Optimize reg-reg moves away */
7516 * Can't optimize other opcodes, since sp[0] might point to
7517 * the last ins of a decomposed opcode.
7519 sp [0]->dreg = (cfg)->locals [n]->dreg;
7521 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7526 * ldloca inhibits many optimizations so try to get rid of it in common
7529 static inline unsigned char *
7530 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7540 local = read16 (ip + 2);
7544 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7545 /* From the INITOBJ case */
7546 token = read32 (ip + 2);
7547 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7548 CHECK_TYPELOAD (klass);
7549 type = mini_get_underlying_type (&klass->byval_arg);
7550 emit_init_local (cfg, local, type, TRUE);
7558 is_exception_class (MonoClass *klass)
7561 if (klass == mono_defaults.exception_class)
7563 klass = klass->parent;
7569 * is_jit_optimizer_disabled:
7571 * Determine whenever M's assembly has a DebuggableAttribute with the
7572 * IsJITOptimizerDisabled flag set.
7575 is_jit_optimizer_disabled (MonoMethod *m)
7577 MonoAssembly *ass = m->klass->image->assembly;
7578 MonoCustomAttrInfo* attrs;
7579 static MonoClass *klass;
7581 gboolean val = FALSE;
7584 if (ass->jit_optimizer_disabled_inited)
7585 return ass->jit_optimizer_disabled;
7588 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7591 ass->jit_optimizer_disabled = FALSE;
7592 mono_memory_barrier ();
7593 ass->jit_optimizer_disabled_inited = TRUE;
7597 attrs = mono_custom_attrs_from_assembly (ass);
7599 for (i = 0; i < attrs->num_attrs; ++i) {
7600 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7602 MonoMethodSignature *sig;
7604 if (!attr->ctor || attr->ctor->klass != klass)
7606 /* Decode the attribute. See reflection.c */
7607 p = (const char*)attr->data;
7608 g_assert (read16 (p) == 0x0001);
7611 // FIXME: Support named parameters
7612 sig = mono_method_signature (attr->ctor);
7613 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7615 /* Two boolean arguments */
7619 mono_custom_attrs_free (attrs);
7622 ass->jit_optimizer_disabled = val;
7623 mono_memory_barrier ();
7624 ass->jit_optimizer_disabled_inited = TRUE;
7630 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7632 gboolean supported_tail_call;
7635 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7637 for (i = 0; i < fsig->param_count; ++i) {
7638 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7639 /* These can point to the current method's stack */
7640 supported_tail_call = FALSE;
7642 if (fsig->hasthis && cmethod->klass->valuetype)
7643 /* this might point to the current method's stack */
7644 supported_tail_call = FALSE;
7645 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7646 supported_tail_call = FALSE;
7647 if (cfg->method->save_lmf)
7648 supported_tail_call = FALSE;
7649 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7650 supported_tail_call = FALSE;
7651 if (call_opcode != CEE_CALL)
7652 supported_tail_call = FALSE;
7654 /* Debugging support */
7656 if (supported_tail_call) {
7657 if (!mono_debug_count ())
7658 supported_tail_call = FALSE;
7662 return supported_tail_call;
7668 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit IR for a constructor invocation.  SP holds the already-evaluated
 * call arguments (sp [0] is the newly allocated 'this').  Depending on the
 * compilation mode, the ctor body is either inlined, called directly, or
 * called indirectly through an address obtained from the runtime generic
 * context (rgctx).  *INLINE_COSTS is bumped when inlining succeeds.
 *   NOTE(review): this chunk is missing interior lines (else arms, braces)
 * and the function's tail — comments below describe only the visible code.
 */
7671 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7672 MonoInst **sp, guint8 *ip, int *inline_costs)
7674 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/*
 * Generic-shared valuetype ctors receive a hidden extra argument: the
 * method rgctx (when the method itself is inflated with a method_inst)
 * or the class vtable otherwise.
 */
7676 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7677 mono_method_is_generic_sharable (cmethod, TRUE)) {
7678 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* Ensure the vtable is created before emitting code that depends on it. */
7679 mono_class_vtable (cfg->domain, cmethod->klass);
7680 CHECK_TYPELOAD (cmethod->klass);
7682 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7683 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7686 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7687 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared path: embed the concrete vtable as a constant. */
7689 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7691 CHECK_TYPELOAD (cmethod->klass);
7692 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7697 /* Avoid virtual calls to ctors if possible */
/* MarshalByRef objects still need the virtual-dispatch path, so pass 'this'. */
7698 if (mono_class_is_marshalbyref (cmethod->klass))
7699 callvirt_this_arg = sp [0];
/* Intrinsic ctors (e.g. recognized by mini_emit_inst_for_ctor) replace the call entirely. */
7701 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7702 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7703 CHECK_CFG_EXCEPTION;
/*
 * Try inlining the ctor body.  Exception subclasses are excluded —
 * presumably so stack traces/throw sites stay accurate (TODO confirm).
 */
7704 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7705 mono_method_check_inlining (cfg, cmethod) &&
7706 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7709 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 = size of the call opcode the inlined body replaces. */
7710 cfg->real_offset += 5;
7712 *inline_costs += costs - 5;
7714 INLINE_FAILURE ("inline failure");
7715 // FIXME-VT: Clean this up
7716 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7717 GSHAREDVT_FAILURE(*ip);
7718 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call indirectly through the out trampoline. */
7720 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7723 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7724 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7725 } else if (context_used &&
7726 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7727 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7728 MonoInst *cmethod_addr;
7730 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7732 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7733 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7735 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: plain direct call; an indirect ctor call prevents inlining of the caller. */
7737 INLINE_FAILURE ("ctor call");
7738 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7739 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR to store VAL as the return value of the method being compiled.
 * Valuetype returns (CEE_STOBJ) are stored either into the dedicated return
 * variable or through the hidden return-address argument (cfg->vret_addr);
 * everything else goes through the arch backend via mono_arch_emit_setret ().
 *   NOTE(review): interior lines (braces/else) are missing from this chunk.
 */
7746 emit_setret (MonoCompile *cfg, MonoInst *val)
7748 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7751 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden vret argument: store into the compiler-managed return var. */
7754 if (!cfg->vret_addr) {
7755 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Hidden vret argument: store the valuetype through its address. */
7757 EMIT_NEW_RETLOADA (cfg, ret_addr);
7759 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7760 ins->klass = mono_class_from_mono_type (ret_type);
7763 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/*
 * Soft-float targets return R4 through an icall that converts the
 * value into the ABI's expected representation first.
 */
7764 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7765 MonoInst *iargs [1];
7769 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7770 mono_arch_emit_setret (cfg, cfg->method, conv);
7772 mono_arch_emit_setret (cfg, cfg->method, val);
7775 mono_arch_emit_setret (cfg, cfg->method, val);
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG extended with one extra trailing parameter of
 * native-int type (used to pass the rgctx as an explicit argument).
 * The copy is g_malloc ()-ed and, per the FIXME below, its ownership/
 * freeing strategy is unresolved — presumably it is leaked; verify at
 * the call sites.
 */
7780 static MonoMethodSignature*
7781 sig_to_rgctx_sig (MonoMethodSignature *sig)
7783 // FIXME: memory allocation
7784 MonoMethodSignature *res;
/* Allocate header + room for the original params plus the extra one. */
7787 res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
7788 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7789 res->param_count = sig->param_count + 1;
7790 for (i = 0; i < sig->param_count; ++i)
7791 res->params [i] = sig->params [i];
/* The appended parameter is a native int (pointer-sized) for the rgctx. */
7792 res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7797 * mono_method_to_ir:
7799 * Translate the .NET IL into linear IR.
7802 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7803 MonoInst *return_var, MonoInst **inline_args,
7804 guint inline_offset, gboolean is_virtual_call)
7807 MonoInst *ins, **sp, **stack_start;
7808 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7809 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7810 MonoMethod *cmethod, *method_definition;
7811 MonoInst **arg_array;
7812 MonoMethodHeader *header;
7814 guint32 token, ins_flag;
7816 MonoClass *constrained_class = NULL;
7817 unsigned char *ip, *end, *target, *err_pos;
7818 MonoMethodSignature *sig;
7819 MonoGenericContext *generic_context = NULL;
7820 MonoGenericContainer *generic_container = NULL;
7821 MonoType **param_types;
7822 int i, n, start_new_bblock, dreg;
7823 int num_calls = 0, inline_costs = 0;
7824 int breakpoint_id = 0;
7826 GSList *class_inits = NULL;
7827 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7829 gboolean init_locals, seq_points, skip_dead_blocks;
7830 gboolean sym_seq_points = FALSE;
7831 MonoDebugMethodInfo *minfo;
7832 MonoBitSet *seq_point_locs = NULL;
7833 MonoBitSet *seq_point_set_locs = NULL;
7835 cfg->disable_inline = is_jit_optimizer_disabled (method);
7837 /* serialization and xdomain stuff may need access to private fields and methods */
7838 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7839 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7840 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7841 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7842 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7843 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7845 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7846 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7847 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7848 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7849 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7851 image = method->klass->image;
7852 header = mono_method_get_header (method);
7854 MonoLoaderError *error;
7856 if ((error = mono_loader_get_last_error ())) {
7857 mono_cfg_set_exception (cfg, error->exception_type);
7859 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7860 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7862 goto exception_exit;
7864 generic_container = mono_method_get_generic_container (method);
7865 sig = mono_method_signature (method);
7866 num_args = sig->hasthis + sig->param_count;
7867 ip = (unsigned char*)header->code;
7868 cfg->cil_start = ip;
7869 end = ip + header->code_size;
7870 cfg->stat_cil_code_size += header->code_size;
7872 seq_points = cfg->gen_seq_points && cfg->method == method;
7874 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7875 /* We could hit a seq point before attaching to the JIT (#8338) */
7879 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7880 minfo = mono_debug_lookup_method (method);
7882 MonoSymSeqPoint *sps;
7883 int i, n_il_offsets;
7885 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7886 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7887 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7888 sym_seq_points = TRUE;
7889 for (i = 0; i < n_il_offsets; ++i) {
7890 if (sps [i].il_offset < header->code_size)
7891 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7894 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7895 /* Methods without line number info like auto-generated property accessors */
7896 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7897 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7898 sym_seq_points = TRUE;
7903 * Methods without init_locals set could cause asserts in various passes
7904 * (#497220). To work around this, we emit dummy initialization opcodes
7905 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7906 * on some platforms.
7908 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7909 init_locals = header->init_locals;
7913 method_definition = method;
7914 while (method_definition->is_inflated) {
7915 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7916 method_definition = imethod->declaring;
7919 /* SkipVerification is not allowed if core-clr is enabled */
7920 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7922 dont_verify_stloc = TRUE;
7925 if (sig->is_inflated)
7926 generic_context = mono_method_get_context (method);
7927 else if (generic_container)
7928 generic_context = &generic_container->context;
7929 cfg->generic_context = generic_context;
7932 g_assert (!sig->has_type_parameters);
7934 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7935 g_assert (method->is_inflated);
7936 g_assert (mono_method_get_context (method)->method_inst);
7938 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7939 g_assert (sig->generic_param_count);
7941 if (cfg->method == method) {
7942 cfg->real_offset = 0;
7944 cfg->real_offset = inline_offset;
7947 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7948 cfg->cil_offset_to_bb_len = header->code_size;
7950 cfg->current_method = method;
7952 if (cfg->verbose_level > 2)
7953 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7955 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7957 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7958 for (n = 0; n < sig->param_count; ++n)
7959 param_types [n + sig->hasthis] = sig->params [n];
7960 cfg->arg_types = param_types;
7962 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7963 if (cfg->method == method) {
7965 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7966 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7969 NEW_BBLOCK (cfg, start_bblock);
7970 cfg->bb_entry = start_bblock;
7971 start_bblock->cil_code = NULL;
7972 start_bblock->cil_length = 0;
7975 NEW_BBLOCK (cfg, end_bblock);
7976 cfg->bb_exit = end_bblock;
7977 end_bblock->cil_code = NULL;
7978 end_bblock->cil_length = 0;
7979 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7980 g_assert (cfg->num_bblocks == 2);
7982 arg_array = cfg->args;
7984 if (header->num_clauses) {
7985 cfg->spvars = g_hash_table_new (NULL, NULL);
7986 cfg->exvars = g_hash_table_new (NULL, NULL);
7988 /* handle exception clauses */
7989 for (i = 0; i < header->num_clauses; ++i) {
7990 MonoBasicBlock *try_bb;
7991 MonoExceptionClause *clause = &header->clauses [i];
7992 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7994 try_bb->real_offset = clause->try_offset;
7995 try_bb->try_start = TRUE;
7996 try_bb->region = ((i + 1) << 8) | clause->flags;
7997 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7998 tblock->real_offset = clause->handler_offset;
7999 tblock->flags |= BB_EXCEPTION_HANDLER;
8002 * Linking the try block with the EH block hinders inlining as we won't be able to
8003 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8005 if (COMPILE_LLVM (cfg))
8006 link_bblock (cfg, try_bb, tblock);
8008 if (*(ip + clause->handler_offset) == CEE_POP)
8009 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8011 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8012 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8013 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8014 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8015 MONO_ADD_INS (tblock, ins);
8017 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8018 /* finally clauses already have a seq point */
8019 /* seq points for filter clauses are emitted below */
8020 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8021 MONO_ADD_INS (tblock, ins);
8024 /* todo: is a fault block unsafe to optimize? */
8025 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8026 tblock->flags |= BB_EXCEPTION_UNSAFE;
8029 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8031 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8033 /* catch and filter blocks get the exception object on the stack */
8034 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8035 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8037 /* mostly like handle_stack_args (), but just sets the input args */
8038 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8039 tblock->in_scount = 1;
8040 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8041 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8045 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8046 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8047 if (!cfg->compile_llvm) {
8048 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8049 ins->dreg = tblock->in_stack [0]->dreg;
8050 MONO_ADD_INS (tblock, ins);
8053 MonoInst *dummy_use;
8056 * Add a dummy use for the exvar so its liveness info will be
8059 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8062 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8063 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8064 MONO_ADD_INS (tblock, ins);
8067 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8068 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8069 tblock->flags |= BB_EXCEPTION_HANDLER;
8070 tblock->real_offset = clause->data.filter_offset;
8071 tblock->in_scount = 1;
8072 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8073 /* The filter block shares the exvar with the handler block */
8074 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8075 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8076 MONO_ADD_INS (tblock, ins);
8080 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8081 clause->data.catch_class &&
8083 mono_class_check_context_used (clause->data.catch_class)) {
8085 * In shared generic code with catch
8086 * clauses containing type variables
8087 * the exception handling code has to
8088 * be able to get to the rgctx.
8089 * Therefore we have to make sure that
8090 * the vtable/mrgctx argument (for
8091 * static or generic methods) or the
8092 * "this" argument (for non-static
8093 * methods) are live.
8095 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8096 mini_method_get_context (method)->method_inst ||
8097 method->klass->valuetype) {
8098 mono_get_vtable_var (cfg);
8100 MonoInst *dummy_use;
8102 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8107 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8108 cfg->cbb = start_bblock;
8109 cfg->args = arg_array;
8110 mono_save_args (cfg, sig, inline_args);
8113 /* FIRST CODE BLOCK */
8114 NEW_BBLOCK (cfg, tblock);
8115 tblock->cil_code = ip;
8119 ADD_BBLOCK (cfg, tblock);
8121 if (cfg->method == method) {
8122 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8123 if (breakpoint_id) {
8124 MONO_INST_NEW (cfg, ins, OP_BREAK);
8125 MONO_ADD_INS (cfg->cbb, ins);
8129 /* we use a separate basic block for the initialization code */
8130 NEW_BBLOCK (cfg, init_localsbb);
8131 cfg->bb_init = init_localsbb;
8132 init_localsbb->real_offset = cfg->real_offset;
8133 start_bblock->next_bb = init_localsbb;
8134 init_localsbb->next_bb = cfg->cbb;
8135 link_bblock (cfg, start_bblock, init_localsbb);
8136 link_bblock (cfg, init_localsbb, cfg->cbb);
8138 cfg->cbb = init_localsbb;
8140 if (cfg->gsharedvt && cfg->method == method) {
8141 MonoGSharedVtMethodInfo *info;
8142 MonoInst *var, *locals_var;
8145 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8146 info->method = cfg->method;
8147 info->count_entries = 16;
8148 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8149 cfg->gsharedvt_info = info;
8151 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8152 /* prevent it from being register allocated */
8153 //var->flags |= MONO_INST_VOLATILE;
8154 cfg->gsharedvt_info_var = var;
8156 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8157 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8159 /* Allocate locals */
8160 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8161 /* prevent it from being register allocated */
8162 //locals_var->flags |= MONO_INST_VOLATILE;
8163 cfg->gsharedvt_locals_var = locals_var;
8165 dreg = alloc_ireg (cfg);
8166 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8168 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8169 ins->dreg = locals_var->dreg;
8171 MONO_ADD_INS (cfg->cbb, ins);
8172 cfg->gsharedvt_locals_var_ins = ins;
8174 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8177 ins->flags |= MONO_INST_INIT;
8181 if (mono_security_core_clr_enabled ()) {
8182 /* check if this is native code, e.g. an icall or a p/invoke */
8183 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8184 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8186 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8187 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8189 /* if this ia a native call then it can only be JITted from platform code */
8190 if ((icall || pinvk) && method->klass && method->klass->image) {
8191 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8192 MonoException *ex = icall ? mono_get_exception_security () :
8193 mono_get_exception_method_access ();
8194 emit_throw_exception (cfg, ex);
8201 CHECK_CFG_EXCEPTION;
8203 if (header->code_size == 0)
8206 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8211 if (cfg->method == method)
8212 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8214 for (n = 0; n < header->num_locals; ++n) {
8215 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8220 /* We force the vtable variable here for all shared methods
8221 for the possibility that they might show up in a stack
8222 trace where their exact instantiation is needed. */
8223 if (cfg->gshared && method == cfg->method) {
8224 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8225 mini_method_get_context (method)->method_inst ||
8226 method->klass->valuetype) {
8227 mono_get_vtable_var (cfg);
8229 /* FIXME: Is there a better way to do this?
8230 We need the variable live for the duration
8231 of the whole method. */
8232 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8236 /* add a check for this != NULL to inlined methods */
8237 if (is_virtual_call) {
8240 NEW_ARGLOAD (cfg, arg_ins, 0);
8241 MONO_ADD_INS (cfg->cbb, arg_ins);
8242 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8245 skip_dead_blocks = !dont_verify;
8246 if (skip_dead_blocks) {
8247 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8252 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8253 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8256 start_new_bblock = 0;
8258 if (cfg->method == method)
8259 cfg->real_offset = ip - header->code;
8261 cfg->real_offset = inline_offset;
8266 if (start_new_bblock) {
8267 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8268 if (start_new_bblock == 2) {
8269 g_assert (ip == tblock->cil_code);
8271 GET_BBLOCK (cfg, tblock, ip);
8273 cfg->cbb->next_bb = tblock;
8275 start_new_bblock = 0;
8276 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8277 if (cfg->verbose_level > 3)
8278 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8279 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8283 g_slist_free (class_inits);
8286 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8287 link_bblock (cfg, cfg->cbb, tblock);
8288 if (sp != stack_start) {
8289 handle_stack_args (cfg, stack_start, sp - stack_start);
8291 CHECK_UNVERIFIABLE (cfg);
8293 cfg->cbb->next_bb = tblock;
8295 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8296 if (cfg->verbose_level > 3)
8297 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8298 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8301 g_slist_free (class_inits);
8306 if (skip_dead_blocks) {
8307 int ip_offset = ip - header->code;
8309 if (ip_offset == bb->end)
8313 int op_size = mono_opcode_size (ip, end);
8314 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8316 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8318 if (ip_offset + op_size == bb->end) {
8319 MONO_INST_NEW (cfg, ins, OP_NOP);
8320 MONO_ADD_INS (cfg->cbb, ins);
8321 start_new_bblock = 1;
8329 * Sequence points are points where the debugger can place a breakpoint.
8330 * Currently, we generate these automatically at points where the IL
8333 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8335 * Make methods interruptable at the beginning, and at the targets of
8336 * backward branches.
8337 * Also, do this at the start of every bblock in methods with clauses too,
8338 * to be able to handle instructions with inprecise control flow like
8340 * Backward branches are handled at the end of method-to-ir ().
8342 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8343 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8345 /* Avoid sequence points on empty IL like .volatile */
8346 // FIXME: Enable this
8347 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8348 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8349 if ((sp != stack_start) && !sym_seq_point)
8350 ins->flags |= MONO_INST_NONEMPTY_STACK;
8351 MONO_ADD_INS (cfg->cbb, ins);
8354 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8357 cfg->cbb->real_offset = cfg->real_offset;
8359 if ((cfg->method == method) && cfg->coverage_info) {
8360 guint32 cil_offset = ip - header->code;
8361 cfg->coverage_info->data [cil_offset].cil_code = ip;
8363 /* TODO: Use an increment here */
8364 #if defined(TARGET_X86)
8365 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8366 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8368 MONO_ADD_INS (cfg->cbb, ins);
8370 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8371 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8375 if (cfg->verbose_level > 3)
8376 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8380 if (seq_points && !sym_seq_points && sp != stack_start) {
8382 * The C# compiler uses these nops to notify the JIT that it should
8383 * insert seq points.
8385 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8386 MONO_ADD_INS (cfg->cbb, ins);
8388 if (cfg->keep_cil_nops)
8389 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8391 MONO_INST_NEW (cfg, ins, OP_NOP);
8393 MONO_ADD_INS (cfg->cbb, ins);
8396 if (should_insert_brekpoint (cfg->method)) {
8397 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8399 MONO_INST_NEW (cfg, ins, OP_NOP);
8402 MONO_ADD_INS (cfg->cbb, ins);
8408 CHECK_STACK_OVF (1);
8409 n = (*ip)-CEE_LDARG_0;
8411 EMIT_NEW_ARGLOAD (cfg, ins, n);
8419 CHECK_STACK_OVF (1);
8420 n = (*ip)-CEE_LDLOC_0;
8422 EMIT_NEW_LOCLOAD (cfg, ins, n);
8431 n = (*ip)-CEE_STLOC_0;
8434 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8436 emit_stloc_ir (cfg, sp, header, n);
8443 CHECK_STACK_OVF (1);
8446 EMIT_NEW_ARGLOAD (cfg, ins, n);
8452 CHECK_STACK_OVF (1);
8455 NEW_ARGLOADA (cfg, ins, n);
8456 MONO_ADD_INS (cfg->cbb, ins);
8466 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8468 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8473 CHECK_STACK_OVF (1);
8476 EMIT_NEW_LOCLOAD (cfg, ins, n);
8480 case CEE_LDLOCA_S: {
8481 unsigned char *tmp_ip;
8483 CHECK_STACK_OVF (1);
8484 CHECK_LOCAL (ip [1]);
8486 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8492 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8501 CHECK_LOCAL (ip [1]);
8502 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8504 emit_stloc_ir (cfg, sp, header, ip [1]);
8509 CHECK_STACK_OVF (1);
8510 EMIT_NEW_PCONST (cfg, ins, NULL);
8511 ins->type = STACK_OBJ;
8516 CHECK_STACK_OVF (1);
8517 EMIT_NEW_ICONST (cfg, ins, -1);
8530 CHECK_STACK_OVF (1);
8531 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8537 CHECK_STACK_OVF (1);
8539 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8545 CHECK_STACK_OVF (1);
8546 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8552 CHECK_STACK_OVF (1);
8553 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8554 ins->type = STACK_I8;
8555 ins->dreg = alloc_dreg (cfg, STACK_I8);
8557 ins->inst_l = (gint64)read64 (ip);
8558 MONO_ADD_INS (cfg->cbb, ins);
8564 gboolean use_aotconst = FALSE;
8566 #ifdef TARGET_POWERPC
8567 /* FIXME: Clean this up */
8568 if (cfg->compile_aot)
8569 use_aotconst = TRUE;
8572 /* FIXME: we should really allocate this only late in the compilation process */
8573 f = mono_domain_alloc (cfg->domain, sizeof (float));
8575 CHECK_STACK_OVF (1);
8581 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8583 dreg = alloc_freg (cfg);
8584 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8585 ins->type = cfg->r4_stack_type;
8587 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8588 ins->type = cfg->r4_stack_type;
8589 ins->dreg = alloc_dreg (cfg, STACK_R8);
8591 MONO_ADD_INS (cfg->cbb, ins);
8601 gboolean use_aotconst = FALSE;
8603 #ifdef TARGET_POWERPC
8604 /* FIXME: Clean this up */
8605 if (cfg->compile_aot)
8606 use_aotconst = TRUE;
8609 /* FIXME: we should really allocate this only late in the compilation process */
8610 d = mono_domain_alloc (cfg->domain, sizeof (double));
8612 CHECK_STACK_OVF (1);
8618 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8620 dreg = alloc_freg (cfg);
8621 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8622 ins->type = STACK_R8;
8624 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8625 ins->type = STACK_R8;
8626 ins->dreg = alloc_dreg (cfg, STACK_R8);
8628 MONO_ADD_INS (cfg->cbb, ins);
8637 MonoInst *temp, *store;
8639 CHECK_STACK_OVF (1);
8643 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8644 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8646 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8649 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8662 if (sp [0]->type == STACK_R8)
8663 /* we need to pop the value from the x86 FP stack */
8664 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8669 MonoMethodSignature *fsig;
8672 INLINE_FAILURE ("jmp");
8673 GSHAREDVT_FAILURE (*ip);
8676 if (stack_start != sp)
8678 token = read32 (ip + 1);
8679 /* FIXME: check the signature matches */
8680 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8682 if (!cmethod || mono_loader_get_last_error ())
8685 if (cfg->gshared && mono_method_check_context_used (cmethod))
8686 GENERIC_SHARING_FAILURE (CEE_JMP);
8688 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8690 fsig = mono_method_signature (cmethod);
8691 n = fsig->param_count + fsig->hasthis;
8692 if (cfg->llvm_only) {
8695 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8696 for (i = 0; i < n; ++i)
8697 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8698 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8700 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8701 * have to emit a normal return since llvm expects it.
8704 emit_setret (cfg, ins);
8705 MONO_INST_NEW (cfg, ins, OP_BR);
8706 ins->inst_target_bb = end_bblock;
8707 MONO_ADD_INS (cfg->cbb, ins);
8708 link_bblock (cfg, cfg->cbb, end_bblock);
8711 } else if (cfg->backend->have_op_tail_call) {
8712 /* Handle tail calls similarly to calls */
8715 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8716 call->method = cmethod;
8717 call->tail_call = TRUE;
8718 call->signature = mono_method_signature (cmethod);
8719 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8720 call->inst.inst_p0 = cmethod;
8721 for (i = 0; i < n; ++i)
8722 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8724 mono_arch_emit_call (cfg, call);
8725 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8726 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8728 for (i = 0; i < num_args; ++i)
8729 /* Prevent arguments from being optimized away */
8730 arg_array [i]->flags |= MONO_INST_VOLATILE;
8732 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8733 ins = (MonoInst*)call;
8734 ins->inst_p0 = cmethod;
8735 MONO_ADD_INS (cfg->cbb, ins);
8739 start_new_bblock = 1;
8744 MonoMethodSignature *fsig;
8747 token = read32 (ip + 1);
8751 //GSHAREDVT_FAILURE (*ip);
8756 fsig = mini_get_signature (method, token, generic_context);
8758 if (method->dynamic && fsig->pinvoke) {
8762 * This is a call through a function pointer using a pinvoke
8763 * signature. Have to create a wrapper and call that instead.
8764 * FIXME: This is very slow, need to create a wrapper at JIT time
8765 * instead based on the signature.
8767 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8768 EMIT_NEW_PCONST (cfg, args [1], fsig);
8770 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8773 n = fsig->param_count + fsig->hasthis;
8777 //g_assert (!virtual || fsig->hasthis);
8781 inline_costs += 10 * num_calls++;
8784 * Making generic calls out of gsharedvt methods.
8785 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8786 * patching gshared method addresses into a gsharedvt method.
8788 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8790 * We pass the address to the gsharedvt trampoline in the rgctx reg
8792 MonoInst *callee = addr;
8794 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8796 GSHAREDVT_FAILURE (*ip);
8798 addr = emit_get_rgctx_sig (cfg, context_used,
8799 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8800 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8804 /* Prevent inlining of methods with indirect calls */
8805 INLINE_FAILURE ("indirect call");
8807 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8812 * Instead of emitting an indirect call, emit a direct call
8813 * with the contents of the aotconst as the patch info.
8815 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8816 info_type = addr->inst_c1;
8817 info_data = addr->inst_p0;
8819 info_type = addr->inst_right->inst_c1;
8820 info_data = addr->inst_right->inst_left;
8823 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8824 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8829 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8833 /* End of call, INS should contain the result of the call, if any */
8835 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8837 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8840 CHECK_CFG_EXCEPTION;
8844 constrained_class = NULL;
8848 case CEE_CALLVIRT: {
8849 MonoInst *addr = NULL;
8850 MonoMethodSignature *fsig = NULL;
8852 int virtual = *ip == CEE_CALLVIRT;
8853 gboolean pass_imt_from_rgctx = FALSE;
8854 MonoInst *imt_arg = NULL;
8855 MonoInst *keep_this_alive = NULL;
8856 gboolean pass_vtable = FALSE;
8857 gboolean pass_mrgctx = FALSE;
8858 MonoInst *vtable_arg = NULL;
8859 gboolean check_this = FALSE;
8860 gboolean supported_tail_call = FALSE;
8861 gboolean tail_call = FALSE;
8862 gboolean need_seq_point = FALSE;
8863 guint32 call_opcode = *ip;
8864 gboolean emit_widen = TRUE;
8865 gboolean push_res = TRUE;
8866 gboolean skip_ret = FALSE;
8867 gboolean delegate_invoke = FALSE;
8868 gboolean direct_icall = FALSE;
8869 gboolean constrained_partial_call = FALSE;
8870 MonoMethod *cil_method;
8873 token = read32 (ip + 1);
8877 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8878 cil_method = cmethod;
8880 if (constrained_class) {
8881 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8882 if (!mini_is_gsharedvt_klass (constrained_class)) {
8883 g_assert (!cmethod->klass->valuetype);
8884 if (!mini_type_is_reference (&constrained_class->byval_arg))
8885 constrained_partial_call = TRUE;
8889 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8890 if (cfg->verbose_level > 2)
8891 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8892 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8893 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8895 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8899 if (cfg->verbose_level > 2)
8900 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8902 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8904 * This is needed since get_method_constrained can't find
8905 * the method in klass representing a type var.
8906 * The type var is guaranteed to be a reference type in this
8909 if (!mini_is_gsharedvt_klass (constrained_class))
8910 g_assert (!cmethod->klass->valuetype);
8912 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8918 if (!cmethod || mono_loader_get_last_error ())
8920 if (!dont_verify && !cfg->skip_visibility) {
8921 MonoMethod *target_method = cil_method;
8922 if (method->is_inflated) {
8923 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8925 if (!mono_method_can_access_method (method_definition, target_method) &&
8926 !mono_method_can_access_method (method, cil_method))
8927 METHOD_ACCESS_FAILURE (method, cil_method);
8930 if (mono_security_core_clr_enabled ())
8931 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8933 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8934 /* MS.NET seems to silently convert this to a callvirt */
8939 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8940 * converts to a callvirt.
8942 * tests/bug-515884.il is an example of this behavior
8944 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8945 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8946 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8950 if (!cmethod->klass->inited)
8951 if (!mono_class_init (cmethod->klass))
8952 TYPE_LOAD_ERROR (cmethod->klass);
8954 fsig = mono_method_signature (cmethod);
8957 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8958 mini_class_is_system_array (cmethod->klass)) {
8959 array_rank = cmethod->klass->rank;
8960 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8961 direct_icall = TRUE;
8962 } else if (fsig->pinvoke) {
8963 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8964 fsig = mono_method_signature (wrapper);
8965 } else if (constrained_class) {
8967 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8971 /* See code below */
8972 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8973 MonoBasicBlock *tbb;
8975 GET_BBLOCK (cfg, tbb, ip + 5);
8976 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8978 * We want to extend the try block to cover the call, but we can't do it if the
8979 * call is made directly since it's followed by an exception check.
8981 direct_icall = FALSE;
8985 mono_save_token_info (cfg, image, token, cil_method);
8987 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8988 need_seq_point = TRUE;
8990 /* Don't support calls made using type arguments for now */
8992 if (cfg->gsharedvt) {
8993 if (mini_is_gsharedvt_signature (fsig))
8994 GSHAREDVT_FAILURE (*ip);
8998 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8999 g_assert_not_reached ();
9001 n = fsig->param_count + fsig->hasthis;
9003 if (!cfg->gshared && cmethod->klass->generic_container)
9007 g_assert (!mono_method_check_context_used (cmethod));
9011 //g_assert (!virtual || fsig->hasthis);
9016 * We have the `constrained.' prefix opcode.
9018 if (constrained_class) {
9019 if (mini_is_gsharedvt_klass (constrained_class)) {
9020 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9021 /* The 'Own method' case below */
9022 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9023 /* 'The type parameter is instantiated as a reference type' case below. */
9025 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9026 CHECK_CFG_EXCEPTION;
9032 if (constrained_partial_call) {
9033 gboolean need_box = TRUE;
9036 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9037 * called method is not known at compile time either. The called method could end up being
9038 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9039 * to box the receiver.
9040 * A simple solution would be to box always and make a normal virtual call, but that would
9041 * be bad performance wise.
9043 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9045 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9050 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9051 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9052 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9053 ins->klass = constrained_class;
9054 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9055 CHECK_CFG_EXCEPTION;
9056 } else if (need_box) {
9058 MonoBasicBlock *is_ref_bb, *end_bb;
9059 MonoInst *nonbox_call;
9062 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9064 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9065 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9067 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9069 NEW_BBLOCK (cfg, is_ref_bb);
9070 NEW_BBLOCK (cfg, end_bb);
9072 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9073 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9074 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9077 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9082 MONO_START_BB (cfg, is_ref_bb);
9083 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9084 ins->klass = constrained_class;
9085 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9086 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9088 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9090 MONO_START_BB (cfg, end_bb);
9093 nonbox_call->dreg = ins->dreg;
9096 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9097 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9098 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9101 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9103 * The type parameter is instantiated as a valuetype,
9104 * but that type doesn't override the method we're
9105 * calling, so we need to box `this'.
9107 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9108 ins->klass = constrained_class;
9109 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9110 CHECK_CFG_EXCEPTION;
9111 } else if (!constrained_class->valuetype) {
9112 int dreg = alloc_ireg_ref (cfg);
9115 * The type parameter is instantiated as a reference
9116 * type. We have a managed pointer on the stack, so
9117 * we need to dereference it here.
9119 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9120 ins->type = STACK_OBJ;
9123 if (cmethod->klass->valuetype) {
9126 /* Interface method */
9129 mono_class_setup_vtable (constrained_class);
9130 CHECK_TYPELOAD (constrained_class);
9131 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9133 TYPE_LOAD_ERROR (constrained_class);
9134 slot = mono_method_get_vtable_slot (cmethod);
9136 TYPE_LOAD_ERROR (cmethod->klass);
9137 cmethod = constrained_class->vtable [ioffset + slot];
9139 if (cmethod->klass == mono_defaults.enum_class) {
9140 /* Enum implements some interfaces, so treat this as the first case */
9141 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9142 ins->klass = constrained_class;
9143 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9144 CHECK_CFG_EXCEPTION;
9149 constrained_class = NULL;
9152 if (check_call_signature (cfg, fsig, sp))
9155 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9156 delegate_invoke = TRUE;
9158 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9159 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9160 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9168 * If the callee is a shared method, then its static cctor
9169 * might not get called after the call was patched.
9171 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9172 emit_class_init (cfg, cmethod->klass);
9173 CHECK_TYPELOAD (cmethod->klass);
9176 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9179 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9181 context_used = mini_method_check_context_used (cfg, cmethod);
9183 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9184 /* Generic method interface
9185 calls are resolved via a
9186 helper function and don't
9188 if (!cmethod_context || !cmethod_context->method_inst)
9189 pass_imt_from_rgctx = TRUE;
9193 * If a shared method calls another
9194 * shared method then the caller must
9195 * have a generic sharing context
9196 * because the magic trampoline
9197 * requires it. FIXME: We shouldn't
9198 * have to force the vtable/mrgctx
9199 * variable here. Instead there
9200 * should be a flag in the cfg to
9201 * request a generic sharing context.
9204 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9205 mono_get_vtable_var (cfg);
9210 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9212 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9214 CHECK_TYPELOAD (cmethod->klass);
9215 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9220 g_assert (!vtable_arg);
9222 if (!cfg->compile_aot) {
9224 * emit_get_rgctx_method () calls mono_class_vtable () so check
9225 * for type load errors before.
9227 mono_class_setup_vtable (cmethod->klass);
9228 CHECK_TYPELOAD (cmethod->klass);
9231 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9233 /* !marshalbyref is needed to properly handle generic methods + remoting */
9234 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9235 MONO_METHOD_IS_FINAL (cmethod)) &&
9236 !mono_class_is_marshalbyref (cmethod->klass)) {
9243 if (pass_imt_from_rgctx) {
9244 g_assert (!pass_vtable);
9246 imt_arg = emit_get_rgctx_method (cfg, context_used,
9247 cmethod, MONO_RGCTX_INFO_METHOD);
9251 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9253 /* Calling virtual generic methods */
9254 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9255 !(MONO_METHOD_IS_FINAL (cmethod) &&
9256 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9257 fsig->generic_param_count &&
9258 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9260 MonoInst *this_temp, *this_arg_temp, *store;
9261 MonoInst *iargs [4];
9263 g_assert (fsig->is_inflated);
9265 /* Prevent inlining of methods that contain indirect calls */
9266 INLINE_FAILURE ("virtual generic call");
9268 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9269 GSHAREDVT_FAILURE (*ip);
9271 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9272 g_assert (!imt_arg);
9274 g_assert (cmethod->is_inflated);
9275 imt_arg = emit_get_rgctx_method (cfg, context_used,
9276 cmethod, MONO_RGCTX_INFO_METHOD);
9277 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9279 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9280 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9281 MONO_ADD_INS (cfg->cbb, store);
9283 /* FIXME: This should be a managed pointer */
9284 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9286 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9287 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9288 cmethod, MONO_RGCTX_INFO_METHOD);
9289 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9290 addr = mono_emit_jit_icall (cfg,
9291 mono_helper_compile_generic_method, iargs);
9293 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9295 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9302 * Implement a workaround for the inherent races involved in locking:
9308 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9309 * try block, the Exit () won't be executed, see:
9310 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9311 * To work around this, we extend such try blocks to include the last x bytes
9312 * of the Monitor.Enter () call.
9314 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9315 MonoBasicBlock *tbb;
9317 GET_BBLOCK (cfg, tbb, ip + 5);
9319 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9320 * from Monitor.Enter like ArgumentNullException.
9322 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9323 /* Mark this bblock as needing to be extended */
9324 tbb->extend_try_block = TRUE;
9328 /* Conversion to a JIT intrinsic */
9329 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9330 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9331 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9338 if ((cfg->opt & MONO_OPT_INLINE) &&
9339 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9340 mono_method_check_inlining (cfg, cmethod)) {
9342 gboolean always = FALSE;
9344 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9345 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9346 /* Prevent inlining of methods that call wrappers */
9347 INLINE_FAILURE ("wrapper call");
9348 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9352 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9354 cfg->real_offset += 5;
9356 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9357 /* *sp is already set by inline_method */
9362 inline_costs += costs;
9368 /* Tail recursion elimination */
9369 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9370 gboolean has_vtargs = FALSE;
9373 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9374 INLINE_FAILURE ("tail call");
9376 /* keep it simple */
9377 for (i = fsig->param_count - 1; i >= 0; i--) {
9378 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9383 for (i = 0; i < n; ++i)
9384 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9385 MONO_INST_NEW (cfg, ins, OP_BR);
9386 MONO_ADD_INS (cfg->cbb, ins);
9387 tblock = start_bblock->out_bb [0];
9388 link_bblock (cfg, cfg->cbb, tblock);
9389 ins->inst_target_bb = tblock;
9390 start_new_bblock = 1;
9392 /* skip the CEE_RET, too */
9393 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9400 inline_costs += 10 * num_calls++;
9403 * Making generic calls out of gsharedvt methods.
9404 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9405 * patching gshared method addresses into a gsharedvt method.
9407 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9408 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9409 MonoRgctxInfoType info_type;
9412 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9413 //GSHAREDVT_FAILURE (*ip);
9414 // disable for possible remoting calls
9415 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9416 GSHAREDVT_FAILURE (*ip);
9417 if (fsig->generic_param_count) {
9418 /* virtual generic call */
9419 g_assert (!imt_arg);
9420 /* Same as the virtual generic case above */
9421 imt_arg = emit_get_rgctx_method (cfg, context_used,
9422 cmethod, MONO_RGCTX_INFO_METHOD);
9423 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9425 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9426 /* This can happen when we call a fully instantiated iface method */
9427 imt_arg = emit_get_rgctx_method (cfg, context_used,
9428 cmethod, MONO_RGCTX_INFO_METHOD);
9433 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9434 keep_this_alive = sp [0];
9436 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9437 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9439 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9440 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9442 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9446 /* Generic sharing */
9449 * Use this if the callee is gsharedvt sharable too, since
9450 * at runtime we might find an instantiation so the call cannot
9451 * be patched (the 'no_patch' code path in mini-trampolines.c).
9453 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9454 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9455 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9456 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9457 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9458 INLINE_FAILURE ("gshared");
9460 g_assert (cfg->gshared && cmethod);
9464 * We are compiling a call to a
9465 * generic method from shared code,
9466 * which means that we have to look up
9467 * the method in the rgctx and do an
9471 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9473 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9474 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9478 /* Direct calls to icalls */
9480 MonoMethod *wrapper;
9483 /* Inline the wrapper */
9484 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9486 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9487 g_assert (costs > 0);
9488 cfg->real_offset += 5;
9490 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9491 /* *sp is already set by inline_method */
9496 inline_costs += costs;
9505 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9506 MonoInst *val = sp [fsig->param_count];
9508 if (val->type == STACK_OBJ) {
9509 MonoInst *iargs [2];
9514 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9517 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9518 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9519 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9520 emit_write_barrier (cfg, addr, val);
9521 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9522 GSHAREDVT_FAILURE (*ip);
9523 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9524 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9526 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9527 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9528 if (!cmethod->klass->element_class->valuetype && !readonly)
9529 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9530 CHECK_TYPELOAD (cmethod->klass);
9533 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9536 g_assert_not_reached ();
9543 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9547 /* Tail prefix / tail call optimization */
9549 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9550 /* FIXME: runtime generic context pointer for jumps? */
9551 /* FIXME: handle this for generic sharing eventually */
9552 if ((ins_flag & MONO_INST_TAILCALL) &&
9553 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9554 supported_tail_call = TRUE;
9556 if (supported_tail_call) {
9559 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9560 INLINE_FAILURE ("tail call");
9562 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9564 if (cfg->backend->have_op_tail_call) {
9565 /* Handle tail calls similarly to normal calls */
9568 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9570 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9571 call->tail_call = TRUE;
9572 call->method = cmethod;
9573 call->signature = mono_method_signature (cmethod);
9576 * We implement tail calls by storing the actual arguments into the
9577 * argument variables, then emitting a CEE_JMP.
9579 for (i = 0; i < n; ++i) {
9580 /* Prevent argument from being register allocated */
9581 arg_array [i]->flags |= MONO_INST_VOLATILE;
9582 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9584 ins = (MonoInst*)call;
9585 ins->inst_p0 = cmethod;
9586 ins->inst_p1 = arg_array [0];
9587 MONO_ADD_INS (cfg->cbb, ins);
9588 link_bblock (cfg, cfg->cbb, end_bblock);
9589 start_new_bblock = 1;
9591 // FIXME: Eliminate unreachable epilogs
9594 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9595 * only reachable from this call.
9597 GET_BBLOCK (cfg, tblock, ip + 5);
9598 if (tblock == cfg->cbb || tblock->in_count == 0)
9607 * Synchronized wrappers.
9608 * It's hard to determine where to replace a method with its synchronized
9609 * wrapper without causing an infinite recursion. The current solution is
9610 * to add the synchronized wrapper in the trampolines, and to
9611 * change the called method to a dummy wrapper, and resolve that wrapper
9612 * to the real method in mono_jit_compile_method ().
9614 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9615 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9616 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9617 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9621 * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg,
9622 * (i.e. it's a vtype method), and there is no way for the caller to know this at compile time.
9623 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9624 * based on whether there is an rgctx or not.
9626 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9627 MonoInst *args_buf [16], *icall_args [16];
9629 MonoBasicBlock *rgctx_bb, *end_bb;
9630 MonoInst *call1, *call2, *call_target;
9631 MonoMethodSignature *rgctx_sig;
9632 int rgctx_reg, tmp_reg;
9634 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9636 NEW_BBLOCK (cfg, rgctx_bb);
9637 NEW_BBLOCK (cfg, end_bb);
9639 // FIXME: Optimize this
9641 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9643 icall_args [0] = sp [0];
9644 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9646 icall_args [2] = imt_arg;
9648 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9649 icall_args [2] = ins;
9652 rgctx_reg = alloc_preg (cfg);
9653 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9654 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9655 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9657 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9659 // FIXME: Only do this if needed (generic calls)
9661 // Check whether to pass an rgctx
9662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9663 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9664 /* Non rgctx case */
9665 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9668 MONO_START_BB (cfg, rgctx_bb);
9669 /* Make a call with an rgctx */
9670 if (fsig->param_count + 2 < 16)
9673 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9675 for (i = 0; i < fsig->param_count; ++i)
9676 args [i + 1] = sp [i + 1];
9677 tmp_reg = alloc_preg (cfg);
9678 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9679 rgctx_sig = sig_to_rgctx_sig (fsig);
9680 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9681 call2->dreg = call1->dreg;
9682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9684 MONO_START_BB (cfg, end_bb);
9690 INLINE_FAILURE ("call");
9691 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9692 imt_arg, vtable_arg);
9694 if (tail_call && !cfg->llvm_only) {
9695 link_bblock (cfg, cfg->cbb, end_bblock);
9696 start_new_bblock = 1;
9698 // FIXME: Eliminate unreachable epilogs
9701 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9702 * only reachable from this call.
9704 GET_BBLOCK (cfg, tblock, ip + 5);
9705 if (tblock == cfg->cbb || tblock->in_count == 0)
9712 /* End of call, INS should contain the result of the call, if any */
9714 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9717 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9722 if (keep_this_alive) {
9723 MonoInst *dummy_use;
9725 /* See mono_emit_method_call_full () */
9726 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9729 CHECK_CFG_EXCEPTION;
9733 g_assert (*ip == CEE_RET);
9737 constrained_class = NULL;
9739 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9743 if (cfg->method != method) {
9744 /* return from inlined method */
9746 * If in_count == 0, that means the ret is unreachable due to
9747 * being preceded by a throw. In that case, inline_method () will
9748 * handle setting the return value
9749 * (test case: test_0_inline_throw ()).
9751 if (return_var && cfg->cbb->in_count) {
9752 MonoType *ret_type = mono_method_signature (method)->ret;
9758 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9761 //g_assert (returnvar != -1);
9762 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9763 cfg->ret_var_set = TRUE;
9766 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9768 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9772 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9774 if (seq_points && !sym_seq_points) {
9776 * Place a seq point here too even through the IL stack is not
9777 * empty, so a step over on
9780 * will work correctly.
9782 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9783 MONO_ADD_INS (cfg->cbb, ins);
9786 g_assert (!return_var);
9790 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9793 emit_setret (cfg, *sp);
9796 if (sp != stack_start)
9798 MONO_INST_NEW (cfg, ins, OP_BR);
9800 ins->inst_target_bb = end_bblock;
9801 MONO_ADD_INS (cfg->cbb, ins);
9802 link_bblock (cfg, cfg->cbb, end_bblock);
9803 start_new_bblock = 1;
9807 MONO_INST_NEW (cfg, ins, OP_BR);
9809 target = ip + 1 + (signed char)(*ip);
9811 GET_BBLOCK (cfg, tblock, target);
9812 link_bblock (cfg, cfg->cbb, tblock);
9813 ins->inst_target_bb = tblock;
9814 if (sp != stack_start) {
9815 handle_stack_args (cfg, stack_start, sp - stack_start);
9817 CHECK_UNVERIFIABLE (cfg);
9819 MONO_ADD_INS (cfg->cbb, ins);
9820 start_new_bblock = 1;
9821 inline_costs += BRANCH_COST;
9835 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9837 target = ip + 1 + *(signed char*)ip;
9843 inline_costs += BRANCH_COST;
9847 MONO_INST_NEW (cfg, ins, OP_BR);
9850 target = ip + 4 + (gint32)read32(ip);
9852 GET_BBLOCK (cfg, tblock, target);
9853 link_bblock (cfg, cfg->cbb, tblock);
9854 ins->inst_target_bb = tblock;
9855 if (sp != stack_start) {
9856 handle_stack_args (cfg, stack_start, sp - stack_start);
9858 CHECK_UNVERIFIABLE (cfg);
9861 MONO_ADD_INS (cfg->cbb, ins);
9863 start_new_bblock = 1;
9864 inline_costs += BRANCH_COST;
9871 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9872 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9873 guint32 opsize = is_short ? 1 : 4;
9875 CHECK_OPSIZE (opsize);
9877 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9880 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9885 GET_BBLOCK (cfg, tblock, target);
9886 link_bblock (cfg, cfg->cbb, tblock);
9887 GET_BBLOCK (cfg, tblock, ip);
9888 link_bblock (cfg, cfg->cbb, tblock);
9890 if (sp != stack_start) {
9891 handle_stack_args (cfg, stack_start, sp - stack_start);
9892 CHECK_UNVERIFIABLE (cfg);
9895 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9896 cmp->sreg1 = sp [0]->dreg;
9897 type_from_op (cfg, cmp, sp [0], NULL);
9900 #if SIZEOF_REGISTER == 4
9901 if (cmp->opcode == OP_LCOMPARE_IMM) {
9902 /* Convert it to OP_LCOMPARE */
9903 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9904 ins->type = STACK_I8;
9905 ins->dreg = alloc_dreg (cfg, STACK_I8);
9907 MONO_ADD_INS (cfg->cbb, ins);
9908 cmp->opcode = OP_LCOMPARE;
9909 cmp->sreg2 = ins->dreg;
9912 MONO_ADD_INS (cfg->cbb, cmp);
9914 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9915 type_from_op (cfg, ins, sp [0], NULL);
9916 MONO_ADD_INS (cfg->cbb, ins);
9917 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9918 GET_BBLOCK (cfg, tblock, target);
9919 ins->inst_true_bb = tblock;
9920 GET_BBLOCK (cfg, tblock, ip);
9921 ins->inst_false_bb = tblock;
9922 start_new_bblock = 2;
9925 inline_costs += BRANCH_COST;
9940 MONO_INST_NEW (cfg, ins, *ip);
9942 target = ip + 4 + (gint32)read32(ip);
9948 inline_costs += BRANCH_COST;
9952 MonoBasicBlock **targets;
9953 MonoBasicBlock *default_bblock;
9954 MonoJumpInfoBBTable *table;
9955 int offset_reg = alloc_preg (cfg);
9956 int target_reg = alloc_preg (cfg);
9957 int table_reg = alloc_preg (cfg);
9958 int sum_reg = alloc_preg (cfg);
9959 gboolean use_op_switch;
9963 n = read32 (ip + 1);
9966 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9970 CHECK_OPSIZE (n * sizeof (guint32));
9971 target = ip + n * sizeof (guint32);
9973 GET_BBLOCK (cfg, default_bblock, target);
9974 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9976 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9977 for (i = 0; i < n; ++i) {
9978 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9979 targets [i] = tblock;
9980 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9984 if (sp != stack_start) {
9986 * Link the current bb with the targets as well, so handle_stack_args
9987 * will set their in_stack correctly.
9989 link_bblock (cfg, cfg->cbb, default_bblock);
9990 for (i = 0; i < n; ++i)
9991 link_bblock (cfg, cfg->cbb, targets [i]);
9993 handle_stack_args (cfg, stack_start, sp - stack_start);
9995 CHECK_UNVERIFIABLE (cfg);
9997 /* Undo the links */
9998 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9999 for (i = 0; i < n; ++i)
10000 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10004 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10006 for (i = 0; i < n; ++i)
10007 link_bblock (cfg, cfg->cbb, targets [i]);
10009 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10010 table->table = targets;
10011 table->table_size = n;
10013 use_op_switch = FALSE;
10015 /* ARM implements SWITCH statements differently */
10016 /* FIXME: Make it use the generic implementation */
10017 if (!cfg->compile_aot)
10018 use_op_switch = TRUE;
10021 if (COMPILE_LLVM (cfg))
10022 use_op_switch = TRUE;
10024 cfg->cbb->has_jump_table = 1;
10026 if (use_op_switch) {
10027 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10028 ins->sreg1 = src1->dreg;
10029 ins->inst_p0 = table;
10030 ins->inst_many_bb = targets;
10031 ins->klass = GUINT_TO_POINTER (n);
10032 MONO_ADD_INS (cfg->cbb, ins);
10034 if (sizeof (gpointer) == 8)
10035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10039 #if SIZEOF_REGISTER == 8
10040 /* The upper word might not be zero, and we add it to a 64 bit address later */
10041 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10044 if (cfg->compile_aot) {
10045 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10047 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10048 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10049 ins->inst_p0 = table;
10050 ins->dreg = table_reg;
10051 MONO_ADD_INS (cfg->cbb, ins);
10054 /* FIXME: Use load_memindex */
10055 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10056 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10057 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10059 start_new_bblock = 1;
10060 inline_costs += (BRANCH_COST * 2);
10073 case CEE_LDIND_REF:
10080 dreg = alloc_freg (cfg);
10083 dreg = alloc_lreg (cfg);
10085 case CEE_LDIND_REF:
10086 dreg = alloc_ireg_ref (cfg);
10089 dreg = alloc_preg (cfg);
10092 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10093 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10094 if (*ip == CEE_LDIND_R4)
10095 ins->type = cfg->r4_stack_type;
10096 ins->flags |= ins_flag;
10097 MONO_ADD_INS (cfg->cbb, ins);
10099 if (ins_flag & MONO_INST_VOLATILE) {
10100 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10101 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10106 case CEE_STIND_REF:
10117 if (ins_flag & MONO_INST_VOLATILE) {
10118 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10119 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10122 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10123 ins->flags |= ins_flag;
10126 MONO_ADD_INS (cfg->cbb, ins);
10128 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10129 emit_write_barrier (cfg, sp [0], sp [1]);
10138 MONO_INST_NEW (cfg, ins, (*ip));
10140 ins->sreg1 = sp [0]->dreg;
10141 ins->sreg2 = sp [1]->dreg;
10142 type_from_op (cfg, ins, sp [0], sp [1]);
10144 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10146 /* Use the immediate opcodes if possible */
10147 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10148 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10149 if (imm_opcode != -1) {
10150 ins->opcode = imm_opcode;
10151 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10154 NULLIFY_INS (sp [1]);
10158 MONO_ADD_INS ((cfg)->cbb, (ins));
10160 *sp++ = mono_decompose_opcode (cfg, ins);
10177 MONO_INST_NEW (cfg, ins, (*ip));
10179 ins->sreg1 = sp [0]->dreg;
10180 ins->sreg2 = sp [1]->dreg;
10181 type_from_op (cfg, ins, sp [0], sp [1]);
10183 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10184 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10186 /* FIXME: Pass opcode to is_inst_imm */
10188 /* Use the immediate opcodes if possible */
10189 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10192 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10193 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10194 /* Keep emulated opcodes which are optimized away later */
10195 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10196 imm_opcode = mono_op_to_op_imm (ins->opcode);
10199 if (imm_opcode != -1) {
10200 ins->opcode = imm_opcode;
10201 if (sp [1]->opcode == OP_I8CONST) {
10202 #if SIZEOF_REGISTER == 8
10203 ins->inst_imm = sp [1]->inst_l;
10205 ins->inst_ls_word = sp [1]->inst_ls_word;
10206 ins->inst_ms_word = sp [1]->inst_ms_word;
10210 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10213 /* Might be followed by an instruction added by add_widen_op */
10214 if (sp [1]->next == NULL)
10215 NULLIFY_INS (sp [1]);
10218 MONO_ADD_INS ((cfg)->cbb, (ins));
10220 *sp++ = mono_decompose_opcode (cfg, ins);
10233 case CEE_CONV_OVF_I8:
10234 case CEE_CONV_OVF_U8:
10235 case CEE_CONV_R_UN:
10238 /* Special case this earlier so we have long constants in the IR */
10239 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10240 int data = sp [-1]->inst_c0;
10241 sp [-1]->opcode = OP_I8CONST;
10242 sp [-1]->type = STACK_I8;
10243 #if SIZEOF_REGISTER == 8
10244 if ((*ip) == CEE_CONV_U8)
10245 sp [-1]->inst_c0 = (guint32)data;
10247 sp [-1]->inst_c0 = data;
10249 sp [-1]->inst_ls_word = data;
10250 if ((*ip) == CEE_CONV_U8)
10251 sp [-1]->inst_ms_word = 0;
10253 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10255 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10262 case CEE_CONV_OVF_I4:
10263 case CEE_CONV_OVF_I1:
10264 case CEE_CONV_OVF_I2:
10265 case CEE_CONV_OVF_I:
10266 case CEE_CONV_OVF_U:
10269 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10270 ADD_UNOP (CEE_CONV_OVF_I8);
10277 case CEE_CONV_OVF_U1:
10278 case CEE_CONV_OVF_U2:
10279 case CEE_CONV_OVF_U4:
10282 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10283 ADD_UNOP (CEE_CONV_OVF_U8);
10290 case CEE_CONV_OVF_I1_UN:
10291 case CEE_CONV_OVF_I2_UN:
10292 case CEE_CONV_OVF_I4_UN:
10293 case CEE_CONV_OVF_I8_UN:
10294 case CEE_CONV_OVF_U1_UN:
10295 case CEE_CONV_OVF_U2_UN:
10296 case CEE_CONV_OVF_U4_UN:
10297 case CEE_CONV_OVF_U8_UN:
10298 case CEE_CONV_OVF_I_UN:
10299 case CEE_CONV_OVF_U_UN:
10306 CHECK_CFG_EXCEPTION;
10310 case CEE_ADD_OVF_UN:
10312 case CEE_MUL_OVF_UN:
10314 case CEE_SUB_OVF_UN:
10320 GSHAREDVT_FAILURE (*ip);
10323 token = read32 (ip + 1);
10324 klass = mini_get_class (method, token, generic_context);
10325 CHECK_TYPELOAD (klass);
10327 if (generic_class_is_reference_type (cfg, klass)) {
10328 MonoInst *store, *load;
10329 int dreg = alloc_ireg_ref (cfg);
10331 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10332 load->flags |= ins_flag;
10333 MONO_ADD_INS (cfg->cbb, load);
10335 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10336 store->flags |= ins_flag;
10337 MONO_ADD_INS (cfg->cbb, store);
10339 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10340 emit_write_barrier (cfg, sp [0], sp [1]);
10342 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10348 int loc_index = -1;
10354 token = read32 (ip + 1);
10355 klass = mini_get_class (method, token, generic_context);
10356 CHECK_TYPELOAD (klass);
10358 /* Optimize the common ldobj+stloc combination */
10361 loc_index = ip [6];
10368 loc_index = ip [5] - CEE_STLOC_0;
10375 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10376 CHECK_LOCAL (loc_index);
10378 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10379 ins->dreg = cfg->locals [loc_index]->dreg;
10380 ins->flags |= ins_flag;
10383 if (ins_flag & MONO_INST_VOLATILE) {
10384 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10385 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10391 /* Optimize the ldobj+stobj combination */
10392 /* The reference case ends up being a load+store anyway */
10393 /* Skip this if the operation is volatile. */
10394 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10399 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10406 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10407 ins->flags |= ins_flag;
10410 if (ins_flag & MONO_INST_VOLATILE) {
10411 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10412 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10421 CHECK_STACK_OVF (1);
10423 n = read32 (ip + 1);
10425 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10426 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10427 ins->type = STACK_OBJ;
10430 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10431 MonoInst *iargs [1];
10432 char *str = mono_method_get_wrapper_data (method, n);
10434 if (cfg->compile_aot)
10435 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10437 EMIT_NEW_PCONST (cfg, iargs [0], str);
10438 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10440 if (cfg->opt & MONO_OPT_SHARED) {
10441 MonoInst *iargs [3];
10443 if (cfg->compile_aot) {
10444 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10446 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10447 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10448 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10449 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10450 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10452 if (cfg->cbb->out_of_line) {
10453 MonoInst *iargs [2];
10455 if (image == mono_defaults.corlib) {
10457 * Avoid relocations in AOT and save some space by using a
10458 * version of helper_ldstr specialized to mscorlib.
10460 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10461 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10463 /* Avoid creating the string object */
10464 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10465 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10466 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10470 if (cfg->compile_aot) {
10471 NEW_LDSTRCONST (cfg, ins, image, n);
10473 MONO_ADD_INS (cfg->cbb, ins);
10476 NEW_PCONST (cfg, ins, NULL);
10477 ins->type = STACK_OBJ;
10478 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10480 OUT_OF_MEMORY_FAILURE;
10483 MONO_ADD_INS (cfg->cbb, ins);
10492 MonoInst *iargs [2];
10493 MonoMethodSignature *fsig;
10496 MonoInst *vtable_arg = NULL;
10499 token = read32 (ip + 1);
10500 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10501 if (!cmethod || mono_loader_get_last_error ())
10503 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10506 mono_save_token_info (cfg, image, token, cmethod);
10508 if (!mono_class_init (cmethod->klass))
10509 TYPE_LOAD_ERROR (cmethod->klass);
10511 context_used = mini_method_check_context_used (cfg, cmethod);
10513 if (mono_security_core_clr_enabled ())
10514 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10516 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10517 emit_class_init (cfg, cmethod->klass);
10518 CHECK_TYPELOAD (cmethod->klass);
10522 if (cfg->gsharedvt) {
10523 if (mini_is_gsharedvt_variable_signature (sig))
10524 GSHAREDVT_FAILURE (*ip);
10528 n = fsig->param_count;
10532 * Generate smaller code for the common newobj <exception> instruction in
10533 * argument checking code.
10535 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10536 is_exception_class (cmethod->klass) && n <= 2 &&
10537 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10538 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10539 MonoInst *iargs [3];
10543 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10546 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10549 iargs [1] = sp [0];
10550 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10553 iargs [1] = sp [0];
10554 iargs [2] = sp [1];
10555 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10558 g_assert_not_reached ();
10566 /* move the args to allow room for 'this' in the first position */
10572 /* check_call_signature () requires sp[0] to be set */
10573 this_ins.type = STACK_OBJ;
10574 sp [0] = &this_ins;
10575 if (check_call_signature (cfg, fsig, sp))
10580 if (mini_class_is_system_array (cmethod->klass)) {
10581 *sp = emit_get_rgctx_method (cfg, context_used,
10582 cmethod, MONO_RGCTX_INFO_METHOD);
10584 /* Avoid varargs in the common case */
10585 if (fsig->param_count == 1)
10586 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10587 else if (fsig->param_count == 2)
10588 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10589 else if (fsig->param_count == 3)
10590 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10591 else if (fsig->param_count == 4)
10592 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10594 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10595 } else if (cmethod->string_ctor) {
10596 g_assert (!context_used);
10597 g_assert (!vtable_arg);
10598 /* we simply pass a null pointer */
10599 EMIT_NEW_PCONST (cfg, *sp, NULL);
10600 /* now call the string ctor */
10601 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10603 if (cmethod->klass->valuetype) {
10604 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10605 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10606 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10611 * The code generated by mini_emit_virtual_call () expects
10612 * iargs [0] to be a boxed instance, but luckily the vcall
10613 * will be transformed into a normal call there.
10615 } else if (context_used) {
10616 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10619 MonoVTable *vtable = NULL;
10621 if (!cfg->compile_aot)
10622 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10623 CHECK_TYPELOAD (cmethod->klass);
10626 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10627 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10628 * As a workaround, we call class cctors before allocating objects.
10630 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10631 emit_class_init (cfg, cmethod->klass);
10632 if (cfg->verbose_level > 2)
10633 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10634 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10637 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10640 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10643 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10645 /* Now call the actual ctor */
10646 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10647 CHECK_CFG_EXCEPTION;
10650 if (alloc == NULL) {
10652 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10653 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10661 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10662 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10665 case CEE_CASTCLASS:
10669 token = read32 (ip + 1);
10670 klass = mini_get_class (method, token, generic_context);
10671 CHECK_TYPELOAD (klass);
10672 if (sp [0]->type != STACK_OBJ)
10675 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10676 CHECK_CFG_EXCEPTION;
10685 token = read32 (ip + 1);
10686 klass = mini_get_class (method, token, generic_context);
10687 CHECK_TYPELOAD (klass);
10688 if (sp [0]->type != STACK_OBJ)
10691 context_used = mini_class_check_context_used (cfg, klass);
10693 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10694 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10695 MonoInst *args [3];
10702 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10705 idx = get_castclass_cache_idx (cfg);
10706 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10708 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10711 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10712 MonoMethod *mono_isinst;
10713 MonoInst *iargs [1];
10716 mono_isinst = mono_marshal_get_isinst (klass);
10717 iargs [0] = sp [0];
10719 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10720 iargs, ip, cfg->real_offset, TRUE);
10721 CHECK_CFG_EXCEPTION;
10722 g_assert (costs > 0);
10725 cfg->real_offset += 5;
10729 inline_costs += costs;
10732 ins = handle_isinst (cfg, klass, *sp, context_used);
10733 CHECK_CFG_EXCEPTION;
10739 case CEE_UNBOX_ANY: {
10740 MonoInst *res, *addr;
10745 token = read32 (ip + 1);
10746 klass = mini_get_class (method, token, generic_context);
10747 CHECK_TYPELOAD (klass);
10749 mono_save_token_info (cfg, image, token, klass);
10751 context_used = mini_class_check_context_used (cfg, klass);
10753 if (mini_is_gsharedvt_klass (klass)) {
10754 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10756 } else if (generic_class_is_reference_type (cfg, klass)) {
10757 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10758 CHECK_CFG_EXCEPTION;
10759 } else if (mono_class_is_nullable (klass)) {
10760 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10762 addr = handle_unbox (cfg, klass, sp, context_used);
10764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10775 MonoClass *enum_class;
10776 MonoMethod *has_flag;
10782 token = read32 (ip + 1);
10783 klass = mini_get_class (method, token, generic_context);
10784 CHECK_TYPELOAD (klass);
10786 mono_save_token_info (cfg, image, token, klass);
10788 context_used = mini_class_check_context_used (cfg, klass);
10790 if (generic_class_is_reference_type (cfg, klass)) {
10796 if (klass == mono_defaults.void_class)
10798 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10800 /* frequent check in generic code: box (struct), brtrue */
10805 * <push int/long ptr>
10808 * constrained. MyFlags
10809 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10811 * If we find this sequence and the operand types on box and constrained
10812 * are equal, we can emit a specialized instruction sequence instead of
10813 * the very slow HasFlag () call.
10815 if ((cfg->opt & MONO_OPT_INTRINS) &&
10816 /* Cheap checks first. */
10817 ip + 5 + 6 + 5 < end &&
10818 ip [5] == CEE_PREFIX1 &&
10819 ip [6] == CEE_CONSTRAINED_ &&
10820 ip [11] == CEE_CALLVIRT &&
10821 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10822 mono_class_is_enum (klass) &&
10823 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10824 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10825 has_flag->klass == mono_defaults.enum_class &&
10826 !strcmp (has_flag->name, "HasFlag") &&
10827 has_flag->signature->hasthis &&
10828 has_flag->signature->param_count == 1) {
10829 CHECK_TYPELOAD (enum_class);
10831 if (enum_class == klass) {
10832 MonoInst *enum_this, *enum_flag;
10837 enum_this = sp [0];
10838 enum_flag = sp [1];
10840 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10845 // FIXME: LLVM can't handle the inconsistent bb linking
10846 if (!mono_class_is_nullable (klass) &&
10847 !mini_is_gsharedvt_klass (klass) &&
10848 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10849 (ip [5] == CEE_BRTRUE ||
10850 ip [5] == CEE_BRTRUE_S ||
10851 ip [5] == CEE_BRFALSE ||
10852 ip [5] == CEE_BRFALSE_S)) {
10853 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10855 MonoBasicBlock *true_bb, *false_bb;
10859 if (cfg->verbose_level > 3) {
10860 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10861 printf ("<box+brtrue opt>\n");
10866 case CEE_BRFALSE_S:
10869 target = ip + 1 + (signed char)(*ip);
10876 target = ip + 4 + (gint)(read32 (ip));
10880 g_assert_not_reached ();
10884 * We need to link both bblocks, since it is needed for handling stack
10885 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10886 * Branching to only one of them would lead to inconsistencies, so
10887 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10889 GET_BBLOCK (cfg, true_bb, target);
10890 GET_BBLOCK (cfg, false_bb, ip);
10892 mono_link_bblock (cfg, cfg->cbb, true_bb);
10893 mono_link_bblock (cfg, cfg->cbb, false_bb);
10895 if (sp != stack_start) {
10896 handle_stack_args (cfg, stack_start, sp - stack_start);
10898 CHECK_UNVERIFIABLE (cfg);
10901 if (COMPILE_LLVM (cfg)) {
10902 dreg = alloc_ireg (cfg);
10903 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10906 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10908 /* The JIT can't eliminate the iconst+compare */
10909 MONO_INST_NEW (cfg, ins, OP_BR);
10910 ins->inst_target_bb = is_true ? true_bb : false_bb;
10911 MONO_ADD_INS (cfg->cbb, ins);
10914 start_new_bblock = 1;
10918 *sp++ = handle_box (cfg, val, klass, context_used);
10920 CHECK_CFG_EXCEPTION;
10929 token = read32 (ip + 1);
10930 klass = mini_get_class (method, token, generic_context);
10931 CHECK_TYPELOAD (klass);
10933 mono_save_token_info (cfg, image, token, klass);
10935 context_used = mini_class_check_context_used (cfg, klass);
10937 if (mono_class_is_nullable (klass)) {
10940 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10941 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10945 ins = handle_unbox (cfg, klass, sp, context_used);
10958 MonoClassField *field;
10959 #ifndef DISABLE_REMOTING
10963 gboolean is_instance;
10965 gpointer addr = NULL;
10966 gboolean is_special_static;
10968 MonoInst *store_val = NULL;
10969 MonoInst *thread_ins;
10972 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10974 if (op == CEE_STFLD) {
10977 store_val = sp [1];
10982 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10984 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10987 if (op == CEE_STSFLD) {
10990 store_val = sp [0];
10995 token = read32 (ip + 1);
10996 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10997 field = mono_method_get_wrapper_data (method, token);
10998 klass = field->parent;
11001 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11004 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11005 FIELD_ACCESS_FAILURE (method, field);
11006 mono_class_init (klass);
11008 /* if the class is Critical then transparent code cannot access it's fields */
11009 if (!is_instance && mono_security_core_clr_enabled ())
11010 ensure_method_is_allowed_to_access_field (cfg, method, field);
11012 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11013 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11014 if (mono_security_core_clr_enabled ())
11015 ensure_method_is_allowed_to_access_field (cfg, method, field);
11018 ftype = mono_field_get_type (field);
11021 * LDFLD etc. is usable on static fields as well, so convert those cases to
11024 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11036 g_assert_not_reached ();
11038 is_instance = FALSE;
11041 context_used = mini_class_check_context_used (cfg, klass);
11043 /* INSTANCE CASE */
11045 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11046 if (op == CEE_STFLD) {
11047 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11049 #ifndef DISABLE_REMOTING
11050 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11051 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11052 MonoInst *iargs [5];
11054 GSHAREDVT_FAILURE (op);
11056 iargs [0] = sp [0];
11057 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11058 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11059 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11061 iargs [4] = sp [1];
11063 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11064 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11065 iargs, ip, cfg->real_offset, TRUE);
11066 CHECK_CFG_EXCEPTION;
11067 g_assert (costs > 0);
11069 cfg->real_offset += 5;
11071 inline_costs += costs;
11073 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11080 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11082 if (mini_is_gsharedvt_klass (klass)) {
11083 MonoInst *offset_ins;
11085 context_used = mini_class_check_context_used (cfg, klass);
11087 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11088 /* The value is offset by 1 */
11089 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11090 dreg = alloc_ireg_mp (cfg);
11091 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11092 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11093 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11095 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11097 if (sp [0]->opcode != OP_LDADDR)
11098 store->flags |= MONO_INST_FAULT;
11100 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11101 /* insert call to write barrier */
11105 dreg = alloc_ireg_mp (cfg);
11106 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11107 emit_write_barrier (cfg, ptr, sp [1]);
11110 store->flags |= ins_flag;
11117 #ifndef DISABLE_REMOTING
11118 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11119 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11120 MonoInst *iargs [4];
11122 GSHAREDVT_FAILURE (op);
11124 iargs [0] = sp [0];
11125 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11126 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11127 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11128 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11129 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11130 iargs, ip, cfg->real_offset, TRUE);
11131 CHECK_CFG_EXCEPTION;
11132 g_assert (costs > 0);
11134 cfg->real_offset += 5;
11138 inline_costs += costs;
11140 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11146 if (sp [0]->type == STACK_VTYPE) {
11149 /* Have to compute the address of the variable */
11151 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11153 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11155 g_assert (var->klass == klass);
11157 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11161 if (op == CEE_LDFLDA) {
11162 if (sp [0]->type == STACK_OBJ) {
11163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11164 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11167 dreg = alloc_ireg_mp (cfg);
11169 if (mini_is_gsharedvt_klass (klass)) {
11170 MonoInst *offset_ins;
11172 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11173 /* The value is offset by 1 */
11174 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11175 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11177 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11179 ins->klass = mono_class_from_mono_type (field->type);
11180 ins->type = STACK_MP;
11185 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11187 if (mini_is_gsharedvt_klass (klass)) {
11188 MonoInst *offset_ins;
11190 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11191 /* The value is offset by 1 */
11192 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11193 dreg = alloc_ireg_mp (cfg);
11194 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11195 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11197 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11199 load->flags |= ins_flag;
11200 if (sp [0]->opcode != OP_LDADDR)
11201 load->flags |= MONO_INST_FAULT;
11213 context_used = mini_class_check_context_used (cfg, klass);
11215 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11218 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11219 * to be called here.
11221 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11222 mono_class_vtable (cfg->domain, klass);
11223 CHECK_TYPELOAD (klass);
11225 mono_domain_lock (cfg->domain);
11226 if (cfg->domain->special_static_fields)
11227 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11228 mono_domain_unlock (cfg->domain);
11230 is_special_static = mono_class_field_is_special_static (field);
11232 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11233 thread_ins = mono_get_thread_intrinsic (cfg);
11237 /* Generate IR to compute the field address */
11238 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11240 * Fast access to TLS data
11241 * Inline version of get_thread_static_data () in
11245 int idx, static_data_reg, array_reg, dreg;
11247 GSHAREDVT_FAILURE (op);
11249 MONO_ADD_INS (cfg->cbb, thread_ins);
11250 static_data_reg = alloc_ireg (cfg);
11251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11253 if (cfg->compile_aot) {
11254 int offset_reg, offset2_reg, idx_reg;
11256 /* For TLS variables, this will return the TLS offset */
11257 EMIT_NEW_SFLDACONST (cfg, ins, field);
11258 offset_reg = ins->dreg;
11259 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11260 idx_reg = alloc_ireg (cfg);
11261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11262 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11263 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11264 array_reg = alloc_ireg (cfg);
11265 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11266 offset2_reg = alloc_ireg (cfg);
11267 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11268 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11269 dreg = alloc_ireg (cfg);
11270 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11272 offset = (gsize)addr & 0x7fffffff;
11273 idx = offset & 0x3f;
11275 array_reg = alloc_ireg (cfg);
11276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11277 dreg = alloc_ireg (cfg);
11278 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11280 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11281 (cfg->compile_aot && is_special_static) ||
11282 (context_used && is_special_static)) {
11283 MonoInst *iargs [2];
11285 g_assert (field->parent);
11286 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11287 if (context_used) {
11288 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11289 field, MONO_RGCTX_INFO_CLASS_FIELD);
11291 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11293 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11294 } else if (context_used) {
11295 MonoInst *static_data;
11298 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11299 method->klass->name_space, method->klass->name, method->name,
11300 depth, field->offset);
11303 if (mono_class_needs_cctor_run (klass, method))
11304 emit_class_init (cfg, klass);
11307 * The pointer we're computing here is
11309 * super_info.static_data + field->offset
11311 static_data = emit_get_rgctx_klass (cfg, context_used,
11312 klass, MONO_RGCTX_INFO_STATIC_DATA);
11314 if (mini_is_gsharedvt_klass (klass)) {
11315 MonoInst *offset_ins;
11317 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11318 /* The value is offset by 1 */
11319 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11320 dreg = alloc_ireg_mp (cfg);
11321 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11322 } else if (field->offset == 0) {
11325 int addr_reg = mono_alloc_preg (cfg);
11326 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11328 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11329 MonoInst *iargs [2];
11331 g_assert (field->parent);
11332 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11333 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11334 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11336 MonoVTable *vtable = NULL;
11338 if (!cfg->compile_aot)
11339 vtable = mono_class_vtable (cfg->domain, klass);
11340 CHECK_TYPELOAD (klass);
11343 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11344 if (!(g_slist_find (class_inits, klass))) {
11345 emit_class_init (cfg, klass);
11346 if (cfg->verbose_level > 2)
11347 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11348 class_inits = g_slist_prepend (class_inits, klass);
11351 if (cfg->run_cctors) {
11353 /* This ensures that inlining cannot be triggered */
11354 /* .cctors: too many apps depend on them */
11355 /* running with a specific order... */
11357 if (! vtable->initialized)
11358 INLINE_FAILURE ("class init");
11359 ex = mono_runtime_class_init_full (vtable, FALSE);
11361 set_exception_object (cfg, ex);
11362 goto exception_exit;
11366 if (cfg->compile_aot)
11367 EMIT_NEW_SFLDACONST (cfg, ins, field);
11370 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11372 EMIT_NEW_PCONST (cfg, ins, addr);
11375 MonoInst *iargs [1];
11376 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11377 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11381 /* Generate IR to do the actual load/store operation */
11383 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11384 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11385 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11388 if (op == CEE_LDSFLDA) {
11389 ins->klass = mono_class_from_mono_type (ftype);
11390 ins->type = STACK_PTR;
11392 } else if (op == CEE_STSFLD) {
11395 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11396 store->flags |= ins_flag;
11398 gboolean is_const = FALSE;
11399 MonoVTable *vtable = NULL;
11400 gpointer addr = NULL;
11402 if (!context_used) {
11403 vtable = mono_class_vtable (cfg->domain, klass);
11404 CHECK_TYPELOAD (klass);
11406 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11407 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11408 int ro_type = ftype->type;
11410 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11411 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11412 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11415 GSHAREDVT_FAILURE (op);
11417 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11420 case MONO_TYPE_BOOLEAN:
11422 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11426 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11429 case MONO_TYPE_CHAR:
11431 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11435 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11440 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11444 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11449 case MONO_TYPE_PTR:
11450 case MONO_TYPE_FNPTR:
11451 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11452 type_to_eval_stack_type ((cfg), field->type, *sp);
11455 case MONO_TYPE_STRING:
11456 case MONO_TYPE_OBJECT:
11457 case MONO_TYPE_CLASS:
11458 case MONO_TYPE_SZARRAY:
11459 case MONO_TYPE_ARRAY:
11460 if (!mono_gc_is_moving ()) {
11461 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11462 type_to_eval_stack_type ((cfg), field->type, *sp);
11470 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11475 case MONO_TYPE_VALUETYPE:
11485 CHECK_STACK_OVF (1);
11487 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11488 load->flags |= ins_flag;
11494 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11495 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11496 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11507 token = read32 (ip + 1);
11508 klass = mini_get_class (method, token, generic_context);
11509 CHECK_TYPELOAD (klass);
11510 if (ins_flag & MONO_INST_VOLATILE) {
11511 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11512 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11514 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11515 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11516 ins->flags |= ins_flag;
11517 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11518 generic_class_is_reference_type (cfg, klass)) {
11519 /* insert call to write barrier */
11520 emit_write_barrier (cfg, sp [0], sp [1]);
11532 const char *data_ptr;
11534 guint32 field_token;
11540 token = read32 (ip + 1);
11542 klass = mini_get_class (method, token, generic_context);
11543 CHECK_TYPELOAD (klass);
11545 context_used = mini_class_check_context_used (cfg, klass);
11547 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11548 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11549 ins->sreg1 = sp [0]->dreg;
11550 ins->type = STACK_I4;
11551 ins->dreg = alloc_ireg (cfg);
11552 MONO_ADD_INS (cfg->cbb, ins);
11553 *sp = mono_decompose_opcode (cfg, ins);
11556 if (context_used) {
11557 MonoInst *args [3];
11558 MonoClass *array_class = mono_array_class_get (klass, 1);
11559 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11561 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11564 args [0] = emit_get_rgctx_klass (cfg, context_used,
11565 array_class, MONO_RGCTX_INFO_VTABLE);
11570 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11572 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11574 if (cfg->opt & MONO_OPT_SHARED) {
11575 /* Decompose now to avoid problems with references to the domainvar */
11576 MonoInst *iargs [3];
11578 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11579 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11580 iargs [2] = sp [0];
11582 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11584 /* Decompose later since it is needed by abcrem */
11585 MonoClass *array_type = mono_array_class_get (klass, 1);
11586 mono_class_vtable (cfg->domain, array_type);
11587 CHECK_TYPELOAD (array_type);
11589 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11590 ins->dreg = alloc_ireg_ref (cfg);
11591 ins->sreg1 = sp [0]->dreg;
11592 ins->inst_newa_class = klass;
11593 ins->type = STACK_OBJ;
11594 ins->klass = array_type;
11595 MONO_ADD_INS (cfg->cbb, ins);
11596 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11597 cfg->cbb->has_array_access = TRUE;
11599 /* Needed so mono_emit_load_get_addr () gets called */
11600 mono_get_got_var (cfg);
11610 * we inline/optimize the initialization sequence if possible.
11611 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11612 * for small sizes open code the memcpy
11613 * ensure the rva field is big enough
11615 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11616 MonoMethod *memcpy_method = get_memcpy_method ();
11617 MonoInst *iargs [3];
11618 int add_reg = alloc_ireg_mp (cfg);
11620 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11621 if (cfg->compile_aot) {
11622 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11624 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11626 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11627 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11636 if (sp [0]->type != STACK_OBJ)
11639 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11640 ins->dreg = alloc_preg (cfg);
11641 ins->sreg1 = sp [0]->dreg;
11642 ins->type = STACK_I4;
11643 /* This flag will be inherited by the decomposition */
11644 ins->flags |= MONO_INST_FAULT;
11645 MONO_ADD_INS (cfg->cbb, ins);
11646 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11647 cfg->cbb->has_array_access = TRUE;
11655 if (sp [0]->type != STACK_OBJ)
11658 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11660 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11661 CHECK_TYPELOAD (klass);
11662 /* we need to make sure that this array is exactly the type it needs
11663 * to be for correctness. the wrappers are lax with their usage
11664 * so we need to ignore them here
11666 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11667 MonoClass *array_class = mono_array_class_get (klass, 1);
11668 mini_emit_check_array_type (cfg, sp [0], array_class);
11669 CHECK_TYPELOAD (array_class);
11673 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11678 case CEE_LDELEM_I1:
11679 case CEE_LDELEM_U1:
11680 case CEE_LDELEM_I2:
11681 case CEE_LDELEM_U2:
11682 case CEE_LDELEM_I4:
11683 case CEE_LDELEM_U4:
11684 case CEE_LDELEM_I8:
11686 case CEE_LDELEM_R4:
11687 case CEE_LDELEM_R8:
11688 case CEE_LDELEM_REF: {
11694 if (*ip == CEE_LDELEM) {
11696 token = read32 (ip + 1);
11697 klass = mini_get_class (method, token, generic_context);
11698 CHECK_TYPELOAD (klass);
11699 mono_class_init (klass);
11702 klass = array_access_to_klass (*ip);
11704 if (sp [0]->type != STACK_OBJ)
11707 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11709 if (mini_is_gsharedvt_variable_klass (klass)) {
11710 // FIXME-VT: OP_ICONST optimization
11711 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11712 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11713 ins->opcode = OP_LOADV_MEMBASE;
11714 } else if (sp [1]->opcode == OP_ICONST) {
11715 int array_reg = sp [0]->dreg;
11716 int index_reg = sp [1]->dreg;
11717 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11719 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11720 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11722 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11723 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11726 if (*ip == CEE_LDELEM)
11733 case CEE_STELEM_I1:
11734 case CEE_STELEM_I2:
11735 case CEE_STELEM_I4:
11736 case CEE_STELEM_I8:
11737 case CEE_STELEM_R4:
11738 case CEE_STELEM_R8:
11739 case CEE_STELEM_REF:
11744 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11746 if (*ip == CEE_STELEM) {
11748 token = read32 (ip + 1);
11749 klass = mini_get_class (method, token, generic_context);
11750 CHECK_TYPELOAD (klass);
11751 mono_class_init (klass);
11754 klass = array_access_to_klass (*ip);
11756 if (sp [0]->type != STACK_OBJ)
11759 emit_array_store (cfg, klass, sp, TRUE);
11761 if (*ip == CEE_STELEM)
11768 case CEE_CKFINITE: {
11772 if (cfg->llvm_only) {
11773 MonoInst *iargs [1];
11775 iargs [0] = sp [0];
11776 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11778 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11779 ins->sreg1 = sp [0]->dreg;
11780 ins->dreg = alloc_freg (cfg);
11781 ins->type = STACK_R8;
11782 MONO_ADD_INS (cfg->cbb, ins);
11784 *sp++ = mono_decompose_opcode (cfg, ins);
11790 case CEE_REFANYVAL: {
11791 MonoInst *src_var, *src;
11793 int klass_reg = alloc_preg (cfg);
11794 int dreg = alloc_preg (cfg);
11796 GSHAREDVT_FAILURE (*ip);
11799 MONO_INST_NEW (cfg, ins, *ip);
11802 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11803 CHECK_TYPELOAD (klass);
11805 context_used = mini_class_check_context_used (cfg, klass);
11808 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11810 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11811 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11814 if (context_used) {
11815 MonoInst *klass_ins;
11817 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11818 klass, MONO_RGCTX_INFO_KLASS);
11821 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11822 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11824 mini_emit_class_check (cfg, klass_reg, klass);
11826 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11827 ins->type = STACK_MP;
11828 ins->klass = klass;
11833 case CEE_MKREFANY: {
11834 MonoInst *loc, *addr;
11836 GSHAREDVT_FAILURE (*ip);
11839 MONO_INST_NEW (cfg, ins, *ip);
11842 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11843 CHECK_TYPELOAD (klass);
11845 context_used = mini_class_check_context_used (cfg, klass);
11847 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11848 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11850 if (context_used) {
11851 MonoInst *const_ins;
11852 int type_reg = alloc_preg (cfg);
11854 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11856 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11857 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11858 } else if (cfg->compile_aot) {
11859 int const_reg = alloc_preg (cfg);
11860 int type_reg = alloc_preg (cfg);
11862 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11863 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11867 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11868 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11872 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11873 ins->type = STACK_VTYPE;
11874 ins->klass = mono_defaults.typed_reference_class;
11879 case CEE_LDTOKEN: {
11881 MonoClass *handle_class;
11883 CHECK_STACK_OVF (1);
11886 n = read32 (ip + 1);
11888 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11889 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11890 handle = mono_method_get_wrapper_data (method, n);
11891 handle_class = mono_method_get_wrapper_data (method, n + 1);
11892 if (handle_class == mono_defaults.typehandle_class)
11893 handle = &((MonoClass*)handle)->byval_arg;
11896 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11901 mono_class_init (handle_class);
11902 if (cfg->gshared) {
11903 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11904 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11905 /* This case handles ldtoken
11906 of an open type, like for
11909 } else if (handle_class == mono_defaults.typehandle_class) {
11910 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11911 } else if (handle_class == mono_defaults.fieldhandle_class)
11912 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11913 else if (handle_class == mono_defaults.methodhandle_class)
11914 context_used = mini_method_check_context_used (cfg, handle);
11916 g_assert_not_reached ();
11919 if ((cfg->opt & MONO_OPT_SHARED) &&
11920 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11921 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11922 MonoInst *addr, *vtvar, *iargs [3];
11923 int method_context_used;
11925 method_context_used = mini_method_check_context_used (cfg, method);
11927 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11929 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11930 EMIT_NEW_ICONST (cfg, iargs [1], n);
11931 if (method_context_used) {
11932 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11933 method, MONO_RGCTX_INFO_METHOD);
11934 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11936 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11937 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11939 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11941 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11943 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11945 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11946 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11947 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11948 (cmethod->klass == mono_defaults.systemtype_class) &&
11949 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11950 MonoClass *tclass = mono_class_from_mono_type (handle);
11952 mono_class_init (tclass);
11953 if (context_used) {
11954 ins = emit_get_rgctx_klass (cfg, context_used,
11955 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11956 } else if (cfg->compile_aot) {
11957 if (method->wrapper_type) {
11958 mono_error_init (&error); //got to do it since there are multiple conditionals below
11959 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11960 /* Special case for static synchronized wrappers */
11961 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11963 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11964 /* FIXME: n is not a normal token */
11966 EMIT_NEW_PCONST (cfg, ins, NULL);
11969 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11972 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11974 ins->type = STACK_OBJ;
11975 ins->klass = cmethod->klass;
11978 MonoInst *addr, *vtvar;
11980 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11982 if (context_used) {
11983 if (handle_class == mono_defaults.typehandle_class) {
11984 ins = emit_get_rgctx_klass (cfg, context_used,
11985 mono_class_from_mono_type (handle),
11986 MONO_RGCTX_INFO_TYPE);
11987 } else if (handle_class == mono_defaults.methodhandle_class) {
11988 ins = emit_get_rgctx_method (cfg, context_used,
11989 handle, MONO_RGCTX_INFO_METHOD);
11990 } else if (handle_class == mono_defaults.fieldhandle_class) {
11991 ins = emit_get_rgctx_field (cfg, context_used,
11992 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11994 g_assert_not_reached ();
11996 } else if (cfg->compile_aot) {
11997 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11999 EMIT_NEW_PCONST (cfg, ins, handle);
12001 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12002 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12003 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12013 MONO_INST_NEW (cfg, ins, OP_THROW);
12015 ins->sreg1 = sp [0]->dreg;
12017 cfg->cbb->out_of_line = TRUE;
12018 MONO_ADD_INS (cfg->cbb, ins);
12019 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12020 MONO_ADD_INS (cfg->cbb, ins);
12023 link_bblock (cfg, cfg->cbb, end_bblock);
12024 start_new_bblock = 1;
12025 /* This can complicate code generation for llvm since the return value might not be defined */
12026 if (COMPILE_LLVM (cfg))
12027 INLINE_FAILURE ("throw");
12029 case CEE_ENDFINALLY:
12030 /* mono_save_seq_point_info () depends on this */
12031 if (sp != stack_start)
12032 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12033 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12034 MONO_ADD_INS (cfg->cbb, ins);
12036 start_new_bblock = 1;
12039 * Control will leave the method so empty the stack, otherwise
12040 * the next basic block will start with a nonempty stack.
12042 while (sp != stack_start) {
12047 case CEE_LEAVE_S: {
12050 if (*ip == CEE_LEAVE) {
12052 target = ip + 5 + (gint32)read32(ip + 1);
12055 target = ip + 2 + (signed char)(ip [1]);
12058 /* empty the stack */
12059 while (sp != stack_start) {
12064 * If this leave statement is in a catch block, check for a
12065 * pending exception, and rethrow it if necessary.
12066 * We avoid doing this in runtime invoke wrappers, since those are called
12067 * by native code which expects the wrapper to catch all exceptions.
12069 for (i = 0; i < header->num_clauses; ++i) {
12070 MonoExceptionClause *clause = &header->clauses [i];
12073 * Use <= in the final comparison to handle clauses with multiple
12074 * leave statements, like in bug #78024.
12075 * The ordering of the exception clauses guarantees that we find the
12076 * innermost clause.
12078 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12080 MonoBasicBlock *dont_throw;
12085 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12088 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12090 NEW_BBLOCK (cfg, dont_throw);
12093 * Currently, we always rethrow the abort exception, despite the
12094 * fact that this is not correct. See thread6.cs for an example.
12095 * But propagating the abort exception is more important than
12096 * getting the semantics right.
12098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12100 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12102 MONO_START_BB (cfg, dont_throw);
12107 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12110 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12112 MonoExceptionClause *clause;
12114 for (tmp = handlers; tmp; tmp = tmp->next) {
12115 clause = tmp->data;
12116 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12118 link_bblock (cfg, cfg->cbb, tblock);
12119 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12120 ins->inst_target_bb = tblock;
12121 ins->inst_eh_block = clause;
12122 MONO_ADD_INS (cfg->cbb, ins);
12123 cfg->cbb->has_call_handler = 1;
12124 if (COMPILE_LLVM (cfg)) {
12125 MonoBasicBlock *target_bb;
12128 * Link the finally bblock with the target, since it will
12129 * conceptually branch there.
12130 * FIXME: Have to link the bblock containing the endfinally.
12132 GET_BBLOCK (cfg, target_bb, target);
12133 link_bblock (cfg, tblock, target_bb);
12136 g_list_free (handlers);
12139 MONO_INST_NEW (cfg, ins, OP_BR);
12140 MONO_ADD_INS (cfg->cbb, ins);
12141 GET_BBLOCK (cfg, tblock, target);
12142 link_bblock (cfg, cfg->cbb, tblock);
12143 ins->inst_target_bb = tblock;
12145 start_new_bblock = 1;
12147 if (*ip == CEE_LEAVE)
12156 * Mono specific opcodes
12158 case MONO_CUSTOM_PREFIX: {
12160 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12164 case CEE_MONO_ICALL: {
12166 MonoJitICallInfo *info;
12168 token = read32 (ip + 2);
12169 func = mono_method_get_wrapper_data (method, token);
12170 info = mono_find_jit_icall_by_addr (func);
12172 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12175 CHECK_STACK (info->sig->param_count);
12176 sp -= info->sig->param_count;
12178 ins = mono_emit_jit_icall (cfg, info->func, sp);
12179 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12183 inline_costs += 10 * num_calls++;
12187 case CEE_MONO_LDPTR_CARD_TABLE:
12188 case CEE_MONO_LDPTR_NURSERY_START:
12189 case CEE_MONO_LDPTR_NURSERY_BITS:
12190 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12191 CHECK_STACK_OVF (1);
12194 case CEE_MONO_LDPTR_CARD_TABLE:
12195 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12197 case CEE_MONO_LDPTR_NURSERY_START:
12198 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12200 case CEE_MONO_LDPTR_NURSERY_BITS:
12201 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12203 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12204 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12210 inline_costs += 10 * num_calls++;
12213 case CEE_MONO_LDPTR: {
12216 CHECK_STACK_OVF (1);
12218 token = read32 (ip + 2);
12220 ptr = mono_method_get_wrapper_data (method, token);
12221 EMIT_NEW_PCONST (cfg, ins, ptr);
12224 inline_costs += 10 * num_calls++;
12225 /* Can't embed random pointers into AOT code */
12229 case CEE_MONO_JIT_ICALL_ADDR: {
12230 MonoJitICallInfo *callinfo;
12233 CHECK_STACK_OVF (1);
12235 token = read32 (ip + 2);
12237 ptr = mono_method_get_wrapper_data (method, token);
12238 callinfo = mono_find_jit_icall_by_addr (ptr);
12239 g_assert (callinfo);
12240 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12243 inline_costs += 10 * num_calls++;
12246 case CEE_MONO_ICALL_ADDR: {
12247 MonoMethod *cmethod;
12250 CHECK_STACK_OVF (1);
12252 token = read32 (ip + 2);
12254 cmethod = mono_method_get_wrapper_data (method, token);
12256 if (cfg->compile_aot) {
12257 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12259 ptr = mono_lookup_internal_call (cmethod);
12261 EMIT_NEW_PCONST (cfg, ins, ptr);
12267 case CEE_MONO_VTADDR: {
12268 MonoInst *src_var, *src;
12274 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12275 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12280 case CEE_MONO_NEWOBJ: {
12281 MonoInst *iargs [2];
12283 CHECK_STACK_OVF (1);
12285 token = read32 (ip + 2);
12286 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12287 mono_class_init (klass);
12288 NEW_DOMAINCONST (cfg, iargs [0]);
12289 MONO_ADD_INS (cfg->cbb, iargs [0]);
12290 NEW_CLASSCONST (cfg, iargs [1], klass);
12291 MONO_ADD_INS (cfg->cbb, iargs [1]);
12292 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12294 inline_costs += 10 * num_calls++;
12297 case CEE_MONO_OBJADDR:
12300 MONO_INST_NEW (cfg, ins, OP_MOVE);
12301 ins->dreg = alloc_ireg_mp (cfg);
12302 ins->sreg1 = sp [0]->dreg;
12303 ins->type = STACK_MP;
12304 MONO_ADD_INS (cfg->cbb, ins);
12308 case CEE_MONO_LDNATIVEOBJ:
12310 * Similar to LDOBJ, but instead load the unmanaged
12311 * representation of the vtype to the stack.
12316 token = read32 (ip + 2);
12317 klass = mono_method_get_wrapper_data (method, token);
12318 g_assert (klass->valuetype);
12319 mono_class_init (klass);
12322 MonoInst *src, *dest, *temp;
12325 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12326 temp->backend.is_pinvoke = 1;
12327 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12328 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12330 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12331 dest->type = STACK_VTYPE;
12332 dest->klass = klass;
12338 case CEE_MONO_RETOBJ: {
12340 * Same as RET, but return the native representation of a vtype
12343 g_assert (cfg->ret);
12344 g_assert (mono_method_signature (method)->pinvoke);
12349 token = read32 (ip + 2);
12350 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12352 if (!cfg->vret_addr) {
12353 g_assert (cfg->ret_var_is_local);
12355 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12357 EMIT_NEW_RETLOADA (cfg, ins);
12359 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12361 if (sp != stack_start)
12364 MONO_INST_NEW (cfg, ins, OP_BR);
12365 ins->inst_target_bb = end_bblock;
12366 MONO_ADD_INS (cfg->cbb, ins);
12367 link_bblock (cfg, cfg->cbb, end_bblock);
12368 start_new_bblock = 1;
12372 case CEE_MONO_CISINST:
12373 case CEE_MONO_CCASTCLASS: {
12378 token = read32 (ip + 2);
12379 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12380 if (ip [1] == CEE_MONO_CISINST)
12381 ins = handle_cisinst (cfg, klass, sp [0]);
12383 ins = handle_ccastclass (cfg, klass, sp [0]);
12388 case CEE_MONO_SAVE_LMF:
12389 case CEE_MONO_RESTORE_LMF:
12392 case CEE_MONO_CLASSCONST:
12393 CHECK_STACK_OVF (1);
12395 token = read32 (ip + 2);
12396 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12399 inline_costs += 10 * num_calls++;
12401 case CEE_MONO_NOT_TAKEN:
12402 cfg->cbb->out_of_line = TRUE;
12405 case CEE_MONO_TLS: {
12408 CHECK_STACK_OVF (1);
12410 key = (gint32)read32 (ip + 2);
12411 g_assert (key < TLS_KEY_NUM);
12413 ins = mono_create_tls_get (cfg, key);
12415 if (cfg->compile_aot) {
12417 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12418 ins->dreg = alloc_preg (cfg);
12419 ins->type = STACK_PTR;
12421 g_assert_not_reached ();
12424 ins->type = STACK_PTR;
12425 MONO_ADD_INS (cfg->cbb, ins);
12430 case CEE_MONO_DYN_CALL: {
12431 MonoCallInst *call;
12433 /* It would be easier to call a trampoline, but that would put an
12434 * extra frame on the stack, confusing exception handling. So
12435 * implement it inline using an opcode for now.
12438 if (!cfg->dyn_call_var) {
12439 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12440 /* prevent it from being register allocated */
12441 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12444 /* Has to use a call inst since it local regalloc expects it */
12445 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12446 ins = (MonoInst*)call;
12448 ins->sreg1 = sp [0]->dreg;
12449 ins->sreg2 = sp [1]->dreg;
12450 MONO_ADD_INS (cfg->cbb, ins);
12452 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12455 inline_costs += 10 * num_calls++;
12459 case CEE_MONO_MEMORY_BARRIER: {
12461 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12465 case CEE_MONO_JIT_ATTACH: {
12466 MonoInst *args [16], *domain_ins;
12467 MonoInst *ad_ins, *jit_tls_ins;
12468 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12470 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12472 EMIT_NEW_PCONST (cfg, ins, NULL);
12473 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12475 ad_ins = mono_get_domain_intrinsic (cfg);
12476 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12478 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12479 NEW_BBLOCK (cfg, next_bb);
12480 NEW_BBLOCK (cfg, call_bb);
12482 if (cfg->compile_aot) {
12483 /* AOT code is only used in the root domain */
12484 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12486 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12488 MONO_ADD_INS (cfg->cbb, ad_ins);
12489 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12492 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12497 MONO_START_BB (cfg, call_bb);
12500 if (cfg->compile_aot) {
12501 /* AOT code is only used in the root domain */
12502 EMIT_NEW_PCONST (cfg, args [0], NULL);
12504 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12506 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12507 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12510 MONO_START_BB (cfg, next_bb);
12514 case CEE_MONO_JIT_DETACH: {
12515 MonoInst *args [16];
12517 /* Restore the original domain */
12518 dreg = alloc_ireg (cfg);
12519 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12520 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12525 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12531 case CEE_PREFIX1: {
12534 case CEE_ARGLIST: {
12535 /* somewhat similar to LDTOKEN */
12536 MonoInst *addr, *vtvar;
12537 CHECK_STACK_OVF (1);
12538 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12540 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12541 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12543 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12544 ins->type = STACK_VTYPE;
12545 ins->klass = mono_defaults.argumenthandle_class;
12555 MonoInst *cmp, *arg1, *arg2;
12563 * The following transforms:
12564 * CEE_CEQ into OP_CEQ
12565 * CEE_CGT into OP_CGT
12566 * CEE_CGT_UN into OP_CGT_UN
12567 * CEE_CLT into OP_CLT
12568 * CEE_CLT_UN into OP_CLT_UN
12570 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12572 MONO_INST_NEW (cfg, ins, cmp->opcode);
12573 cmp->sreg1 = arg1->dreg;
12574 cmp->sreg2 = arg2->dreg;
12575 type_from_op (cfg, cmp, arg1, arg2);
12577 add_widen_op (cfg, cmp, &arg1, &arg2);
12578 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12579 cmp->opcode = OP_LCOMPARE;
12580 else if (arg1->type == STACK_R4)
12581 cmp->opcode = OP_RCOMPARE;
12582 else if (arg1->type == STACK_R8)
12583 cmp->opcode = OP_FCOMPARE;
12585 cmp->opcode = OP_ICOMPARE;
12586 MONO_ADD_INS (cfg->cbb, cmp);
12587 ins->type = STACK_I4;
12588 ins->dreg = alloc_dreg (cfg, ins->type);
12589 type_from_op (cfg, ins, arg1, arg2);
12591 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12593 * The backends expect the fceq opcodes to do the
12596 ins->sreg1 = cmp->sreg1;
12597 ins->sreg2 = cmp->sreg2;
12600 MONO_ADD_INS (cfg->cbb, ins);
12606 MonoInst *argconst;
12607 MonoMethod *cil_method;
12609 CHECK_STACK_OVF (1);
12611 n = read32 (ip + 2);
12612 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12613 if (!cmethod || mono_loader_get_last_error ())
12615 mono_class_init (cmethod->klass);
12617 mono_save_token_info (cfg, image, n, cmethod);
12619 context_used = mini_method_check_context_used (cfg, cmethod);
12621 cil_method = cmethod;
12622 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12623 METHOD_ACCESS_FAILURE (method, cil_method);
12625 if (mono_security_core_clr_enabled ())
12626 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12629 * Optimize the common case of ldftn+delegate creation
12631 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12632 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12633 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12634 MonoInst *target_ins, *handle_ins;
12635 MonoMethod *invoke;
12636 int invoke_context_used;
12638 invoke = mono_get_delegate_invoke (ctor_method->klass);
12639 if (!invoke || !mono_method_signature (invoke))
12642 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12644 target_ins = sp [-1];
12646 if (mono_security_core_clr_enabled ())
12647 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12649 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12650 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12651 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12653 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12657 /* FIXME: SGEN support */
12658 if (invoke_context_used == 0) {
12660 if (cfg->verbose_level > 3)
12661 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12662 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12665 CHECK_CFG_EXCEPTION;
12675 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12676 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12680 inline_costs += 10 * num_calls++;
12683 case CEE_LDVIRTFTN: {
12684 MonoInst *args [2];
12688 n = read32 (ip + 2);
12689 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12690 if (!cmethod || mono_loader_get_last_error ())
12692 mono_class_init (cmethod->klass);
12694 context_used = mini_method_check_context_used (cfg, cmethod);
12696 if (mono_security_core_clr_enabled ())
12697 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12700 * Optimize the common case of ldvirtftn+delegate creation
12702 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12703 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12704 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12705 MonoInst *target_ins, *handle_ins;
12706 MonoMethod *invoke;
12707 int invoke_context_used;
12708 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12710 invoke = mono_get_delegate_invoke (ctor_method->klass);
12711 if (!invoke || !mono_method_signature (invoke))
12714 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12716 target_ins = sp [-1];
12718 if (mono_security_core_clr_enabled ())
12719 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12721 /* FIXME: SGEN support */
12722 if (invoke_context_used == 0 || cfg->llvm_only) {
12724 if (cfg->verbose_level > 3)
12725 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12726 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12729 CHECK_CFG_EXCEPTION;
12742 args [1] = emit_get_rgctx_method (cfg, context_used,
12743 cmethod, MONO_RGCTX_INFO_METHOD);
12746 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12748 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12751 inline_costs += 10 * num_calls++;
12755 CHECK_STACK_OVF (1);
12757 n = read16 (ip + 2);
12759 EMIT_NEW_ARGLOAD (cfg, ins, n);
12764 CHECK_STACK_OVF (1);
12766 n = read16 (ip + 2);
12768 NEW_ARGLOADA (cfg, ins, n);
12769 MONO_ADD_INS (cfg->cbb, ins);
12777 n = read16 (ip + 2);
12779 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12781 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12785 CHECK_STACK_OVF (1);
12787 n = read16 (ip + 2);
12789 EMIT_NEW_LOCLOAD (cfg, ins, n);
12794 unsigned char *tmp_ip;
12795 CHECK_STACK_OVF (1);
12797 n = read16 (ip + 2);
12800 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12806 EMIT_NEW_LOCLOADA (cfg, ins, n);
12815 n = read16 (ip + 2);
12817 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12819 emit_stloc_ir (cfg, sp, header, n);
12826 if (sp != stack_start)
12828 if (cfg->method != method)
12830 * Inlining this into a loop in a parent could lead to
12831 * stack overflows which is different behavior than the
12832 * non-inlined case, thus disable inlining in this case.
12834 INLINE_FAILURE("localloc");
12836 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12837 ins->dreg = alloc_preg (cfg);
12838 ins->sreg1 = sp [0]->dreg;
12839 ins->type = STACK_PTR;
12840 MONO_ADD_INS (cfg->cbb, ins);
12842 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12844 ins->flags |= MONO_INST_INIT;
12849 case CEE_ENDFILTER: {
12850 MonoExceptionClause *clause, *nearest;
12855 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12857 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12858 ins->sreg1 = (*sp)->dreg;
12859 MONO_ADD_INS (cfg->cbb, ins);
12860 start_new_bblock = 1;
12864 for (cc = 0; cc < header->num_clauses; ++cc) {
12865 clause = &header->clauses [cc];
12866 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12867 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12868 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12871 g_assert (nearest);
12872 if ((ip - header->code) != nearest->handler_offset)
12877 case CEE_UNALIGNED_:
12878 ins_flag |= MONO_INST_UNALIGNED;
12879 /* FIXME: record alignment? we can assume 1 for now */
12883 case CEE_VOLATILE_:
12884 ins_flag |= MONO_INST_VOLATILE;
12888 ins_flag |= MONO_INST_TAILCALL;
12889 cfg->flags |= MONO_CFG_HAS_TAIL;
12890 /* Can't inline tail calls at this time */
12891 inline_costs += 100000;
12898 token = read32 (ip + 2);
12899 klass = mini_get_class (method, token, generic_context);
12900 CHECK_TYPELOAD (klass);
12901 if (generic_class_is_reference_type (cfg, klass))
12902 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12904 mini_emit_initobj (cfg, *sp, NULL, klass);
12908 case CEE_CONSTRAINED_:
12910 token = read32 (ip + 2);
12911 constrained_class = mini_get_class (method, token, generic_context);
12912 CHECK_TYPELOAD (constrained_class);
12916 case CEE_INITBLK: {
12917 MonoInst *iargs [3];
12921 /* Skip optimized paths for volatile operations. */
12922 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12923 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12924 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12925 /* emit_memset only works when val == 0 */
12926 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12929 iargs [0] = sp [0];
12930 iargs [1] = sp [1];
12931 iargs [2] = sp [2];
12932 if (ip [1] == CEE_CPBLK) {
12934 * FIXME: It's unclear whether we should be emitting both the acquire
12935 * and release barriers for cpblk. It is technically both a load and
12936 * store operation, so it seems like that's the sensible thing to do.
12938 * FIXME: We emit full barriers on both sides of the operation for
12939 * simplicity. We should have a separate atomic memcpy method instead.
12941 MonoMethod *memcpy_method = get_memcpy_method ();
12943 if (ins_flag & MONO_INST_VOLATILE)
12944 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12946 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12947 call->flags |= ins_flag;
12949 if (ins_flag & MONO_INST_VOLATILE)
12950 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12952 MonoMethod *memset_method = get_memset_method ();
12953 if (ins_flag & MONO_INST_VOLATILE) {
12954 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12955 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12957 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12958 call->flags |= ins_flag;
12969 ins_flag |= MONO_INST_NOTYPECHECK;
12971 ins_flag |= MONO_INST_NORANGECHECK;
12972 /* we ignore the no-nullcheck for now since we
12973 * really do it explicitly only when doing callvirt->call
12977 case CEE_RETHROW: {
12979 int handler_offset = -1;
12981 for (i = 0; i < header->num_clauses; ++i) {
12982 MonoExceptionClause *clause = &header->clauses [i];
12983 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12984 handler_offset = clause->handler_offset;
12989 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12991 if (handler_offset == -1)
12994 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12995 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12996 ins->sreg1 = load->dreg;
12997 MONO_ADD_INS (cfg->cbb, ins);
12999 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13000 MONO_ADD_INS (cfg->cbb, ins);
13003 link_bblock (cfg, cfg->cbb, end_bblock);
13004 start_new_bblock = 1;
13012 CHECK_STACK_OVF (1);
13014 token = read32 (ip + 2);
13015 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13016 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13019 val = mono_type_size (type, &ialign);
13021 MonoClass *klass = mini_get_class (method, token, generic_context);
13022 CHECK_TYPELOAD (klass);
13024 val = mono_type_size (&klass->byval_arg, &ialign);
13026 if (mini_is_gsharedvt_klass (klass))
13027 GSHAREDVT_FAILURE (*ip);
13029 EMIT_NEW_ICONST (cfg, ins, val);
13034 case CEE_REFANYTYPE: {
13035 MonoInst *src_var, *src;
13037 GSHAREDVT_FAILURE (*ip);
13043 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13045 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13046 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13047 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13052 case CEE_READONLY_:
13065 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13075 g_warning ("opcode 0x%02x not handled", *ip);
13079 if (start_new_bblock != 1)
13082 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13083 if (cfg->cbb->next_bb) {
13084 /* This could already be set because of inlining, #693905 */
13085 MonoBasicBlock *bb = cfg->cbb;
13087 while (bb->next_bb)
13089 bb->next_bb = end_bblock;
13091 cfg->cbb->next_bb = end_bblock;
13094 if (cfg->method == method && cfg->domainvar) {
13096 MonoInst *get_domain;
13098 cfg->cbb = init_localsbb;
13100 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13101 MONO_ADD_INS (cfg->cbb, get_domain);
13103 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13105 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13106 MONO_ADD_INS (cfg->cbb, store);
13109 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13110 if (cfg->compile_aot)
13111 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13112 mono_get_got_var (cfg);
13115 if (cfg->method == method && cfg->got_var)
13116 mono_emit_load_got_addr (cfg);
13118 if (init_localsbb) {
13119 cfg->cbb = init_localsbb;
13121 for (i = 0; i < header->num_locals; ++i) {
13122 emit_init_local (cfg, i, header->locals [i], init_locals);
13126 if (cfg->init_ref_vars && cfg->method == method) {
13127 /* Emit initialization for ref vars */
13128 // FIXME: Avoid duplication initialization for IL locals.
13129 for (i = 0; i < cfg->num_varinfo; ++i) {
13130 MonoInst *ins = cfg->varinfo [i];
13132 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13133 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13137 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13138 cfg->cbb = init_localsbb;
13139 emit_push_lmf (cfg);
13142 cfg->cbb = init_localsbb;
13143 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13146 MonoBasicBlock *bb;
13149 * Make seq points at backward branch targets interruptable.
13151 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13152 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13153 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13156 /* Add a sequence point for method entry/exit events */
13157 if (seq_points && cfg->gen_sdb_seq_points) {
13158 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13159 MONO_ADD_INS (init_localsbb, ins);
13160 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13161 MONO_ADD_INS (cfg->bb_exit, ins);
13165 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13166 * the code they refer to was dead (#11880).
13168 if (sym_seq_points) {
13169 for (i = 0; i < header->code_size; ++i) {
13170 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13173 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13174 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13181 if (cfg->method == method) {
13182 MonoBasicBlock *bb;
13183 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13184 bb->region = mono_find_block_region (cfg, bb->real_offset);
13186 mono_create_spvar_for_region (cfg, bb->region);
13187 if (cfg->verbose_level > 2)
13188 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13192 if (inline_costs < 0) {
13195 /* Method is too large */
13196 mname = mono_method_full_name (method, TRUE);
13197 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13198 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13202 if ((cfg->verbose_level > 2) && (cfg->method == method))
13203 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13208 g_assert (!mono_error_ok (&cfg->error));
13212 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13216 set_exception_type_from_invalid_il (cfg, method, ip);
13220 g_slist_free (class_inits);
13221 mono_basic_block_free (original_bb);
13222 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13223 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13224 if (cfg->exception_type)
13227 return inline_costs;
13231 store_membase_reg_to_store_membase_imm (int opcode)
13234 case OP_STORE_MEMBASE_REG:
13235 return OP_STORE_MEMBASE_IMM;
13236 case OP_STOREI1_MEMBASE_REG:
13237 return OP_STOREI1_MEMBASE_IMM;
13238 case OP_STOREI2_MEMBASE_REG:
13239 return OP_STOREI2_MEMBASE_IMM;
13240 case OP_STOREI4_MEMBASE_REG:
13241 return OP_STOREI4_MEMBASE_IMM;
13242 case OP_STOREI8_MEMBASE_REG:
13243 return OP_STOREI8_MEMBASE_IMM;
13245 g_assert_not_reached ();
13252 mono_op_to_op_imm (int opcode)
13256 return OP_IADD_IMM;
13258 return OP_ISUB_IMM;
13260 return OP_IDIV_IMM;
13262 return OP_IDIV_UN_IMM;
13264 return OP_IREM_IMM;
13266 return OP_IREM_UN_IMM;
13268 return OP_IMUL_IMM;
13270 return OP_IAND_IMM;
13274 return OP_IXOR_IMM;
13276 return OP_ISHL_IMM;
13278 return OP_ISHR_IMM;
13280 return OP_ISHR_UN_IMM;
13283 return OP_LADD_IMM;
13285 return OP_LSUB_IMM;
13287 return OP_LAND_IMM;
13291 return OP_LXOR_IMM;
13293 return OP_LSHL_IMM;
13295 return OP_LSHR_IMM;
13297 return OP_LSHR_UN_IMM;
13298 #if SIZEOF_REGISTER == 8
13300 return OP_LREM_IMM;
13304 return OP_COMPARE_IMM;
13306 return OP_ICOMPARE_IMM;
13308 return OP_LCOMPARE_IMM;
13310 case OP_STORE_MEMBASE_REG:
13311 return OP_STORE_MEMBASE_IMM;
13312 case OP_STOREI1_MEMBASE_REG:
13313 return OP_STOREI1_MEMBASE_IMM;
13314 case OP_STOREI2_MEMBASE_REG:
13315 return OP_STOREI2_MEMBASE_IMM;
13316 case OP_STOREI4_MEMBASE_REG:
13317 return OP_STOREI4_MEMBASE_IMM;
13319 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13321 return OP_X86_PUSH_IMM;
13322 case OP_X86_COMPARE_MEMBASE_REG:
13323 return OP_X86_COMPARE_MEMBASE_IMM;
13325 #if defined(TARGET_AMD64)
13326 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13327 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13329 case OP_VOIDCALL_REG:
13330 return OP_VOIDCALL;
13338 return OP_LOCALLOC_IMM;
13345 ldind_to_load_membase (int opcode)
13349 return OP_LOADI1_MEMBASE;
13351 return OP_LOADU1_MEMBASE;
13353 return OP_LOADI2_MEMBASE;
13355 return OP_LOADU2_MEMBASE;
13357 return OP_LOADI4_MEMBASE;
13359 return OP_LOADU4_MEMBASE;
13361 return OP_LOAD_MEMBASE;
13362 case CEE_LDIND_REF:
13363 return OP_LOAD_MEMBASE;
13365 return OP_LOADI8_MEMBASE;
13367 return OP_LOADR4_MEMBASE;
13369 return OP_LOADR8_MEMBASE;
13371 g_assert_not_reached ();
13378 stind_to_store_membase (int opcode)
13382 return OP_STOREI1_MEMBASE_REG;
13384 return OP_STOREI2_MEMBASE_REG;
13386 return OP_STOREI4_MEMBASE_REG;
13388 case CEE_STIND_REF:
13389 return OP_STORE_MEMBASE_REG;
13391 return OP_STOREI8_MEMBASE_REG;
13393 return OP_STORER4_MEMBASE_REG;
13395 return OP_STORER8_MEMBASE_REG;
13397 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOADx_MEMBASE opcode to the LOADx_MEM variant which loads from
 * an absolute address, or return -1 when the target does not support it.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the opcode which performs OPCODE directly on a memory destination
 * (read-modify-write membase forms on x86/amd64), given that the result is
 * stored with STORE_OPCODE. Return -1 if no such combined opcode exists.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	/* 32 bit opcodes reuse the x86 membase forms */
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	/* 64 bit opcodes use the amd64-specific forms */
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_LADD_IMM:
	case OP_ADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_LSUB_IMM:
	case OP_SUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_LAND_IMM:
	case OP_AND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_LOR_IMM:
	case OP_OR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_LXOR_IMM:
	case OP_XOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return the opcode which stores the result of a comparison setCC
 * directly to memory (x86/amd64 SETcc-to-membase), or -1 if none applies.
 * Explicit breaks added: the original fell through from OP_ICEQ into
 * OP_CNE when the store opcode did not match, which was benign only
 * because the second guard also failed.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
13543 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13546 /* FIXME: This has sign extension issues */
13548 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13549 return OP_X86_COMPARE_MEMBASE8_IMM;
13552 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13557 return OP_X86_PUSH_MEMBASE;
13558 case OP_COMPARE_IMM:
13559 case OP_ICOMPARE_IMM:
13560 return OP_X86_COMPARE_MEMBASE_IMM;
13563 return OP_X86_COMPARE_MEMBASE_REG;
13567 #ifdef TARGET_AMD64
13568 /* FIXME: This has sign extension issues */
13570 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13571 return OP_X86_COMPARE_MEMBASE8_IMM;
13576 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13577 return OP_X86_PUSH_MEMBASE;
13579 /* FIXME: This only works for 32 bit immediates
13580 case OP_COMPARE_IMM:
13581 case OP_LCOMPARE_IMM:
13582 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13583 return OP_AMD64_COMPARE_MEMBASE_IMM;
13585 case OP_ICOMPARE_IMM:
13586 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13587 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13591 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13592 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13593 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13594 return OP_AMD64_COMPARE_MEMBASE_REG;
13597 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13598 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13607 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13610 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13616 return OP_X86_COMPARE_REG_MEMBASE;
13618 return OP_X86_ADD_REG_MEMBASE;
13620 return OP_X86_SUB_REG_MEMBASE;
13622 return OP_X86_AND_REG_MEMBASE;
13624 return OP_X86_OR_REG_MEMBASE;
13626 return OP_X86_XOR_REG_MEMBASE;
13630 #ifdef TARGET_AMD64
13631 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13634 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13636 return OP_X86_ADD_REG_MEMBASE;
13638 return OP_X86_SUB_REG_MEMBASE;
13640 return OP_X86_AND_REG_MEMBASE;
13642 return OP_X86_OR_REG_MEMBASE;
13644 return OP_X86_XOR_REG_MEMBASE;
13646 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13650 return OP_AMD64_COMPARE_REG_MEMBASE;
13652 return OP_AMD64_ADD_REG_MEMBASE;
13654 return OP_AMD64_SUB_REG_MEMBASE;
13656 return OP_AMD64_AND_REG_MEMBASE;
13658 return OP_AMD64_OR_REG_MEMBASE;
13660 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes which are
 * emulated in software on the current target, since their immediate
 * variants cannot be used either.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
13695 * mono_handle_global_vregs:
13697 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13701 mono_handle_global_vregs (MonoCompile *cfg)
13703 gint32 *vreg_to_bb;
13704 MonoBasicBlock *bb;
13707 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13709 #ifdef MONO_ARCH_SIMD_INTRINSICS
13710 if (cfg->uses_simd_intrinsics)
13711 mono_simd_simplify_indirection (cfg);
13714 /* Find local vregs used in more than one bb */
13715 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13716 MonoInst *ins = bb->code;
13717 int block_num = bb->block_num;
13719 if (cfg->verbose_level > 2)
13720 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13723 for (; ins; ins = ins->next) {
13724 const char *spec = INS_INFO (ins->opcode);
13725 int regtype = 0, regindex;
13728 if (G_UNLIKELY (cfg->verbose_level > 2))
13729 mono_print_ins (ins);
13731 g_assert (ins->opcode >= MONO_CEE_LAST);
13733 for (regindex = 0; regindex < 4; regindex ++) {
13736 if (regindex == 0) {
13737 regtype = spec [MONO_INST_DEST];
13738 if (regtype == ' ')
13741 } else if (regindex == 1) {
13742 regtype = spec [MONO_INST_SRC1];
13743 if (regtype == ' ')
13746 } else if (regindex == 2) {
13747 regtype = spec [MONO_INST_SRC2];
13748 if (regtype == ' ')
13751 } else if (regindex == 3) {
13752 regtype = spec [MONO_INST_SRC3];
13753 if (regtype == ' ')
13758 #if SIZEOF_REGISTER == 4
13759 /* In the LLVM case, the long opcodes are not decomposed */
13760 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13762 * Since some instructions reference the original long vreg,
13763 * and some reference the two component vregs, it is quite hard
13764 * to determine when it needs to be global. So be conservative.
13766 if (!get_vreg_to_inst (cfg, vreg)) {
13767 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13769 if (cfg->verbose_level > 2)
13770 printf ("LONG VREG R%d made global.\n", vreg);
13774 * Make the component vregs volatile since the optimizations can
13775 * get confused otherwise.
13777 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13778 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13782 g_assert (vreg != -1);
13784 prev_bb = vreg_to_bb [vreg];
13785 if (prev_bb == 0) {
13786 /* 0 is a valid block num */
13787 vreg_to_bb [vreg] = block_num + 1;
13788 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13789 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13792 if (!get_vreg_to_inst (cfg, vreg)) {
13793 if (G_UNLIKELY (cfg->verbose_level > 2))
13794 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13798 if (vreg_is_ref (cfg, vreg))
13799 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13801 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13804 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13807 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13810 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13813 g_assert_not_reached ();
13817 /* Flag as having been used in more than one bb */
13818 vreg_to_bb [vreg] = -1;
13824 /* If a variable is used in only one bblock, convert it into a local vreg */
13825 for (i = 0; i < cfg->num_varinfo; i++) {
13826 MonoInst *var = cfg->varinfo [i];
13827 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13829 switch (var->type) {
13835 #if SIZEOF_REGISTER == 8
13838 #if !defined(TARGET_X86)
13839 /* Enabling this screws up the fp stack on x86 */
13842 if (mono_arch_is_soft_float ())
13845 /* Arguments are implicitly global */
13846 /* Putting R4 vars into registers doesn't work currently */
13847 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13848 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
 13850			 * Make sure that the variable's liveness interval doesn't contain a call, since
13851 * that would cause the lvreg to be spilled, making the whole optimization
13854 /* This is too slow for JIT compilation */
13856 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13858 int def_index, call_index, ins_index;
13859 gboolean spilled = FALSE;
13864 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13865 const char *spec = INS_INFO (ins->opcode);
13867 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13868 def_index = ins_index;
13870 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13871 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13872 if (call_index > def_index) {
13878 if (MONO_IS_CALL (ins))
13879 call_index = ins_index;
13889 if (G_UNLIKELY (cfg->verbose_level > 2))
13890 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13891 var->flags |= MONO_INST_IS_DEAD;
13892 cfg->vreg_to_inst [var->dreg] = NULL;
13899 * Compress the varinfo and vars tables so the liveness computation is faster and
13900 * takes up less space.
13903 for (i = 0; i < cfg->num_varinfo; ++i) {
13904 MonoInst *var = cfg->varinfo [i];
13905 if (pos < i && cfg->locals_start == i)
13906 cfg->locals_start = pos;
13907 if (!(var->flags & MONO_INST_IS_DEAD)) {
13909 cfg->varinfo [pos] = cfg->varinfo [i];
13910 cfg->varinfo [pos]->inst_c0 = pos;
13911 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13912 cfg->vars [pos].idx = pos;
13913 #if SIZEOF_REGISTER == 4
13914 if (cfg->varinfo [pos]->type == STACK_I8) {
13915 /* Modify the two component vars too */
13918 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13919 var1->inst_c0 = pos;
13920 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13921 var1->inst_c0 = pos;
13928 cfg->num_varinfo = pos;
13929 if (cfg->locals_start > cfg->num_varinfo)
13930 cfg->locals_start = cfg->num_varinfo;
13934 * mono_spill_global_vars:
13936 * Generate spill code for variables which are not allocated to registers,
13937 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13938 * code is generated which could be optimized by the local optimization passes.
13941 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13943 MonoBasicBlock *bb;
13945 int orig_next_vreg;
13946 guint32 *vreg_to_lvreg;
13948 guint32 i, lvregs_len;
13949 gboolean dest_has_lvreg = FALSE;
13950 guint32 stacktypes [128];
13951 MonoInst **live_range_start, **live_range_end;
13952 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13953 int *gsharedvt_vreg_to_idx = NULL;
13955 *need_local_opts = FALSE;
13957 memset (spec2, 0, sizeof (spec2));
13959 /* FIXME: Move this function to mini.c */
13960 stacktypes ['i'] = STACK_PTR;
13961 stacktypes ['l'] = STACK_I8;
13962 stacktypes ['f'] = STACK_R8;
13963 #ifdef MONO_ARCH_SIMD_INTRINSICS
13964 stacktypes ['x'] = STACK_VTYPE;
13967 #if SIZEOF_REGISTER == 4
13968 /* Create MonoInsts for longs */
13969 for (i = 0; i < cfg->num_varinfo; i++) {
13970 MonoInst *ins = cfg->varinfo [i];
13972 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13973 switch (ins->type) {
13978 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13981 g_assert (ins->opcode == OP_REGOFFSET);
13983 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13985 tree->opcode = OP_REGOFFSET;
13986 tree->inst_basereg = ins->inst_basereg;
13987 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13989 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13991 tree->opcode = OP_REGOFFSET;
13992 tree->inst_basereg = ins->inst_basereg;
13993 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14003 if (cfg->compute_gc_maps) {
 14004		/* registers need liveness info even for non-ref vars */
14005 for (i = 0; i < cfg->num_varinfo; i++) {
14006 MonoInst *ins = cfg->varinfo [i];
14008 if (ins->opcode == OP_REGVAR)
14009 ins->flags |= MONO_INST_GC_TRACK;
14013 if (cfg->gsharedvt) {
14014 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14016 for (i = 0; i < cfg->num_varinfo; ++i) {
14017 MonoInst *ins = cfg->varinfo [i];
14020 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14021 if (i >= cfg->locals_start) {
14023 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14024 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14025 ins->opcode = OP_GSHAREDVT_LOCAL;
14026 ins->inst_imm = idx;
14029 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14030 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14036 /* FIXME: widening and truncation */
14039 * As an optimization, when a variable allocated to the stack is first loaded into
14040 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14041 * the variable again.
14043 orig_next_vreg = cfg->next_vreg;
14044 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14045 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14049 * These arrays contain the first and last instructions accessing a given
14051 * Since we emit bblocks in the same order we process them here, and we
14052 * don't split live ranges, these will precisely describe the live range of
14053 * the variable, i.e. the instruction range where a valid value can be found
14054 * in the variables location.
14055 * The live range is computed using the liveness info computed by the liveness pass.
14056 * We can't use vmv->range, since that is an abstract live range, and we need
14057 * one which is instruction precise.
14058 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14060 /* FIXME: Only do this if debugging info is requested */
14061 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14062 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14063 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14064 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14066 /* Add spill loads/stores */
14067 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14070 if (cfg->verbose_level > 2)
14071 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14073 /* Clear vreg_to_lvreg array */
14074 for (i = 0; i < lvregs_len; i++)
14075 vreg_to_lvreg [lvregs [i]] = 0;
14079 MONO_BB_FOR_EACH_INS (bb, ins) {
14080 const char *spec = INS_INFO (ins->opcode);
14081 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14082 gboolean store, no_lvreg;
14083 int sregs [MONO_MAX_SRC_REGS];
14085 if (G_UNLIKELY (cfg->verbose_level > 2))
14086 mono_print_ins (ins);
14088 if (ins->opcode == OP_NOP)
14092 * We handle LDADDR here as well, since it can only be decomposed
14093 * when variable addresses are known.
14095 if (ins->opcode == OP_LDADDR) {
14096 MonoInst *var = ins->inst_p0;
14098 if (var->opcode == OP_VTARG_ADDR) {
14099 /* Happens on SPARC/S390 where vtypes are passed by reference */
14100 MonoInst *vtaddr = var->inst_left;
14101 if (vtaddr->opcode == OP_REGVAR) {
14102 ins->opcode = OP_MOVE;
14103 ins->sreg1 = vtaddr->dreg;
14105 else if (var->inst_left->opcode == OP_REGOFFSET) {
14106 ins->opcode = OP_LOAD_MEMBASE;
14107 ins->inst_basereg = vtaddr->inst_basereg;
14108 ins->inst_offset = vtaddr->inst_offset;
14111 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14112 /* gsharedvt arg passed by ref */
14113 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14115 ins->opcode = OP_LOAD_MEMBASE;
14116 ins->inst_basereg = var->inst_basereg;
14117 ins->inst_offset = var->inst_offset;
14118 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14119 MonoInst *load, *load2, *load3;
14120 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14121 int reg1, reg2, reg3;
14122 MonoInst *info_var = cfg->gsharedvt_info_var;
14123 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14127 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14130 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14132 g_assert (info_var);
14133 g_assert (locals_var);
14135 /* Mark the instruction used to compute the locals var as used */
14136 cfg->gsharedvt_locals_var_ins = NULL;
14138 /* Load the offset */
14139 if (info_var->opcode == OP_REGOFFSET) {
14140 reg1 = alloc_ireg (cfg);
14141 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14142 } else if (info_var->opcode == OP_REGVAR) {
14144 reg1 = info_var->dreg;
14146 g_assert_not_reached ();
14148 reg2 = alloc_ireg (cfg);
14149 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14150 /* Load the locals area address */
14151 reg3 = alloc_ireg (cfg);
14152 if (locals_var->opcode == OP_REGOFFSET) {
14153 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14154 } else if (locals_var->opcode == OP_REGVAR) {
14155 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14157 g_assert_not_reached ();
14159 /* Compute the address */
14160 ins->opcode = OP_PADD;
14164 mono_bblock_insert_before_ins (bb, ins, load3);
14165 mono_bblock_insert_before_ins (bb, load3, load2);
14167 mono_bblock_insert_before_ins (bb, load2, load);
14169 g_assert (var->opcode == OP_REGOFFSET);
14171 ins->opcode = OP_ADD_IMM;
14172 ins->sreg1 = var->inst_basereg;
14173 ins->inst_imm = var->inst_offset;
14176 *need_local_opts = TRUE;
14177 spec = INS_INFO (ins->opcode);
14180 if (ins->opcode < MONO_CEE_LAST) {
14181 mono_print_ins (ins);
14182 g_assert_not_reached ();
14186 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14190 if (MONO_IS_STORE_MEMBASE (ins)) {
14191 tmp_reg = ins->dreg;
14192 ins->dreg = ins->sreg2;
14193 ins->sreg2 = tmp_reg;
14196 spec2 [MONO_INST_DEST] = ' ';
14197 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14198 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14199 spec2 [MONO_INST_SRC3] = ' ';
14201 } else if (MONO_IS_STORE_MEMINDEX (ins))
14202 g_assert_not_reached ();
14207 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14208 printf ("\t %.3s %d", spec, ins->dreg);
14209 num_sregs = mono_inst_get_src_registers (ins, sregs);
14210 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14211 printf (" %d", sregs [srcindex]);
14218 regtype = spec [MONO_INST_DEST];
14219 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14222 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14223 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14224 MonoInst *store_ins;
14226 MonoInst *def_ins = ins;
14227 int dreg = ins->dreg; /* The original vreg */
14229 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14231 if (var->opcode == OP_REGVAR) {
14232 ins->dreg = var->dreg;
14233 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14235 * Instead of emitting a load+store, use a _membase opcode.
14237 g_assert (var->opcode == OP_REGOFFSET);
14238 if (ins->opcode == OP_MOVE) {
14242 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14243 ins->inst_basereg = var->inst_basereg;
14244 ins->inst_offset = var->inst_offset;
14247 spec = INS_INFO (ins->opcode);
14251 g_assert (var->opcode == OP_REGOFFSET);
14253 prev_dreg = ins->dreg;
14255 /* Invalidate any previous lvreg for this vreg */
14256 vreg_to_lvreg [ins->dreg] = 0;
14260 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14262 store_opcode = OP_STOREI8_MEMBASE_REG;
14265 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14267 #if SIZEOF_REGISTER != 8
14268 if (regtype == 'l') {
14269 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14270 mono_bblock_insert_after_ins (bb, ins, store_ins);
14271 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14272 mono_bblock_insert_after_ins (bb, ins, store_ins);
14273 def_ins = store_ins;
14278 g_assert (store_opcode != OP_STOREV_MEMBASE);
14280 /* Try to fuse the store into the instruction itself */
14281 /* FIXME: Add more instructions */
14282 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14283 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14284 ins->inst_imm = ins->inst_c0;
14285 ins->inst_destbasereg = var->inst_basereg;
14286 ins->inst_offset = var->inst_offset;
14287 spec = INS_INFO (ins->opcode);
14288 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14289 ins->opcode = store_opcode;
14290 ins->inst_destbasereg = var->inst_basereg;
14291 ins->inst_offset = var->inst_offset;
14295 tmp_reg = ins->dreg;
14296 ins->dreg = ins->sreg2;
14297 ins->sreg2 = tmp_reg;
14300 spec2 [MONO_INST_DEST] = ' ';
14301 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14302 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14303 spec2 [MONO_INST_SRC3] = ' ';
14305 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14306 // FIXME: The backends expect the base reg to be in inst_basereg
14307 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14309 ins->inst_basereg = var->inst_basereg;
14310 ins->inst_offset = var->inst_offset;
14311 spec = INS_INFO (ins->opcode);
14313 /* printf ("INS: "); mono_print_ins (ins); */
14314 /* Create a store instruction */
14315 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14317 /* Insert it after the instruction */
14318 mono_bblock_insert_after_ins (bb, ins, store_ins);
14320 def_ins = store_ins;
14323 * We can't assign ins->dreg to var->dreg here, since the
14324 * sregs could use it. So set a flag, and do it after
14327 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14328 dest_has_lvreg = TRUE;
14333 if (def_ins && !live_range_start [dreg]) {
14334 live_range_start [dreg] = def_ins;
14335 live_range_start_bb [dreg] = bb;
14338 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14341 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14342 tmp->inst_c1 = dreg;
14343 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14350 num_sregs = mono_inst_get_src_registers (ins, sregs);
14351 for (srcindex = 0; srcindex < 3; ++srcindex) {
14352 regtype = spec [MONO_INST_SRC1 + srcindex];
14353 sreg = sregs [srcindex];
14355 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14356 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14357 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14358 MonoInst *use_ins = ins;
14359 MonoInst *load_ins;
14360 guint32 load_opcode;
14362 if (var->opcode == OP_REGVAR) {
14363 sregs [srcindex] = var->dreg;
14364 //mono_inst_set_src_registers (ins, sregs);
14365 live_range_end [sreg] = use_ins;
14366 live_range_end_bb [sreg] = bb;
14368 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14371 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14372 /* var->dreg is a hreg */
14373 tmp->inst_c1 = sreg;
14374 mono_bblock_insert_after_ins (bb, ins, tmp);
14380 g_assert (var->opcode == OP_REGOFFSET);
14382 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14384 g_assert (load_opcode != OP_LOADV_MEMBASE);
14386 if (vreg_to_lvreg [sreg]) {
14387 g_assert (vreg_to_lvreg [sreg] != -1);
14389 /* The variable is already loaded to an lvreg */
14390 if (G_UNLIKELY (cfg->verbose_level > 2))
14391 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14392 sregs [srcindex] = vreg_to_lvreg [sreg];
14393 //mono_inst_set_src_registers (ins, sregs);
14397 /* Try to fuse the load into the instruction */
14398 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14399 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14400 sregs [0] = var->inst_basereg;
14401 //mono_inst_set_src_registers (ins, sregs);
14402 ins->inst_offset = var->inst_offset;
14403 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14404 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14405 sregs [1] = var->inst_basereg;
14406 //mono_inst_set_src_registers (ins, sregs);
14407 ins->inst_offset = var->inst_offset;
14409 if (MONO_IS_REAL_MOVE (ins)) {
14410 ins->opcode = OP_NOP;
14413 //printf ("%d ", srcindex); mono_print_ins (ins);
14415 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14417 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14418 if (var->dreg == prev_dreg) {
14420 * sreg refers to the value loaded by the load
14421 * emitted below, but we need to use ins->dreg
14422 * since it refers to the store emitted earlier.
14426 g_assert (sreg != -1);
14427 vreg_to_lvreg [var->dreg] = sreg;
14428 g_assert (lvregs_len < 1024);
14429 lvregs [lvregs_len ++] = var->dreg;
14433 sregs [srcindex] = sreg;
14434 //mono_inst_set_src_registers (ins, sregs);
14436 #if SIZEOF_REGISTER != 8
14437 if (regtype == 'l') {
14438 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14439 mono_bblock_insert_before_ins (bb, ins, load_ins);
14440 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14441 mono_bblock_insert_before_ins (bb, ins, load_ins);
14442 use_ins = load_ins;
14447 #if SIZEOF_REGISTER == 4
14448 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14450 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14451 mono_bblock_insert_before_ins (bb, ins, load_ins);
14452 use_ins = load_ins;
14456 if (var->dreg < orig_next_vreg) {
14457 live_range_end [var->dreg] = use_ins;
14458 live_range_end_bb [var->dreg] = bb;
14461 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14464 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14465 tmp->inst_c1 = var->dreg;
14466 mono_bblock_insert_after_ins (bb, ins, tmp);
14470 mono_inst_set_src_registers (ins, sregs);
14472 if (dest_has_lvreg) {
14473 g_assert (ins->dreg != -1);
14474 vreg_to_lvreg [prev_dreg] = ins->dreg;
14475 g_assert (lvregs_len < 1024);
14476 lvregs [lvregs_len ++] = prev_dreg;
14477 dest_has_lvreg = FALSE;
14481 tmp_reg = ins->dreg;
14482 ins->dreg = ins->sreg2;
14483 ins->sreg2 = tmp_reg;
14486 if (MONO_IS_CALL (ins)) {
14487 /* Clear vreg_to_lvreg array */
14488 for (i = 0; i < lvregs_len; i++)
14489 vreg_to_lvreg [lvregs [i]] = 0;
14491 } else if (ins->opcode == OP_NOP) {
14493 MONO_INST_NULLIFY_SREGS (ins);
14496 if (cfg->verbose_level > 2)
14497 mono_print_ins_index (1, ins);
14500 /* Extend the live range based on the liveness info */
14501 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14502 for (i = 0; i < cfg->num_varinfo; i ++) {
14503 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14505 if (vreg_is_volatile (cfg, vi->vreg))
14506 /* The liveness info is incomplete */
14509 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14510 /* Live from at least the first ins of this bb */
14511 live_range_start [vi->vreg] = bb->code;
14512 live_range_start_bb [vi->vreg] = bb;
14515 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14516 /* Live at least until the last ins of this bb */
14517 live_range_end [vi->vreg] = bb->last_ins;
14518 live_range_end_bb [vi->vreg] = bb;
14525 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14526 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14528 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14529 for (i = 0; i < cfg->num_varinfo; ++i) {
14530 int vreg = MONO_VARINFO (cfg, i)->vreg;
14533 if (live_range_start [vreg]) {
14534 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14536 ins->inst_c1 = vreg;
14537 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14539 if (live_range_end [vreg]) {
14540 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14542 ins->inst_c1 = vreg;
14543 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14544 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14546 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14551 if (cfg->gsharedvt_locals_var_ins) {
14552 /* Nullify if unused */
14553 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14554 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14557 g_free (live_range_start);
14558 g_free (live_range_end);
14559 g_free (live_range_start_bb);
14560 g_free (live_range_end_bb);
14565 * - use 'iadd' instead of 'int_add'
14566 * - handling ovf opcodes: decompose in method_to_ir.
14567 * - unify iregs/fregs
14568 * -> partly done, the missing parts are:
14569 * - a more complete unification would involve unifying the hregs as well, so
14570 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14571 * would no longer map to the machine hregs, so the code generators would need to
14572 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14573 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14574 * fp/non-fp branches speeds it up by about 15%.
14575 * - use sext/zext opcodes instead of shifts
14577 * - get rid of TEMPLOADs if possible and use vregs instead
14578 * - clean up usage of OP_P/OP_ opcodes
14579 * - cleanup usage of DUMMY_USE
14580 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14582 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14583 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14584 * - make sure handle_stack_args () is called before the branch is emitted
14585 * - when the new IR is done, get rid of all unused stuff
14586 * - COMPARE/BEQ as separate instructions or unify them ?
14587 * - keeping them separate allows specialized compare instructions like
14588 * compare_imm, compare_membase
14589 * - most back ends unify fp compare+branch, fp compare+ceq
14590 * - integrate mono_save_args into inline_method
14591 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14592 * - handle long shift opts on 32 bit platforms somehow: they require
14593 * 3 sregs (2 for arg1 and 1 for arg2)
14594 * - make byref a 'normal' type.
14595 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14596 * variable if needed.
14597 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14598 * like inline_method.
14599 * - remove inlining restrictions
14600 * - fix LNEG and enable cfold of INEG
14601 * - generalize x86 optimizations like ldelema as a peephole optimization
14602 * - add store_mem_imm for amd64
14603 * - optimize the loading of the interruption flag in the managed->native wrappers
14604 * - avoid special handling of OP_NOP in passes
14605 * - move code inserting instructions into one function/macro.
14606 * - try a coalescing phase after liveness analysis
14607 * - add float -> vreg conversion + local optimizations on !x86
14608 * - figure out how to handle decomposed branches during optimizations, ie.
14609 * compare+branch, op_jump_table+op_br etc.
14610 * - promote RuntimeXHandles to vregs
14611 * - vtype cleanups:
14612 * - add a NEW_VARLOADA_VREG macro
14613 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14614 * accessing vtype fields.
14615 * - get rid of I8CONST on 64 bit platforms
14616 * - dealing with the increase in code size due to branches created during opcode
14618 * - use extended basic blocks
14619 * - all parts of the JIT
14620 * - handle_global_vregs () && local regalloc
14621 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14622 * - sources of increase in code size:
14625 * - isinst and castclass
14626 * - lvregs not allocated to global registers even if used multiple times
14627 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14629 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14630 * - add all micro optimizations from the old JIT
14631 * - put tree optimizations into the deadce pass
14632 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14633 * specific function.
14634 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14635 * fcompare + branchCC.
14636 * - create a helper function for allocating a stack slot, taking into account
14637 * MONO_CFG_HAS_SPILLUP.
14639 * - merge the ia64 switch changes.
14640 * - optimize mono_regstate2_alloc_int/float.
14641 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14642 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14643 * parts of the tree could be separated by other instructions, killing the tree
14644 * arguments, or stores killing loads etc. Also, should we fold loads into other
14645 * instructions if the result of the load is used multiple times ?
14646 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14647 * - LAST MERGE: 108395.
14648 * - when returning vtypes in registers, generate IR and append it to the end of the
14649 * last bb instead of doing it in the epilog.
14650 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14658 - When to decompose opcodes:
14659 - earlier: this makes some optimizations hard to implement, since the low level IR
 14660    no longer contains the necessary information. But it is easier to do.
14661 - later: harder to implement, enables more optimizations.
14662 - Branches inside bblocks:
14663 - created when decomposing complex opcodes.
14664 - branches to another bblock: harmless, but not tracked by the branch
14665 optimizations, so need to branch to a label at the start of the bblock.
14666 - branches to inside the same bblock: very problematic, trips up the local
 14667    reg allocator. Can be fixed by splitting the current bblock, but that is a
14668 complex operation, since some local vregs can become global vregs etc.
14669 - Local/global vregs:
14670 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14671 local register allocator.
14672 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14673 structure, created by mono_create_var (). Assigned to hregs or the stack by
14674 the global register allocator.
14675 - When to do optimizations like alu->alu_imm:
14676 - earlier -> saves work later on since the IR will be smaller/simpler
14677 - later -> can work on more instructions
14678 - Handling of valuetypes:
14679 - When a vtype is pushed on the stack, a new temporary is created, an
14680 instruction computing its address (LDADDR) is emitted and pushed on
14681 the stack. Need to optimize cases when the vtype is used immediately as in
14682 argument passing, stloc etc.
14683 - Instead of the to_end stuff in the old JIT, simply call the function handling
14684 the values on the stack before emitting the last instruction of the bb.
14687 #endif /* DISABLE_JIT */