2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
72 #include "aot-compiler.h"
73 #include "mini-llvm.h"
75 #define BRANCH_COST 10
76 #define INLINE_LENGTH_LIMIT 20
78 /* These have 'cfg' as an implicit argument */
79 #define INLINE_FAILURE(msg) do { \
80 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
81 inline_failure (cfg, msg); \
82 goto exception_exit; \
85 #define CHECK_CFG_EXCEPTION do {\
86 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
87 goto exception_exit; \
89 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
90 method_access_failure ((cfg), (method), (cmethod)); \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
111 goto exception_exit; \
113 #define DISABLE_AOT(cfg) do { \
114 if ((cfg)->verbose_level >= 2) \
115 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
116 (cfg)->disable_aot = TRUE; \
118 #define LOAD_ERROR do { \
119 break_on_unverified (); \
120 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
121 goto exception_exit; \
124 #define TYPE_LOAD_ERROR(klass) do { \
125 cfg->exception_ptr = klass; \
129 #define CHECK_CFG_ERROR do {\
130 if (!mono_error_ok (&cfg->error)) { \
131 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
132 goto mono_error_exit; \
136 /* Determine whether 'ins' represents a load of the 'this' argument */
137 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
139 static int ldind_to_load_membase (int opcode);
140 static int stind_to_store_membase (int opcode);
142 int mono_op_to_op_imm (int opcode);
143 int mono_op_to_op_imm_noemul (int opcode);
145 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
147 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
148 guchar *ip, guint real_offset, gboolean inline_always);
150 /* helper methods signatures */
151 static MonoMethodSignature *helper_sig_domain_get;
152 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
155 * Instruction metadata
163 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
164 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
170 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
175 /* keep in sync with the enum in mini.h */
178 #include "mini-ops.h"
183 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
184 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
186 * This should contain the index of the last sreg + 1. This is not the same
187 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
189 const gint8 ins_sreg_counts[] = {
190 #include "mini-ops.h"
195 #define MONO_INIT_VARINFO(vi,id) do { \
196 (vi)->range.first_use.pos.bid = 0xffff; \
202 mono_alloc_ireg (MonoCompile *cfg)
204 return alloc_ireg (cfg);
208 mono_alloc_lreg (MonoCompile *cfg)
210 return alloc_lreg (cfg);
214 mono_alloc_freg (MonoCompile *cfg)
216 return alloc_freg (cfg);
220 mono_alloc_preg (MonoCompile *cfg)
222 return alloc_preg (cfg);
226 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
228 return alloc_dreg (cfg, stack_type);
232 * mono_alloc_ireg_ref:
234 * Allocate an IREG, and mark it as holding a GC ref.
237 mono_alloc_ireg_ref (MonoCompile *cfg)
239 return alloc_ireg_ref (cfg);
243 * mono_alloc_ireg_mp:
245 * Allocate an IREG, and mark it as holding a managed pointer.
248 mono_alloc_ireg_mp (MonoCompile *cfg)
250 return alloc_ireg_mp (cfg);
254 * mono_alloc_ireg_copy:
256 * Allocate an IREG with the same GC type as VREG.
259 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
261 if (vreg_is_ref (cfg, vreg))
262 return alloc_ireg_ref (cfg);
263 else if (vreg_is_mp (cfg, vreg))
264 return alloc_ireg_mp (cfg);
266 return alloc_ireg (cfg);
270 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
275 type = mini_get_underlying_type (type);
277 switch (type->type) {
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
306 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
309 case MONO_TYPE_VALUETYPE:
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
324 g_assert (cfg->gshared);
325 if (mini_type_var_is_vt (type))
328 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
359 static MONO_NEVER_INLINE void
360 break_on_unverified (void)
362 if (mini_get_debug_options ()->break_on_unverified)
366 static MONO_NEVER_INLINE void
367 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
369 char *method_fname = mono_method_full_name (method, TRUE);
370 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
371 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
372 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
373 g_free (method_fname);
374 g_free (cil_method_fname);
377 static MONO_NEVER_INLINE void
378 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
380 char *method_fname = mono_method_full_name (method, TRUE);
381 char *field_fname = mono_field_full_name (field);
382 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
383 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
384 g_free (method_fname);
385 g_free (field_fname);
388 static MONO_NEVER_INLINE void
389 inline_failure (MonoCompile *cfg, const char *msg)
391 if (cfg->verbose_level >= 2)
392 printf ("inline failed: %s\n", msg);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
396 static MONO_NEVER_INLINE void
397 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
399 if (cfg->verbose_level > 2) \
400 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
404 static MONO_NEVER_INLINE void
405 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
407 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
408 if (cfg->verbose_level >= 2)
409 printf ("%s\n", cfg->exception_message);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 * When using gsharedvt, some instantiations might be verifiable and some might not be, i.e.
415 * foo<T> (int i) { ldarg.0; box T; }
417 #define UNVERIFIED do { \
418 if (cfg->gsharedvt) { \
419 if (cfg->verbose_level > 2) \
420 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
422 goto exception_exit; \
424 break_on_unverified (); \
428 #define GET_BBLOCK(cfg,tblock,ip) do { \
429 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
431 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
432 NEW_BBLOCK (cfg, (tblock)); \
433 (tblock)->cil_code = (ip); \
434 ADD_BBLOCK (cfg, (tblock)); \
438 #if defined(TARGET_X86) || defined(TARGET_AMD64)
439 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
440 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
441 (dest)->dreg = alloc_ireg_mp ((cfg)); \
442 (dest)->sreg1 = (sr1); \
443 (dest)->sreg2 = (sr2); \
444 (dest)->inst_imm = (imm); \
445 (dest)->backend.shift_amount = (shift); \
446 MONO_ADD_INS ((cfg)->cbb, (dest)); \
450 /* Emit conversions so both operands of a binary opcode are of the same type */
452 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
454 MonoInst *arg1 = *arg1_ref;
455 MonoInst *arg2 = *arg2_ref;
458 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
459 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
462 /* Mixing r4/r8 is allowed by the spec */
463 if (arg1->type == STACK_R4) {
464 int dreg = alloc_freg (cfg);
466 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
467 conv->type = STACK_R8;
471 if (arg2->type == STACK_R4) {
472 int dreg = alloc_freg (cfg);
474 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
475 conv->type = STACK_R8;
481 #if SIZEOF_REGISTER == 8
482 /* FIXME: Need to add many more cases */
483 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
486 int dr = alloc_preg (cfg);
487 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
488 (ins)->sreg2 = widen->dreg;
493 #define ADD_BINOP(op) do { \
494 MONO_INST_NEW (cfg, ins, (op)); \
496 ins->sreg1 = sp [0]->dreg; \
497 ins->sreg2 = sp [1]->dreg; \
498 type_from_op (cfg, ins, sp [0], sp [1]); \
500 /* Have to insert a widening op */ \
501 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
502 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
503 MONO_ADD_INS ((cfg)->cbb, (ins)); \
504 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
507 #define ADD_UNOP(op) do { \
508 MONO_INST_NEW (cfg, ins, (op)); \
510 ins->sreg1 = sp [0]->dreg; \
511 type_from_op (cfg, ins, sp [0], NULL); \
513 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode (cfg, ins); \
518 #define ADD_BINCOND(next_block) do { \
521 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
522 cmp->sreg1 = sp [0]->dreg; \
523 cmp->sreg2 = sp [1]->dreg; \
524 type_from_op (cfg, cmp, sp [0], sp [1]); \
526 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
527 type_from_op (cfg, ins, sp [0], sp [1]); \
528 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
529 GET_BBLOCK (cfg, tblock, target); \
530 link_bblock (cfg, cfg->cbb, tblock); \
531 ins->inst_true_bb = tblock; \
532 if ((next_block)) { \
533 link_bblock (cfg, cfg->cbb, (next_block)); \
534 ins->inst_false_bb = (next_block); \
535 start_new_bblock = 1; \
537 GET_BBLOCK (cfg, tblock, ip); \
538 link_bblock (cfg, cfg->cbb, tblock); \
539 ins->inst_false_bb = tblock; \
540 start_new_bblock = 2; \
542 if (sp != stack_start) { \
543 handle_stack_args (cfg, stack_start, sp - stack_start); \
544 CHECK_UNVERIFIABLE (cfg); \
546 MONO_ADD_INS (cfg->cbb, cmp); \
547 MONO_ADD_INS (cfg->cbb, ins); \
551 * link_bblock: Links two basic blocks
553 * links two basic blocks in the control flow graph, the 'from'
554 * argument is the starting block and the 'to' argument is the block
555 * the control flow ends to after 'from'.
558 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
560 MonoBasicBlock **newa;
564 if (from->cil_code) {
566 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
568 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
571 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
573 printf ("edge from entry to exit\n");
578 for (i = 0; i < from->out_count; ++i) {
579 if (to == from->out_bb [i]) {
585 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
586 for (i = 0; i < from->out_count; ++i) {
587 newa [i] = from->out_bb [i];
595 for (i = 0; i < to->in_count; ++i) {
596 if (from == to->in_bb [i]) {
602 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
603 for (i = 0; i < to->in_count; ++i) {
604 newa [i] = to->in_bb [i];
613 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
615 link_bblock (cfg, from, to);
619 * mono_find_block_region:
621 * We mark each basic block with a region ID. We use that to avoid BB
622 * optimizations when blocks are in different regions.
625 * A region token that encodes where this region is, and information
626 * about the clause owner for this block.
628 * The region encodes the try/catch/filter clause that owns this block
629 * as well as the type. -1 is a special value that represents a block
630 * that is in none of try/catch/filter.
633 mono_find_block_region (MonoCompile *cfg, int offset)
635 MonoMethodHeader *header = cfg->header;
636 MonoExceptionClause *clause;
639 for (i = 0; i < header->num_clauses; ++i) {
640 clause = &header->clauses [i];
641 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
642 (offset < (clause->handler_offset)))
643 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
645 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
646 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
647 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
648 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
649 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
651 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
654 for (i = 0; i < header->num_clauses; ++i) {
655 clause = &header->clauses [i];
657 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
658 return ((i + 1) << 8) | clause->flags;
665 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
667 MonoMethodHeader *header = cfg->header;
668 MonoExceptionClause *clause;
672 for (i = 0; i < header->num_clauses; ++i) {
673 clause = &header->clauses [i];
674 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
675 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
676 if (clause->flags == type)
677 res = g_list_append (res, clause);
684 mono_create_spvar_for_region (MonoCompile *cfg, int region)
688 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
692 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
693 /* prevent it from being register allocated */
694 var->flags |= MONO_INST_VOLATILE;
696 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
700 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
702 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
706 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
710 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
714 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
715 /* prevent it from being register allocated */
716 var->flags |= MONO_INST_VOLATILE;
718 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
724 * Returns the type used in the eval stack when @type is loaded.
725 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
728 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
732 type = mini_get_underlying_type (type);
733 inst->klass = klass = mono_class_from_mono_type (type);
735 inst->type = STACK_MP;
740 switch (type->type) {
742 inst->type = STACK_INV;
750 inst->type = STACK_I4;
755 case MONO_TYPE_FNPTR:
756 inst->type = STACK_PTR;
758 case MONO_TYPE_CLASS:
759 case MONO_TYPE_STRING:
760 case MONO_TYPE_OBJECT:
761 case MONO_TYPE_SZARRAY:
762 case MONO_TYPE_ARRAY:
763 inst->type = STACK_OBJ;
767 inst->type = STACK_I8;
770 inst->type = cfg->r4_stack_type;
773 inst->type = STACK_R8;
775 case MONO_TYPE_VALUETYPE:
776 if (type->data.klass->enumtype) {
777 type = mono_class_enum_basetype (type->data.klass);
781 inst->type = STACK_VTYPE;
784 case MONO_TYPE_TYPEDBYREF:
785 inst->klass = mono_defaults.typed_reference_class;
786 inst->type = STACK_VTYPE;
788 case MONO_TYPE_GENERICINST:
789 type = &type->data.generic_class->container_class->byval_arg;
793 g_assert (cfg->gshared);
794 if (mini_is_gsharedvt_type (type)) {
795 g_assert (cfg->gsharedvt);
796 inst->type = STACK_VTYPE;
798 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
802 g_error ("unknown type 0x%02x in eval stack type", type->type);
807 * The following tables are used to quickly validate the IL code in type_from_op ().
810 bin_num_table [STACK_MAX] [STACK_MAX] = {
811 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
816 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
824 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
827 /* reduce the size of this table */
829 bin_int_table [STACK_MAX] [STACK_MAX] = {
830 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
841 bin_comp_table [STACK_MAX] [STACK_MAX] = {
842 /* Inv i L p F & O vt r4 */
844 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
845 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
846 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
847 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
848 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
849 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
850 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
851 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
854 /* reduce the size of this table */
856 shift_table [STACK_MAX] [STACK_MAX] = {
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
868 * Tables to map from the non-specific opcode to the matching
869 * type-specific opcode.
871 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
873 binops_op_map [STACK_MAX] = {
874 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
877 /* handles from CEE_NEG to CEE_CONV_U8 */
879 unops_op_map [STACK_MAX] = {
880 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
883 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
885 ovfops_op_map [STACK_MAX] = {
886 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
889 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
891 ovf2ops_op_map [STACK_MAX] = {
892 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
895 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
897 ovf3ops_op_map [STACK_MAX] = {
898 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
901 /* handles from CEE_BEQ to CEE_BLT_UN */
903 beqops_op_map [STACK_MAX] = {
904 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
907 /* handles from CEE_CEQ to CEE_CLT_UN */
909 ceqops_op_map [STACK_MAX] = {
910 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
914 * Sets ins->type (the type on the eval stack) according to the
915 * type of the opcode and the arguments to it.
916 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
918 * FIXME: this function sets ins->type unconditionally in some cases, but
919 * it should set it to invalid for some types (a conv.x on an object)
922 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
924 switch (ins->opcode) {
931 /* FIXME: check unverifiable args for STACK_MP */
932 ins->type = bin_num_table [src1->type] [src2->type];
933 ins->opcode += binops_op_map [ins->type];
940 ins->type = bin_int_table [src1->type] [src2->type];
941 ins->opcode += binops_op_map [ins->type];
946 ins->type = shift_table [src1->type] [src2->type];
947 ins->opcode += binops_op_map [ins->type];
952 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
953 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
954 ins->opcode = OP_LCOMPARE;
955 else if (src1->type == STACK_R4)
956 ins->opcode = OP_RCOMPARE;
957 else if (src1->type == STACK_R8)
958 ins->opcode = OP_FCOMPARE;
960 ins->opcode = OP_ICOMPARE;
962 case OP_ICOMPARE_IMM:
963 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
964 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
965 ins->opcode = OP_LCOMPARE_IMM;
977 ins->opcode += beqops_op_map [src1->type];
980 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
981 ins->opcode += ceqops_op_map [src1->type];
987 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
988 ins->opcode += ceqops_op_map [src1->type];
992 ins->type = neg_table [src1->type];
993 ins->opcode += unops_op_map [ins->type];
996 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
997 ins->type = src1->type;
999 ins->type = STACK_INV;
1000 ins->opcode += unops_op_map [ins->type];
1006 ins->type = STACK_I4;
1007 ins->opcode += unops_op_map [src1->type];
1010 ins->type = STACK_R8;
1011 switch (src1->type) {
1014 ins->opcode = OP_ICONV_TO_R_UN;
1017 ins->opcode = OP_LCONV_TO_R_UN;
1021 case CEE_CONV_OVF_I1:
1022 case CEE_CONV_OVF_U1:
1023 case CEE_CONV_OVF_I2:
1024 case CEE_CONV_OVF_U2:
1025 case CEE_CONV_OVF_I4:
1026 case CEE_CONV_OVF_U4:
1027 ins->type = STACK_I4;
1028 ins->opcode += ovf3ops_op_map [src1->type];
1030 case CEE_CONV_OVF_I_UN:
1031 case CEE_CONV_OVF_U_UN:
1032 ins->type = STACK_PTR;
1033 ins->opcode += ovf2ops_op_map [src1->type];
1035 case CEE_CONV_OVF_I1_UN:
1036 case CEE_CONV_OVF_I2_UN:
1037 case CEE_CONV_OVF_I4_UN:
1038 case CEE_CONV_OVF_U1_UN:
1039 case CEE_CONV_OVF_U2_UN:
1040 case CEE_CONV_OVF_U4_UN:
1041 ins->type = STACK_I4;
1042 ins->opcode += ovf2ops_op_map [src1->type];
1045 ins->type = STACK_PTR;
1046 switch (src1->type) {
1048 ins->opcode = OP_ICONV_TO_U;
1052 #if SIZEOF_VOID_P == 8
1053 ins->opcode = OP_LCONV_TO_U;
1055 ins->opcode = OP_MOVE;
1059 ins->opcode = OP_LCONV_TO_U;
1062 ins->opcode = OP_FCONV_TO_U;
1068 ins->type = STACK_I8;
1069 ins->opcode += unops_op_map [src1->type];
1071 case CEE_CONV_OVF_I8:
1072 case CEE_CONV_OVF_U8:
1073 ins->type = STACK_I8;
1074 ins->opcode += ovf3ops_op_map [src1->type];
1076 case CEE_CONV_OVF_U8_UN:
1077 case CEE_CONV_OVF_I8_UN:
1078 ins->type = STACK_I8;
1079 ins->opcode += ovf2ops_op_map [src1->type];
1082 ins->type = cfg->r4_stack_type;
1083 ins->opcode += unops_op_map [src1->type];
1086 ins->type = STACK_R8;
1087 ins->opcode += unops_op_map [src1->type];
1090 ins->type = STACK_R8;
1094 ins->type = STACK_I4;
1095 ins->opcode += ovfops_op_map [src1->type];
1098 case CEE_CONV_OVF_I:
1099 case CEE_CONV_OVF_U:
1100 ins->type = STACK_PTR;
1101 ins->opcode += ovfops_op_map [src1->type];
1104 case CEE_ADD_OVF_UN:
1106 case CEE_MUL_OVF_UN:
1108 case CEE_SUB_OVF_UN:
1109 ins->type = bin_num_table [src1->type] [src2->type];
1110 ins->opcode += ovfops_op_map [src1->type];
1111 if (ins->type == STACK_R8)
1112 ins->type = STACK_INV;
1114 case OP_LOAD_MEMBASE:
1115 ins->type = STACK_PTR;
1117 case OP_LOADI1_MEMBASE:
1118 case OP_LOADU1_MEMBASE:
1119 case OP_LOADI2_MEMBASE:
1120 case OP_LOADU2_MEMBASE:
1121 case OP_LOADI4_MEMBASE:
1122 case OP_LOADU4_MEMBASE:
1123 ins->type = STACK_PTR;
1125 case OP_LOADI8_MEMBASE:
1126 ins->type = STACK_I8;
1128 case OP_LOADR4_MEMBASE:
1129 ins->type = cfg->r4_stack_type;
1131 case OP_LOADR8_MEMBASE:
1132 ins->type = STACK_R8;
1135 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1139 if (ins->type == STACK_MP)
1140 ins->klass = mono_defaults.object_class;
1145 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1151 param_table [STACK_MAX] [STACK_MAX] = {
1156 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1161 switch (args->type) {
1171 for (i = 0; i < sig->param_count; ++i) {
1172 switch (args [i].type) {
1176 if (!sig->params [i]->byref)
1180 if (sig->params [i]->byref)
1182 switch (sig->params [i]->type) {
1183 case MONO_TYPE_CLASS:
1184 case MONO_TYPE_STRING:
1185 case MONO_TYPE_OBJECT:
1186 case MONO_TYPE_SZARRAY:
1187 case MONO_TYPE_ARRAY:
1194 if (sig->params [i]->byref)
1196 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1205 /*if (!param_table [args [i].type] [sig->params [i]->type])
1213 * When we need a pointer to the current domain many times in a method, we
1214 * call mono_domain_get() once and we store the result in a local variable.
1215 * This function returns the variable that represents the MonoDomain*.
1217 inline static MonoInst *
1218 mono_get_domainvar (MonoCompile *cfg)
1220 if (!cfg->domainvar)
1221 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1222 return cfg->domainvar;
1226 * The got_var contains the address of the Global Offset Table when AOT
1230 mono_get_got_var (MonoCompile *cfg)
1232 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1234 if (!cfg->got_var) {
1235 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1237 return cfg->got_var;
1241 mono_get_vtable_var (MonoCompile *cfg)
1243 g_assert (cfg->gshared);
1245 if (!cfg->rgctx_var) {
1246 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 /* force the var to be stack allocated */
1248 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1251 return cfg->rgctx_var;
1255 type_from_stack_type (MonoInst *ins) {
1256 switch (ins->type) {
1257 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1258 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1259 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1260 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1261 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1263 return &ins->klass->this_arg;
1264 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1265 case STACK_VTYPE: return &ins->klass->byval_arg;
1267 g_error ("stack type %d to monotype not handled\n", ins->type);
1272 static G_GNUC_UNUSED int
1273 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1275 t = mono_type_get_underlying_type (t);
1287 case MONO_TYPE_FNPTR:
1289 case MONO_TYPE_CLASS:
1290 case MONO_TYPE_STRING:
1291 case MONO_TYPE_OBJECT:
1292 case MONO_TYPE_SZARRAY:
1293 case MONO_TYPE_ARRAY:
1299 return cfg->r4_stack_type;
1302 case MONO_TYPE_VALUETYPE:
1303 case MONO_TYPE_TYPEDBYREF:
1305 case MONO_TYPE_GENERICINST:
1306 if (mono_type_generic_inst_is_valuetype (t))
1312 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CIL ldelem/stelem opcode to the element MonoClass it accesses.
 *   NOTE(review): most case labels are elided in this excerpt; only the
 *   CEE_LDELEM_REF/CEE_STELEM_REF labels are visible.
 */
1319 array_access_to_klass (int opcode)
1323 return mono_defaults.byte_class;
1325 return mono_defaults.uint16_class;
1328 return mono_defaults.int_class;
1331 return mono_defaults.sbyte_class;
1334 return mono_defaults.int16_class;
1337 return mono_defaults.int32_class;
1339 return mono_defaults.uint32_class;
1342 return mono_defaults.int64_class;
1345 return mono_defaults.single_class;
1348 return mono_defaults.double_class;
1349 case CEE_LDELEM_REF:
1350 case CEE_STELEM_REF:
1351 return mono_defaults.object_class;
1353 g_assert_not_reached ();
1359 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable used to carry stack slot 'slot' of type
 *   ins->type across basic-block boundaries, reusing a previously created
 *   one (cached in cfg->intvars, keyed by slot and stack type) when possible.
 */
1362 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1367 /* inlining can result in deeper stacks */
1368 if (slot >= cfg->header->max_stack)
1369 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1371 pos = ins->type - 1 + slot * STACK_MAX;
1373 switch (ins->type) {
1380 if ((vnum = cfg->intvars [pos]))
1381 return cfg->varinfo [vnum];
1382 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1383 cfg->intvars [pos] = res->inst_c0;
/* NOTE(review): fall-through/default structure is partially elided here. */
1386 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Record, for AOT compilation, the (image, token) pair that 'key' was
 *   loaded from, so the AOT compiler can emit a patch for it later.
 */
1392 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1395 * Don't use this if a generic_context is set, since that means AOT can't
1396 * look up the method using just the image+token.
1397 * table == 0 means this is a reference made from a wrapper.
1399 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1400 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1401 jump_info_token->image = image;
1402 jump_info_token->token = token;
1403 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1408 * This function is called to handle items that are left on the evaluation stack
1409 * at basic block boundaries. What happens is that we save the values to local variables
1410 * and we reload them later when first entering the target basic block (with the
1411 * handle_loaded_temps () function).
1412 * A single joint point will use the same variables (stored in the array bb->out_stack or
1413 * bb->in_stack, if the basic block is before or after the joint point).
1415 * This function needs to be called _before_ emitting the last instruction of
1416 * the bb (i.e. before emitting a branch).
1417 * If the stack merge fails at a join point, cfg->unverifiable is set.
1420 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1423 MonoBasicBlock *bb = cfg->cbb;
1424 MonoBasicBlock *outb;
1425 MonoInst *inst, **locals;
1430 if (cfg->verbose_level > 3)
1431 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock with a non-empty stack: pick the variables. */
1432 if (!bb->out_scount) {
1433 bb->out_scount = count;
1434 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor, if one already has it. */
1436 for (i = 0; i < bb->out_count; ++i) {
1437 outb = bb->out_bb [i];
1438 /* exception handlers are linked, but they should not be considered for stack args */
1439 if (outb->flags & BB_EXCEPTION_HANDLER)
1441 //printf (" %d", outb->block_num);
1442 if (outb->in_stack) {
1444 bb->out_stack = outb->in_stack;
/* No successor had one: allocate a fresh out_stack array. */
1450 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1451 for (i = 0; i < count; ++i) {
1453 * try to reuse temps already allocated for this purpose, if they occupy the same
1454 * stack slot and if they are of the same type.
1455 * This won't cause conflicts since if 'local' is used to
1456 * store one of the values in the in_stack of a bblock, then
1457 * the same variable will be used for the same outgoing stack
1459 * This doesn't work when inlining methods, since the bblocks
1460 * in the inlined methods do not inherit their in_stack from
1461 * the bblock they are inlined to. See bug #58863 for an
1464 if (cfg->inlined_method)
1465 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1467 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack into each successor's in_stack, checking depths. */
1472 for (i = 0; i < bb->out_count; ++i) {
1473 outb = bb->out_bb [i];
1474 /* exception handlers are linked, but they should not be considered for stack args */
1475 if (outb->flags & BB_EXCEPTION_HANDLER)
1477 if (outb->in_scount) {
1478 if (outb->in_scount != bb->out_scount) {
/* Join point with mismatched stack depth: IL is not verifiable. */
1479 cfg->unverifiable = TRUE;
1482 continue; /* check they are the same locals */
1484 outb->in_scount = count;
1485 outb->in_stack = bb->out_stack;
1488 locals = bb->out_stack;
/* Spill each stack item into its variable and replace it on the stack. */
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1499 * It is possible that the out bblocks already have in_stack assigned, and
1500 * the in_stacks differ. In this case, we will store to all the different
1507 /* Find a bblock which has a different in_stack */
1509 while (bindex < bb->out_count) {
1510 outb = bb->out_bb [bindex];
1511 /* exception handlers are linked, but they should not be considered for stack args */
1512 if (outb->flags & BB_EXCEPTION_HANDLER) {
1516 if (outb->in_stack != locals) {
/* Store the same values into this successor's distinct variable set too. */
1517 for (i = 0; i < count; ++i) {
1518 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1519 inst->cil_code = sp [i]->cil_code;
1520 sp [i] = locals [i];
1521 if (cfg->verbose_level > 3)
1522 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1524 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *   Emit an instruction loading a runtime constant described by a patch
 *   (patch_type, data). Under AOT this becomes an AOTCONST patched at load
 *   time; otherwise the target is resolved immediately and emitted as a
 *   plain pointer constant.
 */
1534 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1538 if (cfg->compile_aot) {
1539 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1544 ji.type = patch_type;
1545 ji.data.target = data;
1546 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
1548 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into 'intf_bit_reg' a nonzero value iff the interface
 *   bitmap found at [base_reg + offset] has the bit for klass's interface id
 *   set. Three strategies: an icall when the bitmap is compressed, a
 *   bit-test computed from an AOT IID constant, or (JIT) a direct byte load
 *   and mask using the known interface_id.
 */
1554 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1556 int ibitmap_reg = alloc_preg (cfg);
1557 #ifdef COMPRESSED_INTERFACE_BITMAP
1559 MonoInst *res, *ins;
1560 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1561 MONO_ADD_INS (cfg->cbb, ins);
/* NOTE(review): args [0] assignment is elided in this excerpt. */
1563 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1564 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1565 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1567 int ibitmap_byte_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1571 if (cfg->compile_aot) {
/* AOT: the IID is not known at compile time, compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in registers. */
1572 int iid_reg = alloc_preg (cfg);
1573 int shifted_iid_reg = alloc_preg (cfg);
1574 int ibitmap_byte_address_reg = alloc_preg (cfg);
1575 int masked_iid_reg = alloc_preg (cfg);
1576 int iid_one_bit_reg = alloc_preg (cfg);
1577 int iid_bit_reg = alloc_preg (cfg);
1578 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1580 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1583 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1584 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1585 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, fold the index and mask. */
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1594 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1595 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: point the bitmap check at MonoClass.interface_bitmap. */
1598 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1600 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1604 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1605 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: point the bitmap check at MonoVTable.interface_bitmap. */
1608 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1610 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1614 * Emit code which checks whether the interface id of @klass is smaller
1615 * than the value given by max_iid_reg.
1618 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1619 MonoBasicBlock *false_target)
/* Under AOT the IID is a patchable constant; JIT compares against an imm. */
1621 if (cfg->compile_aot) {
1622 int iid_reg = alloc_preg (cfg);
1623 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1624 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1627 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target, branch on failure; otherwise throw on failure. */
1629 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1631 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1634 /* Same as above, but obtains max_iid from a vtable */
1636 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1637 MonoBasicBlock *false_target)
1639 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit) and delegate the comparison. */
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1642 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1645 /* Same as above, but obtains max_iid from a klass */
1647 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1648 MonoBasicBlock *false_target)
1650 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit) and delegate the comparison. */
1652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1653 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style check: walk klass's supertype table entry for
 *   klass's idepth and compare it against the target class (given either as
 *   an instruction 'klass_ins', an AOT class constant, or a raw pointer
 *   immediate), branching to true_target on match / false_target on a depth
 *   mismatch.
 */
1657 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1659 int idepth_reg = alloc_preg (cfg);
1660 int stypes_reg = alloc_preg (cfg);
1661 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before we read them. */
1663 mono_class_setup_supertypes (klass);
/* Only need the runtime idepth check when the supertable may be truncated. */
1665 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1666 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1668 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [idepth - 1] is the candidate ancestor at klass's depth. */
1670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1673 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1674 } else if (cfg->compile_aot) {
1675 int const_reg = alloc_preg (cfg);
1676 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1677 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1681 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with the class given only as a MonoClass*. */
1685 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1687 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against a vtable register: verify the
 *   interface id is in range, test the interface bitmap bit, then either
 *   branch (isinst style) or throw InvalidCastException (castclass style).
 */
1691 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1693 int intf_reg = alloc_preg (cfg);
1695 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1696 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* NOTE(review): the if/else guarding these two lines is elided here. */
1699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1701 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1705 * Variant of the above that takes a register to the class, not the vtable.
1708 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1710 int intf_bit_reg = alloc_preg (cfg);
1712 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1713 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* NOTE(review): the if/else guarding these two lines is elided here. */
1716 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1718 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check: compare klass_reg with either the
 *   provided instruction's dreg or a runtime class constant, throwing
 *   InvalidCastException on mismatch.
 */
1722 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1727 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1728 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1730 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no precomputed class instruction. */
1734 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1736 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare klass_reg against 'klass' (AOT class constant or immediate
 *   pointer) and branch to 'target' using the given branch opcode.
 */
1740 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1742 if (cfg->compile_aot) {
1743 int const_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1745 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst recurses through this wrapper
 * for array element classes. */
1753 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit the IR for a castclass check of the class in klass_reg against
 *   'klass', throwing InvalidCastException on failure. Arrays are handled by
 *   checking rank and then recursing/dispatching on the element (cast)
 *   class; non-arrays use the supertype-table walk. 'object_is_null' is the
 *   target for paths where the check can be skipped.
 */
1756 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1759 int rank_reg = alloc_preg (cfg);
1760 int eclass_reg = alloc_preg (cfg);
/* The array path does not support a precomputed class instruction. */
1762 g_assert (!klass_inst);
/* Rank must match exactly, otherwise the cast fails. */
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1765 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1766 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Dispatch on the target's element cast class; enums need special-casing
 * because their cast_class is the underlying integral type. */
1768 if (klass->cast_class == mono_defaults.object_class) {
1769 int parent_reg = alloc_preg (cfg);
1770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1771 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1772 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1773 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1774 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1775 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1776 } else if (klass->cast_class == mono_defaults.enum_class) {
1777 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1778 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1779 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1781 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1782 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1785 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1786 /* Check that the object is a vector too */
/* SZARRAY means no bounds array; a non-NULL bounds pointer is a mismatch. */
1787 int bounds_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1790 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertype-table walk, throwing instead of branching. */
1793 int idepth_reg = alloc_preg (cfg);
1794 int stypes_reg = alloc_preg (cfg);
1795 int stype = alloc_preg (cfg);
1797 mono_class_setup_supertypes (klass);
1799 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1802 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1806 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no precomputed class instruction. */
1811 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1813 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that sets 'size' bytes at [destreg + offset] to 'val'
 *   (only val == 0 is supported, asserted below). Small aligned sizes use a
 *   single immediate store; larger regions loop over register-sized stores,
 *   narrowing to 4/2/1-byte stores for the tail.
 */
1817 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1821 g_assert (val == 0);
/* Fast path: whole region fits in a single naturally-aligned store. */
1826 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1829 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1832 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1835 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1837 #if SIZEOF_REGISTER == 8
1839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1845 val_reg = alloc_preg (cfg);
1847 if (SIZEOF_REGISTER == 8)
1848 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1850 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix handled with byte stores. */
1853 /* This could be optimized further if necessary */
1855 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1862 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1869 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: 4-, then 2-, then 1-byte stores for the remaining bytes. */
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying 'size' bytes from [srcreg + soffset] to
 *   [destreg + doffset]: register-wide load/store pairs where alignment and
 *   the backend allow, narrowing to 4/2/1-byte copies for the tail.
 */
1893 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1900 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1901 g_assert (size < 10000);
/* Unaligned prefix copied byte by byte. */
1904 /* This could be optimized further if necessary */
1906 cur_reg = alloc_preg (cfg);
1907 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1908 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Bulk copy in 8-byte units on 64-bit backends that tolerate it. */
1915 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, then 2-, then 1-byte copies for the remaining bytes. */
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1943 cur_reg = alloc_preg (cfg);
1944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing sreg1 into the TLS slot identified by tls_key. Under
 *   AOT the slot offset is a patchable constant (OP_TLS_SET_REG); under JIT
 *   the offset is resolved now and baked into OP_TLS_SET.
 */
1953 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1957 if (cfg->compile_aot) {
1958 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1959 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1961 ins->sreg2 = c->dreg;
1962 MONO_ADD_INS (cfg->cbb, ins);
1964 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1966 ins->inst_offset = mini_get_tls_offset (tls_key);
1967 MONO_ADD_INS (cfg->cbb, ins);
1974 * Emit IR to push the current LMF onto the LMF stack.
1977 emit_push_lmf (MonoCompile *cfg)
1980 * Emit IR to push the LMF:
1981 * lmf_addr = <lmf_addr from tls>
1982 * lmf->lmf_addr = lmf_addr
1983 * lmf->prev_lmf = *lmf_addr
1986 int lmf_reg, prev_lmf_reg;
1987 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so link and store directly. */
1992 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1993 /* Load current lmf */
1994 lmf_ins = mono_get_lmf_intrinsic (cfg);
1996 MONO_ADD_INS (cfg->cbb, lmf_ins);
1997 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1998 lmf_reg = ins->dreg;
1999 /* Save previous_lmf */
2000 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
2002 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2005 * Store lmf_addr in a variable, so it can be allocated to a global register.
2007 if (!cfg->lmf_addr_var)
2008 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several alternative ways to obtain lmf_addr follow; the choice between
 * them is made by preprocessor/conditional lines elided in this excerpt. */
2011 ins = mono_get_jit_tls_intrinsic (cfg);
2013 int jit_tls_dreg = ins->dreg;
2015 MONO_ADD_INS (cfg->cbb, ins);
2016 lmf_reg = alloc_preg (cfg);
2017 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2019 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2022 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2024 MONO_ADD_INS (cfg->cbb, lmf_ins);
2027 MonoInst *args [16], *jit_tls_ins, *ins;
2029 /* Inline mono_get_lmf_addr () */
2030 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2032 /* Load mono_jit_tls_id */
2033 if (cfg->compile_aot)
2034 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2036 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2037 /* call pthread_getspecific () */
2038 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2039 /* lmf_addr = &jit_tls->lmf */
2040 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2043 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2047 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link this frame's LMF in: lmf->previous_lmf = *lmf_addr; *lmf_addr = lmf. */
2049 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2050 lmf_reg = ins->dreg;
2052 prev_lmf_reg = alloc_preg (cfg);
2053 /* Save previous_lmf */
2054 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2055 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2057 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2064 * Emit IR to pop the current LMF from the LMF stack.
2067 emit_pop_lmf (MonoCompile *cfg)
2069 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2075 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2076 lmf_reg = ins->dreg;
/* Fast path mirrors emit_push_lmf: restore previous_lmf straight into TLS. */
2078 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2079 /* Load previous_lmf */
2080 prev_lmf_reg = alloc_preg (cfg);
2081 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2083 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2086 * Emit IR to pop the LMF:
2087 * *(lmf->lmf_addr) = lmf->prev_lmf
2089 /* This could be called before emit_push_lmf () */
2090 if (!cfg->lmf_addr_var)
2091 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2092 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2094 prev_lmf_reg = alloc_preg (cfg);
2095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2096 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a call to a profiler enter/leave hook 'func' with the current
 *   method as the single argument, but only when enter/leave profiling is
 *   enabled and we are not inside an inlined body.
 */
2101 emit_instrumentation_call (MonoCompile *cfg, void *func)
2103 MonoInst *iargs [1];
2106 * Avoid instrumenting inlined methods since it can
2107 * distort profiling results.
2109 if (cfg->method != cfg->current_method)
2112 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2113 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2114 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the call IR opcode for a given return type, picking the
 *   REG/MEMBASE/plain variant based on whether this is an indirect call
 *   (calli) or a virtual call (virt). Enum value types and generic
 *   instantiations are unwrapped and re-dispatched.
 */
2119 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2122 type = mini_get_underlying_type (type);
2123 switch (type->type) {
2124 case MONO_TYPE_VOID:
2125 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2132 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2136 case MONO_TYPE_FNPTR:
2137 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2138 case MONO_TYPE_CLASS:
2139 case MONO_TYPE_STRING:
2140 case MONO_TYPE_OBJECT:
2141 case MONO_TYPE_SZARRAY:
2142 case MONO_TYPE_ARRAY:
2143 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2146 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2149 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2151 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2153 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2154 case MONO_TYPE_VALUETYPE:
/* Enums call like their underlying integral type: unwrap and retry. */
2155 if (type->data.klass->enumtype) {
2156 type = mono_class_enum_basetype (type->data.klass);
2159 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2160 case MONO_TYPE_TYPEDBYREF:
2161 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2162 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic definition's open type. */
2163 type = &type->data.generic_class->container_class->byval_arg;
2166 case MONO_TYPE_MVAR:
2168 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2170 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2176 * target_type_is_incompatible:
2177 * @cfg: MonoCompile context
2179 * Check that the item @arg on the evaluation stack can be stored
2180 * in the target type (can be a local, or field, etc).
2181 * The cfg arg can be used to check if we need verification or just
2184 * Returns: non-0 value if arg can't be stored on a target.
2187 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2189 MonoType *simple_type;
2192 if (target->byref) {
2193 /* FIXME: check that the pointed to types match */
2194 if (arg->type == STACK_MP)
2195 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2196 if (arg->type == STACK_PTR)
2201 simple_type = mini_get_underlying_type (target);
2202 switch (simple_type->type) {
2203 case MONO_TYPE_VOID:
2211 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2215 /* STACK_MP is needed when setting pinned locals */
2216 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2221 case MONO_TYPE_FNPTR:
2223 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2224 * in native int. (#688008).
2226 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2229 case MONO_TYPE_CLASS:
2230 case MONO_TYPE_STRING:
2231 case MONO_TYPE_OBJECT:
2232 case MONO_TYPE_SZARRAY:
2233 case MONO_TYPE_ARRAY:
2234 if (arg->type != STACK_OBJ)
2236 /* FIXME: check type compatibility */
2240 if (arg->type != STACK_I8)
2244 if (arg->type != cfg->r4_stack_type)
2248 if (arg->type != STACK_R8)
2251 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack slot kind and the exact class. */
2252 if (arg->type != STACK_VTYPE)
2254 klass = mono_class_from_mono_type (simple_type);
2255 if (klass != arg->klass)
2258 case MONO_TYPE_TYPEDBYREF:
2259 if (arg->type != STACK_VTYPE)
2261 klass = mono_class_from_mono_type (simple_type);
2262 if (klass != arg->klass)
2265 case MONO_TYPE_GENERICINST:
2266 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2267 if (arg->type != STACK_VTYPE)
2269 klass = mono_class_from_mono_type (simple_type);
2270 /* The second case is needed when doing partial sharing */
2271 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2275 if (arg->type != STACK_OBJ)
2277 /* FIXME: check type compatibility */
2281 case MONO_TYPE_MVAR:
/* Type variables only appear under generic sharing. */
2282 g_assert (cfg->gshared);
2283 if (mini_type_var_is_vt (simple_type)) {
2284 if (arg->type != STACK_VTYPE)
2287 if (arg->type != STACK_OBJ)
2292 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2298 * Prepare arguments for passing to a function call.
2299 * Return a non-zero value if the arguments can't be passed to the given
2301 * The type checks are not yet complete and some conversions may need
2302 * casts on 32 or 64 bit architectures.
2304 * FIXME: implement this using target_type_is_incompatible ()
2307 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2309 MonoType *simple_type;
/* For instance calls, 'this' (args [0]) must be a reference or pointer. */
2313 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2317 for (i = 0; i < sig->param_count; ++i) {
2318 if (sig->params [i]->byref) {
2319 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2323 simple_type = mini_get_underlying_type (sig->params [i]);
2325 switch (simple_type->type) {
2326 case MONO_TYPE_VOID:
2335 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2341 case MONO_TYPE_FNPTR:
2342 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2345 case MONO_TYPE_CLASS:
2346 case MONO_TYPE_STRING:
2347 case MONO_TYPE_OBJECT:
2348 case MONO_TYPE_SZARRAY:
2349 case MONO_TYPE_ARRAY:
2350 if (args [i]->type != STACK_OBJ)
2355 if (args [i]->type != STACK_I8)
2359 if (args [i]->type != cfg->r4_stack_type)
2363 if (args [i]->type != STACK_R8)
2366 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2367 if (simple_type->data.klass->enumtype) {
2368 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2371 if (args [i]->type != STACK_VTYPE)
2374 case MONO_TYPE_TYPEDBYREF:
2375 if (args [i]->type != STACK_VTYPE)
2378 case MONO_TYPE_GENERICINST:
/* Re-check against the generic definition's open type. */
2379 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2382 case MONO_TYPE_MVAR:
2384 if (args [i]->type != STACK_VTYPE)
2388 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual) opcode to the corresponding direct call
 *   opcode. NOTE(review): the return statements are elided in this excerpt;
 *   only the case labels are visible.
 */
2396 callvirt_to_call (int opcode)
2399 case OP_CALL_MEMBASE:
2401 case OP_VOIDCALL_MEMBASE:
2403 case OP_FCALL_MEMBASE:
2405 case OP_RCALL_MEMBASE:
2407 case OP_VCALL_MEMBASE:
2409 case OP_LCALL_MEMBASE:
2412 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *   Map a *CALL_MEMBASE (virtual) opcode to the corresponding indirect
 *   (*CALL_REG) call opcode.
 */
2419 callvirt_to_call_reg (int opcode)
2422 case OP_CALL_MEMBASE:
2424 case OP_VOIDCALL_MEMBASE:
2425 return OP_VOIDCALL_REG;
2426 case OP_FCALL_MEMBASE:
2427 return OP_FCALL_REG;
2428 case OP_RCALL_MEMBASE:
2429 return OP_RCALL_REG;
2430 case OP_VCALL_MEMBASE:
2431 return OP_VCALL_REG;
2432 case OP_LCALL_MEMBASE:
2433 return OP_LCALL_REG;
2435 g_assert_not_reached ();
2441 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either a runtime method constant or a
 *   caller-supplied imt_arg value) into a register and attach it to the
 *   call, either via call->imt_arg_reg (LLVM) or the architecture's fixed
 *   MONO_ARCH_IMT_REG.
 */
2443 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2447 if (COMPILE_LLVM (cfg)) {
2449 method_reg = alloc_preg (cfg);
2450 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2452 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2453 method_reg = ins->dreg;
2457 call->imt_arg_reg = method_reg;
2459 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same materialization, always via MONO_ARCH_IMT_REG. */
2464 method_reg = alloc_preg (cfg);
2465 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2467 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2468 method_reg = ins->dreg;
2471 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from 'mp' describing a patch of the given type
 *   and target. NOTE(review): the assignments of ip/type and the return are
 *   elided in this excerpt.
 */
2474 static MonoJumpInfo *
2475 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2477 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2481 ji->data.target = target;
/* Wrapper over mono_class_check_context_used; the guard condition on cfg is
 * elided in this excerpt. */
2487 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2490 return mono_class_check_context_used (klass);
/* Wrapper over mono_method_check_context_used; the guard condition on cfg is
 * elided in this excerpt. */
2496 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2499 return mono_method_check_context_used (method);
2505 * check_method_sharing:
2507 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2510 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2512 gboolean pass_vtable = FALSE;
2513 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable. */
2515 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2516 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2517 gboolean sharable = FALSE;
2519 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2523 * Pass vtable iff target method might
2524 * be shared, which means that sharing
2525 * is enabled for its class and its
2526 * context is sharable (and it's not a
2529 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) take an mrgctx instead. */
2533 if (mini_method_get_context (cmethod) &&
2534 mini_method_get_context (cmethod)->method_inst) {
2535 g_assert (!pass_vtable);
2537 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2540 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Write results only through the out-parameters the caller supplied. */
2545 if (out_pass_vtable)
2546 *out_pass_vtable = pass_vtable;
2547 if (out_pass_mrgctx)
2548 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 *   CALLI selects indirect calls, VIRTUAL membase calls, TAIL tail calls.
 *   Handles vtype returns (vret var / OP_OUTARG_VTRETADDR), soft-float r4
 *   argument conversion, and delegates out-arg lowering to the backend.
 *   Does NOT add the call to a basic block — callers do that.
 */
2551 inline static MonoCallInst *
2552 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2553 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2557 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls: notify profiler of method leave, then emit OP_TAILCALL */
2565 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2567 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2569 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2572 call->signature = sig;
2573 call->rgctx_reg = rgctx;
2574 sig_ret = mini_get_underlying_type (sig->ret);
2576 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return: first branch reuses cfg->vret_addr (extra guard conditions
 * on this branch are not visible in this sampled extract) */
2579 if (mini_type_is_vtype (sig_ret)) {
2580 call->vret_var = cfg->vret_addr;
2581 //g_assert_not_reached ();
2583 } else if (mini_type_is_vtype (sig_ret)) {
2584 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2587 temp->backend.is_pinvoke = sig->pinvoke;
2590 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2591 * address of return value to increase optimization opportunities.
2592 * Before vtype decomposition, the dreg of the call ins itself represents the
2593 * fact the call modifies the return value. After decomposition, the call will
2594 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2595 * will be transformed into an LDADDR.
2597 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2598 loada->dreg = alloc_preg (cfg);
2599 loada->inst_p0 = temp;
2600 /* We reference the call too since call->dreg could change during optimization */
2601 loada->inst_p1 = call;
2602 MONO_ADD_INS (cfg->cbb, loada);
2604 call->inst.dreg = temp->dreg;
2606 call->vret_var = loada;
2607 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2608 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2610 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2611 if (COMPILE_SOFT_FLOAT (cfg)) {
2613 * If the call has a float argument, we would need to do an r8->r4 conversion using
2614 * an icall, but that cannot be done during the call sequence since it would clobber
2615 * the call registers + the stack. So we do it before emitting the call.
2617 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2619 MonoInst *in = call->args [i];
2621 if (i >= sig->hasthis)
2622 t = sig->params [i - sig->hasthis];
/* the implicit 'this' argument is pointer-sized */
2624 t = &mono_defaults.int_class->byval_arg;
2625 t = mono_type_get_underlying_type (t);
2627 if (!t->byref && t->type == MONO_TYPE_R4) {
2628 MonoInst *iargs [1];
2632 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2634 /* The result will be in an int vreg */
2635 call->args [i] = conv;
2641 call->need_unbox_trampoline = unbox_trampoline;
2644 if (COMPILE_LLVM (cfg))
2645 mono_llvm_emit_call (cfg, call);
2647 mono_arch_emit_call (cfg, call);
2649 mono_arch_emit_call (cfg, call);
/* track the maximum outgoing-argument area needed by any call */
2652 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2653 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx value in RGCTX_REG to CALL as an out-argument in
 *   MONO_ARCH_RGCTX_REG, and mark both cfg and the call as rgctx users.
 */
2659 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2661 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2662 cfg->uses_rgctx_reg = TRUE;
2663 call->rgctx_reg = TRUE;
2665 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG.  Optionally passes
 *   an IMT argument and an rgctx argument.  For pinvoke wrappers compiled
 *   with check_pinvoke_callconv, brackets the call with OP_GET_SP/OP_SET_SP
 *   to detect callee stack imbalance (wrong calling convention) and throws
 *   ExecutionEngineException on mismatch.
 */
2669 inline static MonoInst*
2670 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2675 gboolean check_sp = FALSE;
2677 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2678 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2680 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* snapshot the rgctx value before emitting the call sequence */
2685 rgctx_reg = mono_alloc_preg (cfg);
2686 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2690 if (!cfg->stack_inbalance_var)
2691 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* record SP before the call */
2693 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2694 ins->dreg = cfg->stack_inbalance_var->dreg;
2695 MONO_ADD_INS (cfg->cbb, ins);
2698 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2700 call->inst.sreg1 = addr->dreg;
2703 emit_imt_argument (cfg, call, NULL, imt_arg);
2705 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* read SP after the call and compare against the saved value */
2710 sp_reg = mono_alloc_preg (cfg);
2712 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2714 MONO_ADD_INS (cfg->cbb, ins);
2716 /* Restore the stack so we don't crash when throwing the exception */
2717 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2718 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2719 MONO_ADD_INS (cfg->cbb, ins);
2721 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2722 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2726 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2728 return (MonoInst*)call;
2732 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2735 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2737 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2740 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2741 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2743 #ifndef DISABLE_REMOTING
2744 gboolean might_be_remote = FALSE;
2746 gboolean virtual = this_ins != NULL;
2747 gboolean enable_for_aot = TRUE;
2750 MonoInst *call_target = NULL;
2752 gboolean need_unbox_trampoline;
2755 sig = mono_method_signature (method);
2757 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2758 MonoInst *icall_args [16];
2761 // FIXME: Optimize this
2763 guint32 imt_slot = mono_method_get_imt_slot (method);
2765 icall_args [0] = this_ins;
2766 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2768 icall_args [2] = imt_arg;
2770 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2771 icall_args [2] = ins;
2773 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2775 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2779 rgctx_reg = mono_alloc_preg (cfg);
2780 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2783 if (method->string_ctor) {
2784 /* Create the real signature */
2785 /* FIXME: Cache these */
2786 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2787 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2792 context_used = mini_method_check_context_used (cfg, method);
2794 #ifndef DISABLE_REMOTING
2795 might_be_remote = this_ins && sig->hasthis &&
2796 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2797 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2799 if (might_be_remote && context_used) {
2802 g_assert (cfg->gshared);
2804 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2806 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2810 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2811 // FIXME: Vcall optimizations below
2812 MonoInst *icall_args [16];
2815 if (sig->generic_param_count) {
2817 * Generic virtual call, pass the concrete method as the imt argument.
2819 imt_arg = emit_get_rgctx_method (cfg, context_used,
2820 method, MONO_RGCTX_INFO_METHOD);
2823 // FIXME: Optimize this
2825 int slot = mono_method_get_vtable_index (method);
2827 icall_args [0] = this_ins;
2828 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2830 icall_args [2] = imt_arg;
2832 EMIT_NEW_PCONST (cfg, ins, NULL);
2833 icall_args [2] = ins;
2835 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2838 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2840 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2842 #ifndef DISABLE_REMOTING
2843 if (might_be_remote)
2844 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2847 call->method = method;
2848 call->inst.flags |= MONO_INST_HAS_METHOD;
2849 call->inst.inst_left = this_ins;
2850 call->tail_call = tail;
2853 int vtable_reg, slot_reg, this_reg;
2856 this_reg = this_ins->dreg;
2858 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2859 MonoInst *dummy_use;
2861 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2863 /* Make a call to delegate->invoke_impl */
2864 call->inst.inst_basereg = this_reg;
2865 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2866 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2868 /* We must emit a dummy use here because the delegate trampoline will
2869 replace the 'this' argument with the delegate target making this activation
2870 no longer a root for the delegate.
2871 This is an issue for delegates that target collectible code such as dynamic
2872 methods of GC'able assemblies.
2874 For a test case look into #667921.
2876 FIXME: a dummy use is not the best way to do it as the local register allocator
2877 will put it on a caller save register and spil it around the call.
2878 Ideally, we would either put it on a callee save register or only do the store part.
2880 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2882 return (MonoInst*)call;
2885 if ((!cfg->compile_aot || enable_for_aot) &&
2886 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2887 (MONO_METHOD_IS_FINAL (method) &&
2888 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2889 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2891 * the method is not virtual, we just need to ensure this is not null
2892 * and then we can call the method directly.
2894 #ifndef DISABLE_REMOTING
2895 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2897 * The check above ensures method is not gshared, this is needed since
2898 * gshared methods can't have wrappers.
2900 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2904 if (!method->string_ctor)
2905 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2907 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2908 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2910 * the method is virtual, but we can statically dispatch since either
2911 * it's class or the method itself are sealed.
2912 * But first we need to ensure it's not a null reference.
2914 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2916 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2917 } else if (call_target) {
2918 vtable_reg = alloc_preg (cfg);
2919 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2921 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2922 call->inst.sreg1 = call_target->dreg;
2923 call->inst.flags &= !MONO_INST_HAS_METHOD;
2925 vtable_reg = alloc_preg (cfg);
2926 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2927 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2928 guint32 imt_slot = mono_method_get_imt_slot (method);
2929 emit_imt_argument (cfg, call, call->method, imt_arg);
2930 slot_reg = vtable_reg;
2931 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2933 slot_reg = vtable_reg;
2934 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2935 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2937 g_assert (mono_method_signature (method)->generic_param_count);
2938 emit_imt_argument (cfg, call, call->method, imt_arg);
2942 call->inst.sreg1 = slot_reg;
2943 call->inst.inst_offset = offset;
2944 call->is_virtual = TRUE;
2948 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2951 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2953 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD with its own signature,
 *   no IMT and no rgctx argument.
 */
2957 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2959 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to native address FUNC with signature SIG.
 *   (The assignment of FUNC to the call's fptr falls between the sampled
 *   lines of this extract.)
 */
2963 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2970 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2973 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2975 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and emit a native call to its
 *   wrapper (the wrapper provides the LMF frame for exception handling).
 */
2979 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2981 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2985 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2989 * mono_emit_abs_call:
2991 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2993 inline static MonoInst*
2994 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2995 MonoMethodSignature *sig, MonoInst **args)
2997 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
3001 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* register ji so the ABS-address resolver can map it back to the patch */
3004 if (cfg->abs_patches == NULL)
3005 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3006 g_hash_table_insert (cfg->abs_patches, ji, ji);
3007 ins = mono_emit_native_call (cfg, ji, sig, args);
3008 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without going through a wrapper).
 *   Disabled under LLVM on amd64, with sequence points, or when explicitly
 *   turned off in cfg.
 */
3013 direct_icalls_enabled (MonoCompile *cfg)
3015 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3017 if (cfg->compile_llvm)
3020 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  If the icall cannot raise
 *   and direct icalls are enabled, inline its wrapper (icall + exception
 *   check) instead of calling through the wrapper trampoline.
 */
3026 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3029 * Call the jit icall without a wrapper if possible.
3030 * The wrapper is needed for the following reasons:
3031 * - to handle exceptions thrown using mono_raise_exceptions () from the
3032 * icall function. The EH code needs the lmf frame pushed by the
3033 * wrapper to be able to unwind back to managed code.
3034 * - to be able to do stack walks for asynchronously suspended
3035 * threads when debugging.
3037 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* lazily create the wrapper method, publish with a barrier */
3041 if (!info->wrapper_method) {
3042 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3043 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3045 mono_memory_barrier ();
3049 * Inline the wrapper method, which is basically a call to the C icall, and
3050 * an exception check.
3052 costs = inline_method (cfg, info->wrapper_method, NULL,
3053 args, NULL, cfg->real_offset, TRUE);
3054 g_assert (costs > 0);
3055 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* fallback: call through the wrapper trampoline */
3059 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small-integer call results for pinvoke (and LLVM)
 *   returns, since native code may leave the upper bits uninitialized.
 *   Returns the (possibly widened) result instruction.
 * NOTE(review): the 'break;' lines of the switch fall between the sampled
 * lines of this extract.
 */
3064 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3066 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3067 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3071 * Native code might return non register sized integers
3072 * without initializing the upper bits.
3074 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3075 case OP_LOADI1_MEMBASE:
3076 widen_op = OP_ICONV_TO_I1;
3078 case OP_LOADU1_MEMBASE:
3079 widen_op = OP_ICONV_TO_U1;
3081 case OP_LOADI2_MEMBASE:
3082 widen_op = OP_ICONV_TO_I2;
3084 case OP_LOADU2_MEMBASE:
3085 widen_op = OP_ICONV_TO_U2;
3091 if (widen_op != -1) {
3092 int dreg = alloc_preg (cfg);
3095 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3096 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and cache) the managed String.memcpy(dst, src, n) helper from
 *   corlib.  Aborts if corlib is too old to provide it.
 */
3106 get_memcpy_method (void)
3108 static MonoMethod *memcpy_method = NULL;
3109 if (!memcpy_method) {
3110 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3112 g_error ("Old corlib found. Install a new one");
3114 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively fill WB_BITMAP with one bit per pointer-sized slot of KLASS
 *   (starting at OFFSET) that holds a GC reference.  Used to decide which
 *   stores in a value copy need write barriers.
 */
3118 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3120 MonoClassField *field;
3121 gpointer iter = NULL;
3123 while ((field = mono_class_get_fields (klass, &iter))) {
3126 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetypes: field offsets include the (absent) object header */
3128 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3129 if (mini_type_is_reference (mono_field_get_type (field))) {
3130 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3131 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* nested struct with references: recurse at its offset */
3133 MonoClass *field_class = mono_class_from_mono_type (field->type);
3134 if (field_class->has_references)
3135 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Tries, in order:
 *   a backend card-table barrier opcode, inline card-table marking, and
 *   finally a call to the generic write barrier method.  No-op when write
 *   barriers are disabled.
 */
3141 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3143 int card_table_shift_bits;
3144 gpointer card_table_mask;
3146 MonoInst *dummy_use;
3147 int nursery_shift_bits;
3148 size_t nursery_size;
3150 if (!cfg->gen_write_barriers)
3153 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3155 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* fast path: backend-provided card table barrier opcode */
3157 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3160 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3161 wbarrier->sreg1 = ptr->dreg;
3162 wbarrier->sreg2 = value->dreg;
3163 MONO_ADD_INS (cfg->cbb, wbarrier);
/* inline card marking: card = table + (ptr >> shift); *card = 1 */
3164 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3165 int offset_reg = alloc_preg (cfg);
3169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3170 if (card_table_mask)
3171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3173 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3174 * IMM's larger than 32bits.
3176 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3177 card_reg = ins->dreg;
3179 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3180 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* slow path: call the generic managed write barrier */
3182 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3183 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
3186 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of
 *   SIZE bytes from iargs[1] to iargs[0].  Bails out (returns without
 *   unrolling) when alignment is below pointer size or the type is too big
 *   for the 32-slot barrier bitmap; larger copies go through the
 *   mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 */
3190 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3192 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3193 unsigned need_wb = 0;
3198 /*types with references can't have alignment smaller than sizeof(void*) */
3199 if (align < SIZEOF_VOID_P)
3202 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3203 if (size > 32 * SIZEOF_VOID_P)
3206 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3208 /* We don't unroll more than 5 stores to avoid code bloat. */
3209 if (size > 5 * SIZEOF_VOID_P) {
3210 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3211 size += (SIZEOF_VOID_P - 1);
3212 size &= ~(SIZEOF_VOID_P - 1);
3214 EMIT_NEW_ICONST (cfg, iargs [2], size);
3215 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3216 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3220 destreg = iargs [0]->dreg;
3221 srcreg = iargs [1]->dreg;
3224 dest_ptr_reg = alloc_preg (cfg);
3225 tmp_reg = alloc_preg (cfg);
/* running destination pointer, advanced as we copy */
3228 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* copy pointer-sized words, emitting a barrier for reference slots */
3230 while (size >= SIZEOF_VOID_P) {
3231 MonoInst *load_inst;
3232 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3233 load_inst->dreg = tmp_reg;
3234 load_inst->inst_basereg = srcreg;
3235 load_inst->inst_offset = offset;
3236 MONO_ADD_INS (cfg->cbb, load_inst);
3238 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3241 emit_write_barrier (cfg, iargs [0], load_inst);
3243 offset += SIZEOF_VOID_P;
3244 size -= SIZEOF_VOID_P;
3247 /*tmp += sizeof (void*)*/
3248 if (size >= SIZEOF_VOID_P) {
3249 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3250 MONO_ADD_INS (cfg->cbb, iargs [0]);
3254 /* Those cannot be references since size < sizeof (void*) */
3256 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3257 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3263 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3264 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3270 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3271 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3280 * Emit code to copy a valuetype of type @klass whose address is stored in
3281 * @src->dreg to memory whose address is stored at @dest->dreg.
/* NATIVE selects the native (marshalled) layout; gsharedvt types use
 * runtime-provided size/memcpy helpers from the rgctx. */
3284 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3286 MonoInst *iargs [4];
3289 MonoMethod *memcpy_method;
3290 MonoInst *size_ins = NULL;
3291 MonoInst *memcpy_ins = NULL;
3295 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3298 * This check breaks with spilled vars... need to handle it during verification anyway.
3299 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime */
3302 if (mini_is_gsharedvt_klass (klass)) {
3304 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3305 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3309 n = mono_class_native_size (klass, &align);
3311 n = mono_class_value_size (klass, &align);
3313 /* if native is true there should be no references in the struct */
3314 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3315 /* Avoid barriers when storing to the stack */
3316 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3317 (dest->opcode == OP_LDADDR))) {
3323 context_used = mini_class_check_context_used (cfg, klass);
3325 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3326 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3328 } else if (context_used) {
3329 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3331 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3332 if (!cfg->compile_aot)
3333 mono_class_compute_gc_descriptor (klass);
3337 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3339 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-barrier path: small copies inlined, large ones via managed memcpy */
3344 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3345 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3346 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3351 iargs [2] = size_ins;
3353 EMIT_NEW_ICONST (cfg, iargs [2], n);
3355 memcpy_method = get_memcpy_method ();
3357 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3359 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and cache) the managed String.memset(ptr, val, n) helper from
 *   corlib.  Aborts if corlib is too old to provide it.
 */
3364 get_memset_method (void)
3366 static MonoMethod *memset_method = NULL;
3367 if (!memset_method) {
3368 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3370 g_error ("Old corlib found. Install a new one");
3372 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at DEST->dreg.
 *   gsharedvt types call the runtime bzero helper with a runtime size;
 *   small known-size types are inlined; otherwise managed memset is called.
 */
3376 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3378 MonoInst *iargs [3];
3381 MonoMethod *memset_method;
3382 MonoInst *size_ins = NULL;
3383 MonoInst *bzero_ins = NULL;
3384 static MonoMethod *bzero_method;
3386 /* FIXME: Optimize this for the case when dest is an LDADDR */
3387 mono_class_init (klass);
3388 if (mini_is_gsharedvt_klass (klass)) {
3389 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3390 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3392 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3393 g_assert (bzero_method);
3395 iargs [1] = size_ins;
3396 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3400 n = mono_class_value_size (klass, &align);
/* small types: inline the memset */
3402 if (n <= sizeof (gpointer) * 8) {
3403 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3406 memset_method = get_memset_method ();
3408 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3409 EMIT_NEW_ICONST (cfg, iargs [2], n);
3410 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3417 * Emit IR to return either the this pointer for instance method,
3418 * or the mrgctx for static methods.
/* Only valid under generic sharing (cfg->gshared is asserted). */
3421 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3423 MonoInst *this_ins = NULL;
3425 g_assert (cfg->gshared);
/* instance methods of reference types: 'this' carries the context */
3427 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3428 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3429 !method->klass->valuetype)
3430 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* generic methods: the mrgctx is in the hidden vtable var */
3432 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3433 MonoInst *mrgctx_loc, *mrgctx_var;
3435 g_assert (!this_ins);
3436 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3438 mrgctx_loc = mono_get_vtable_var (cfg);
3439 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* static / valuetype methods: the vtable is in the hidden var */
3442 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3443 MonoInst *vtable_loc, *vtable_var;
3445 g_assert (!this_ins);
3447 vtable_loc = mono_get_vtable_var (cfg);
3448 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3450 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3451 MonoInst *mrgctx_var = vtable_var;
/* the hidden var held an mrgctx; load its class_vtable field */
3454 vtable_reg = alloc_preg (cfg);
3455 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3456 vtable_var->type = STACK_PTR;
/* instance case: load the vtable out of 'this' */
3464 vtable_reg = alloc_preg (cfg);
3465 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry patch descriptor: METHOD's rgctx (or mrgctx when
 *   IN_MRGCTX) should yield INFO_TYPE data for the patch (PATCH_TYPE,
 *   PATCH_DATA).  All memory is mempool-owned.
 */
3470 static MonoJumpInfoRgctxEntry *
3471 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3473 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3474 res->method = method;
3475 res->in_mrgctx = in_mrgctx;
3476 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3477 res->data->type = patch_type;
3478 res->data->data.target = patch_data;
3479 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline fetch of rgctx slot ENTRY from RGCTX: walk the chain of
 *   rgctx arrays to the slot, and if the slot (or any intermediate array) is
 *   still NULL, fall back to the mono_fill_{method,class}_rgctx icall which
 *   lazily instantiates it.  One path (llvm-only, per the icall-only shape at
 *   the top) skips the fast path entirely because the slot index is not a
 *   compile-time constant there.
 */
3484 static inline MonoInst*
3485 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3487 MonoInst *args [16];
3490 // FIXME: No fastpath since the slot is not a compile time constant
3492 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3493 if (entry->in_mrgctx)
3494 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3496 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3500 * FIXME: This can be called during decompose, which is a problem since it creates
3502 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3504 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3506 MonoBasicBlock *is_null_bb, *end_bb;
3507 MonoInst *res, *ins, *call;
3510 slot = mini_get_rgctx_entry_slot (entry);
/* decode slot into (mrgctx?, index) and find the array depth it lives at */
3512 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3513 index = MONO_RGCTX_SLOT_INDEX (slot);
3515 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3516 for (depth = 0; ; ++depth) {
3517 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3519 if (index < size - 1)
3524 NEW_BBLOCK (cfg, end_bb);
3525 NEW_BBLOCK (cfg, is_null_bb);
3528 rgctx_reg = rgctx->dreg;
/* class rgctx: load it from the vtable first */
3530 rgctx_reg = alloc_preg (cfg);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3533 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3534 NEW_BBLOCK (cfg, is_null_bb);
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* follow the chain of arrays down to the slot's depth */
3540 for (i = 0; i < depth; ++i) {
3541 int array_reg = alloc_preg (cfg);
3543 /* load ptr to next array */
3544 if (mrgctx && i == 0)
3545 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3548 rgctx_reg = array_reg;
3549 /* is the ptr null? */
3550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3551 /* if yes, jump to actual trampoline */
3552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3556 val_reg = alloc_preg (cfg);
3557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3558 /* is the slot null? */
3559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3560 /* if yes, jump to actual trampoline */
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fast path hit: move the cached value into the result reg */
3564 res_reg = alloc_preg (cfg);
3565 MONO_INST_NEW (cfg, ins, OP_MOVE);
3566 ins->dreg = res_reg;
3567 ins->sreg1 = val_reg;
3568 MONO_ADD_INS (cfg->cbb, ins);
3570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: lazily fill the slot through the runtime */
3573 MONO_START_BB (cfg, is_null_bb);
3575 EMIT_NEW_ICONST (cfg, args [1], index);
3577 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3579 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3580 MONO_INST_NEW (cfg, ins, OP_MOVE);
3581 ins->dreg = res_reg;
3582 ins->sreg1 = call->dreg;
3583 MONO_ADD_INS (cfg->cbb, ins);
3584 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3586 MONO_START_BB (cfg, end_bb);
3595 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/* Either inline (see emit_rgctx_fetch_inline) or through the lazy-fetch
 * trampoline, depending on a cfg condition not visible in this extract. */
3598 static inline MonoInst*
3599 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3602 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3604 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR loading RGCTX_TYPE data for KLASS from the current method's
 *   rgctx/mrgctx.
 */
3608 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3609 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3611 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3612 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3614 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR loading RGCTX_TYPE data for signature SIG from the current
 *   method's rgctx/mrgctx.
 */
3618 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3619 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3621 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3622 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3624 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR loading RGCTX_TYPE data for a gsharedvt call to CMETHOD with
 *   signature SIG from the current method's rgctx/mrgctx.
 */
3628 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3629 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3631 MonoJumpInfoGSharedVtCall *call_info;
3632 MonoJumpInfoRgctxEntry *entry;
3635 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3636 call_info->sig = sig;
3637 call_info->method = cmethod;
3639 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3640 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3642 return emit_rgctx_fetch (cfg, rgctx, entry);
3646 * emit_get_rgctx_virt_method:
3648 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * Allocates a (klass, method) pair descriptor from the compile mempool and
 * fetches the corresponding MONO_PATCH_INFO_VIRT_METHOD rgctx slot.
 */
3651 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3652 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3654 MonoJumpInfoVirtMethod *info;
3655 MonoJumpInfoRgctxEntry *entry;
3658 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3659 info->klass = klass;
3660 info->method = virt_method;
3662 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3663 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3665 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to fetch the gsharedvt per-method info (MonoGSharedVtMethodInfo)
 * for CMETHOD from the rgctx; the slot type is fixed to
 * MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO.
 */
3669 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3670 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3672 MonoJumpInfoRgctxEntry *entry;
3675 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3676 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3678 return emit_rgctx_fetch (cfg, rgctx, entry);
3682 * emit_get_rgctx_method:
3684 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3685 * normal constants, else emit a load from the rgctx.
3688 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3689 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared path: the method is known at JIT time, emit a constant. */
3691 if (!context_used) {
3694 switch (rgctx_type) {
3695 case MONO_RGCTX_INFO_METHOD:
3696 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3698 case MONO_RGCTX_INFO_METHOD_RGCTX:
3699 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two rgctx_type values above are valid on the constant path. */
3702 g_assert_not_reached ();
/* Shared path: resolve through the rgctx at run time. */
3705 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3706 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3708 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR that fetches the rgctx entry of type RGCTX_TYPE keyed by FIELD
 * (MONO_PATCH_INFO_FIELD); same pattern as the klass/sig variants above.
 */
3713 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3714 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3716 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3717 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3719 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Find or allocate the slot index for (DATA, RGCTX_TYPE) in the current
 * method's gsharedvt info template table (cfg->gsharedvt_info->entries).
 * Existing entries are reused, except for MONO_RGCTX_INFO_LOCAL_OFFSET,
 * which always gets a fresh slot. The table grows by doubling (starting
 * at 16) out of the compile mempool; the old array is intentionally not
 * freed since mempool memory is released wholesale.
 */
3723 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3725 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3726 MonoRuntimeGenericContextInfoTemplate *template;
/* Linear search for an existing matching entry (pointer-identity on data). */
3731 for (i = 0; i < info->num_entries; ++i) {
3732 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3734 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array when full. */
3738 if (info->num_entries == info->count_entries) {
3739 MonoRuntimeGenericContextInfoTemplate *new_entries;
3740 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3742 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3744 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3745 info->entries = new_entries;
3746 info->count_entries = new_count_entries;
/* Append the new template and return its index. */
3749 idx = info->num_entries;
3750 template = &info->entries [idx];
3751 template->info_type = rgctx_type;
3752 template->data = data;
3754 info->num_entries ++;
3760 * emit_get_gsharedvt_info:
3762 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3765 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Resolve (data, rgctx_type) to a slot index in the gsharedvt info table. */
3770 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3771 /* Load info->entries [idx] */
3772 dreg = alloc_preg (cfg);
/* Pointer-sized load at a compile-time-constant offset from the info var. */
3773 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class's byval type. */
3779 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3781 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3785 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 * Emit IR that ensures KLASS's static constructor has run. The vtable is
 * obtained either from the rgctx (shared code) or as a constant. Two
 * strategies follow: a single OP_GENERIC_CLASS_INIT opcode when the backend
 * supports it (and not compiling with LLVM), or an inline initialized-bit
 * check that branches around a mono_generic_class_init icall.
 */
3788 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3790 MonoInst *vtable_arg;
3793 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable pointer out of the rgctx. */
3796 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3797 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable is known at JIT time. */
3799 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3803 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3806 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3810 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3811 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3813 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3814 ins->sreg1 = vtable_arg->dreg;
3815 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: test MonoVTable.initialized bitfield inline; the bit's byte
 * offset/mask are discovered once and cached in function-level statics. */
3817 static int byte_offset = -1;
3818 static guint8 bitmask;
3819 int bits_reg, inited_reg;
3820 MonoBasicBlock *inited_bb;
3821 MonoInst *args [16];
3823 if (byte_offset < 0)
3824 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3826 bits_reg = alloc_ireg (cfg);
3827 inited_reg = alloc_ireg (cfg);
3829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3832 NEW_BBLOCK (cfg, inited_bb);
/* If the initialized bit is set, skip the icall. */
3834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3835 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3837 args [0] = vtable_arg;
3838 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3840 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 * Emit a debugger sequence point at IL offset (ip - header->code) when
 * sequence points are enabled and we are compiling the outermost method
 * (not an inlined callee). NONEMPTY_STACK marks points where the IL
 * evaluation stack is not empty.
 */
3845 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3849 if (cfg->gen_seq_points && cfg->method == method) {
3850 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3852 ins->flags |= MONO_INST_NONEMPTY_STACK;
3853 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * Debug aid, active only under --debug=casts (better_cast_details): before a
 * cast, record the source class (read from obj's vtable) and the target
 * KLASS into the per-thread MonoJitTlsData (class_cast_from/class_cast_to),
 * so a failed cast can produce a detailed message. When NULL_CHECK is set,
 * a null obj skips the recording via the is_null_bb branch.
 */
3858 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3860 if (mini_get_debug_options ()->better_cast_details) {
3861 int vtable_reg = alloc_preg (cfg);
3862 int klass_reg = alloc_preg (cfg);
3863 MonoBasicBlock *is_null_bb = NULL;
3865 int to_klass_reg, context_used;
3868 NEW_BBLOCK (cfg, is_null_bb);
3870 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3871 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Need the JIT TLS intrinsic to reach the thread-local cast-detail fields. */
3874 tls_get = mono_get_jit_tls_intrinsic (cfg);
3876 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3880 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the "from" class of the cast. */
3881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3882 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3884 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3886 context_used = mini_class_check_context_used (cfg, klass);
3888 MonoInst *class_ins;
/* Shared code resolves the target class via the rgctx; otherwise a constant. */
3890 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3891 to_klass_reg = class_ins->dreg;
3893 to_klass_reg = alloc_preg (cfg);
3894 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3899 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 * Counterpart of save_cast_details (): after a cast succeeded, clear the
 * recorded details so stale data cannot leak into a later failure report.
 */
3904 reset_cast_details (MonoCompile *cfg)
3906 /* Reset the variables holding the cast details */
3907 if (mini_get_debug_options ()->better_cast_details) {
3908 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3910 MONO_ADD_INS (cfg->cbb, tls_get);
3911 /* It is enough to reset the from field */
3912 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3917 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit IR verifying that OBJ's runtime type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for covariant array
 * stores). Comparison strategy depends on the compile mode:
 *  - MONO_OPT_SHARED: compare obj->vtable->klass against a runtime constant;
 *  - generic sharing: compare the vtable against the rgctx-resolved vtable;
 *  - AOT: compare against a vtable patch constant;
 *  - plain JIT: compare against the vtable pointer as an immediate.
 */
3920 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3922 int vtable_reg = alloc_preg (cfg);
3925 context_used = mini_class_check_context_used (cfg, array_class);
3927 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3929 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3931 if (cfg->opt & MONO_OPT_SHARED) {
3932 int class_reg = alloc_preg (cfg);
3935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3936 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3937 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3938 } else if (context_used) {
3939 MonoInst *vtable_ins;
3941 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3942 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3944 if (cfg->compile_aot) {
/* mono_class_vtable () can fail on load errors; caller re-checks array_class. */
3948 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3950 vt_reg = alloc_preg (cfg);
3951 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3952 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3955 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3957 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3961 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3963 reset_cast_details (cfg);
3967 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3968 * generic code is generated.
/*
 * Emits a call to KLASS's Nullable Unbox() helper method: indirectly
 * through an rgctx-resolved code address in shared code, or as a direct
 * (possibly vtable-argument-passing) managed call otherwise.
 */
3971 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3973 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3976 MonoInst *rgctx, *addr;
3978 /* FIXME: What if the class is shared? We might not
3979 have to get the address of the method from the
/* Shared path: fetch the compiled-code address of Unbox from the rgctx
 * and call it indirectly. */
3981 addr = emit_get_rgctx_method (cfg, context_used, method,
3982 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3984 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3986 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3988 gboolean pass_vtable, pass_mrgctx;
3989 MonoInst *rgctx_arg = NULL;
3991 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3992 g_assert (!pass_mrgctx);
/* If the callee expects a vtable argument, materialize it as a constant. */
3995 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3998 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4001 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 * Emit the unbox sequence for sp[0]: type-check the boxed object against
 * KLASS (throwing InvalidCastException on mismatch), then compute the
 * address of the value payload (obj + sizeof (MonoObject)) as a STACK_MP
 * result. Only non-array klasses are supported here (rank == 0 asserts).
 */
4006 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4010 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4011 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4012 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4013 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4015 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4016 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4017 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4019 /* FIXME: generics */
4020 g_assert (klass->rank == 0);
/* The boxed object must not be an array. */
4023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4024 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4026 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4027 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
4030 MonoInst *element_class;
4032 /* This assertion is from the unboxcast insn */
4033 g_assert (klass->rank == 0);
/* Shared code: compare against the rgctx-resolved element class. */
4035 element_class = emit_get_rgctx_klass (cfg, context_used,
4036 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4038 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4039 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: inline class check with cast-detail bookkeeping around it. */
4041 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4042 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4043 reset_cast_details (cfg);
/* Result: managed pointer to the value inside the box. */
4046 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4047 MONO_ADD_INS (cfg->cbb, add);
4048 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 * Unbox OBJ when KLASS is a gsharedvt type whose concrete kind (reference
 * type, vtype, or Nullable) is only known at run time. Branches on the
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE value:
 *  - vtype: address is obj + sizeof (MonoObject);
 *  - ref: the reference is spilled to a temporary and its address used;
 *  - Nullable: an indirect call to the rgctx-resolved Nullable unbox helper.
 * All paths leave the payload address in addr_reg, and the value is then
 * loaded from it at the join block.
 */
4055 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4057 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4058 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4062 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Run-time castclass check on the boxed object. */
4068 args [1] = klass_inst;
4071 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4073 NEW_BBLOCK (cfg, is_ref_bb);
4074 NEW_BBLOCK (cfg, is_nullable_bb);
4075 NEW_BBLOCK (cfg, end_bb);
4076 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4078 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4081 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4083 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4084 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough path: plain vtype unbox. */
4088 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4089 MONO_ADD_INS (cfg->cbb, addr);
4091 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4094 MONO_START_BB (cfg, is_ref_bb);
4096 /* Save the ref to a temporary */
4097 dreg = alloc_ireg (cfg);
4098 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4099 addr->dreg = addr_reg;
4100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4101 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4104 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the per-class unbox helper through a hand-built
 * one-argument signature (object -> T). */
4107 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4108 MonoInst *unbox_call;
4109 MonoMethodSignature *unbox_sig;
4111 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4112 unbox_sig->ret = &klass->byval_arg;
4113 unbox_sig->param_count = 1;
4114 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4115 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4117 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4118 addr->dreg = addr_reg;
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4124 MONO_START_BB (cfg, end_bb);
/* Join: load the value through the computed address. */
4127 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4133 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 * Emit IR that allocates an instance of KLASS (FOR_BOX when the caller is
 * boxing). Chooses between several allocation strategies:
 *  - shared-generic code: rgctx-resolved klass/vtable + icall
 *    (mono_object_new / mono_object_new_specific), or a managed allocator
 *    when one exists and the instance size is known;
 *  - MONO_OPT_SHARED: domain + class constant, mono_object_new;
 *  - AOT out-of-line corlib allocations: specialized
 *    mono_helper_newobj_mscorlib keyed by token index (saves relocations);
 *  - default: constant vtable, managed allocator if available, otherwise
 *    the allocation function chosen by mono_class_get_allocation_ftn ().
 */
4136 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4138 MonoInst *iargs [2];
/* Shared-generic path (inner scope shadows iargs deliberately in the original). */
4144 MonoInst *iargs [2];
4145 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4147 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4149 if (cfg->opt & MONO_OPT_SHARED)
4150 rgctx_info = MONO_RGCTX_INFO_KLASS;
4152 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4153 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4155 if (cfg->opt & MONO_OPT_SHARED) {
4156 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4158 alloc_ftn = mono_object_new;
4161 alloc_ftn = mono_object_new_specific;
4164 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4165 if (known_instance_size) {
4166 int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header. */
4167 if (size < sizeof (MonoObject))
4168 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4170 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4172 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4175 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4178 if (cfg->opt & MONO_OPT_SHARED) {
4179 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4180 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4182 alloc_ftn = mono_object_new;
4183 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4184 /* This happens often in argument checking code, eg. throw new FooException... */
4185 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4186 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4187 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4189 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4190 MonoMethod *managed_alloc = NULL;
/* Class failed to load: report TypeLoadException via the cfg. */
4194 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4195 cfg->exception_ptr = klass;
4199 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4201 if (managed_alloc) {
4202 int size = mono_class_instance_size (klass);
4203 if (size < sizeof (MonoObject))
4204 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4206 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4207 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4208 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4210 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-words ("lw"). */
4212 guint32 lw = vtable->klass->instance_size;
4213 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4214 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4215 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4218 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4222 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4226 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 * Emit IR that boxes VAL of type KLASS and returns the resulting object.
 * Special cases:
 *  - Nullable<T>: call the class's Box() method (indirect via rgctx-resolved
 *    code address in shared code, direct managed call otherwise);
 *  - gsharedvt klass: run-time three-way branch on CLASS_BOX_TYPE —
 *    vtype (alloc + store payload), ref (value passes through unchanged),
 *    Nullable (indirect call to the rgctx-resolved box helper);
 *  - default: handle_alloc () then store VAL past the MonoObject header.
 */
4229 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4231 MonoInst *alloc, *ins;
4233 if (mono_class_is_nullable (klass)) {
4234 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4237 /* FIXME: What if the class is shared? We might not
4238 have to get the method address from the RGCTX. */
4239 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4240 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4241 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4243 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4245 gboolean pass_vtable, pass_mrgctx;
4246 MonoInst *rgctx_arg = NULL;
4248 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4249 g_assert (!pass_mrgctx);
4252 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4255 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4258 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4262 if (mini_is_gsharedvt_klass (klass)) {
4263 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4264 MonoInst *res, *is_ref, *src_var, *addr;
4267 dreg = alloc_ireg (cfg);
4269 NEW_BBLOCK (cfg, is_ref_bb);
4270 NEW_BBLOCK (cfg, is_nullable_bb);
4271 NEW_BBLOCK (cfg, end_bb);
4272 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4273 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4274 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4277 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: vtype — allocate and copy the payload into the box. */
4280 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4283 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4284 ins->opcode = OP_STOREV_MEMBASE;
4286 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4287 res->type = STACK_OBJ;
4289 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4292 MONO_START_BB (cfg, is_ref_bb);
4294 /* val is a vtype, so has to load the value manually */
4295 src_var = get_vreg_to_inst (cfg, val->dreg);
4297 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4298 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4299 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4300 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4303 MONO_START_BB (cfg, is_nullable_bb);
4306 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4307 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4309 MonoMethodSignature *box_sig;
4312 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4313 * construct that method at JIT time, so have to do things by hand.
4315 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4316 box_sig->ret = &mono_defaults.object_class->byval_arg;
4317 box_sig->param_count = 1;
4318 box_sig->params [0] = &klass->byval_arg;
4319 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4320 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4321 res->type = STACK_OBJ;
4325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4327 MONO_START_BB (cfg, end_bb);
/* Default (non-gsharedvt, non-Nullable) path. */
4331 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4335 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type parameter that is
 * instantiated with a reference type. Such casts need the slower
 * variance-aware cast machinery (the with-cache wrappers).
 */
4341 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4344 MonoGenericContainer *container;
4345 MonoGenericInst *ginst;
4347 if (klass->generic_class) {
4348 container = klass->generic_class->container_class->generic_container;
4349 ginst = klass->generic_class->context.class_inst;
4350 } else if (klass->generic_container && context_used) {
4351 container = klass->generic_container;
4352 ginst = container->context.class_inst;
/* Scan type arguments: a variant parameter bound to a reference type hits. */
4357 for (i = 0; i < container->type_argc; ++i) {
4359 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4361 type = ginst->type_argv [i];
4362 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls never raise
 * managed exceptions; published once behind a memory barrier and then
 * read without locking. */
4368 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 * Return whether CMETHOD's icall may be invoked directly (bypassing the
 * wrapper), which requires that it cannot call mono_raise_exception ().
 * Decided by a small whitelist: System.Math plus a few corlib classes.
 */
4371 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4373 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4374 if (!direct_icalls_enabled (cfg))
4378 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4379 * Whitelist a few icalls for now.
4381 if (!direct_icall_type_hash) {
4382 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4384 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4385 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4386 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4387 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Barrier before publishing so readers see a fully-built table. */
4388 mono_memory_barrier ();
4389 direct_icall_type_hash = h;
4392 if (cmethod->klass == mono_defaults.math_class)
4394 /* No locking needed */
4395 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when KLASS needs the general cast path: interfaces, arrays,
 * Nullable, MarshalByRef, sealed classes, and generic type variables. */
4400 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 * Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * wrapped in save/reset of the debug cast details. ARGS layout is set up
 * by the callers (object, target klass, cache slot).
 */
4403 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4405 MonoMethod *mono_castclass;
4408 mono_castclass = mono_marshal_get_castclass_with_cache ();
4410 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4411 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4412 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 * Return a call-site-unique id for a CASTCLASS_CACHE patch: the method
 * index in the high 16 bits, a per-method counter in the low 16.
 */
4418 get_castclass_cache_idx (MonoCompile *cfg)
4420 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4421 cfg->castclass_cache_index ++;
4422 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 * Non-shared-code variant: target klass is a constant and the cache slot
 * is a CASTCLASS_CACHE runtime constant keyed by a unique call-site index;
 * delegates to emit_castclass_with_cache ().
 */
4426 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4435 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4438 idx = get_castclass_cache_idx (cfg);
4439 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4441 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4442 return emit_castclass_with_cache (cfg, klass, args);
4446 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 * Emit the IL 'castclass' sequence for SRC -> KLASS, throwing
 * InvalidCastException on mismatch. Strategy selection:
 *  - non-shared + variant generic argument: with-cache wrapper (nonshared);
 *  - non-shared + MarshalByRef or interface: inline the castclass wrapper
 *    (inline_method), charging its cost to *inline_costs;
 *  - complex casts (see is_complex_isinst): with-cache wrapper with the
 *    cache slot taken from the rgctx;
 *  - otherwise: inline null check + vtable/klass comparisons, with a fast
 *    vtable-equality shortcut for sealed non-array classes.
 */
4449 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4451 MonoBasicBlock *is_null_bb;
4452 int obj_reg = src->dreg;
4453 int vtable_reg = alloc_preg (cfg);
4455 MonoInst *klass_inst = NULL, *res;
4457 context_used = mini_class_check_context_used (cfg, klass);
4459 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4460 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4461 (*inline_costs) += 2;
4463 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4464 MonoMethod *mono_castclass;
4465 MonoInst *iargs [1];
4468 mono_castclass = mono_marshal_get_castclass (klass);
4471 save_cast_details (cfg, klass, src->dreg, TRUE);
4472 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4473 iargs, ip, cfg->real_offset, TRUE);
4474 reset_cast_details (cfg);
4475 CHECK_CFG_EXCEPTION;
4476 g_assert (costs > 0);
/* Account for the call-sized IL the inlined wrapper replaced. */
4478 cfg->real_offset += 5;
4480 (*inline_costs) += costs;
4488 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4489 MonoInst *cache_ins;
4491 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4496 /* klass - it's the second element of the cache entry*/
4497 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4500 args [2] = cache_ins;
4502 return emit_castclass_with_cache (cfg, klass, args);
4505 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Null objects always pass a castclass. */
4508 NEW_BBLOCK (cfg, is_null_bb);
4510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4513 save_cast_details (cfg, klass, obj_reg, FALSE);
4515 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4517 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4519 int klass_reg = alloc_preg (cfg);
4521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4523 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4524 /* the remoting code is broken, access the class for now */
4525 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4526 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4528 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4529 cfg->exception_ptr = klass;
4532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: exact klass comparison suffices (no subclass walk). */
4534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4537 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4540 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4544 MONO_START_BB (cfg, is_null_bb);
4546 reset_cast_details (cfg);
4555 * Returns NULL and set the cfg exception on error.
4558 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4561 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4562 int obj_reg = src->dreg;
4563 int vtable_reg = alloc_preg (cfg);
4564 int res_reg = alloc_ireg_ref (cfg);
4565 MonoInst *klass_inst = NULL;
4570 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4571 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4572 MonoInst *cache_ins;
4574 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4579 /* klass - it's the second element of the cache entry*/
4580 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4583 args [2] = cache_ins;
4585 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4588 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4591 NEW_BBLOCK (cfg, is_null_bb);
4592 NEW_BBLOCK (cfg, false_bb);
4593 NEW_BBLOCK (cfg, end_bb);
4595 /* Do the assignment at the beginning, so the other assignment can be if converted */
4596 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4597 ins->type = STACK_OBJ;
4600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4605 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4606 g_assert (!context_used);
4607 /* the is_null_bb target simply copies the input register to the output */
4608 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4610 int klass_reg = alloc_preg (cfg);
4613 int rank_reg = alloc_preg (cfg);
4614 int eclass_reg = alloc_preg (cfg);
4616 g_assert (!context_used);
4617 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4622 if (klass->cast_class == mono_defaults.object_class) {
4623 int parent_reg = alloc_preg (cfg);
4624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4625 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4626 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4627 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4628 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4629 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4630 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4632 } else if (klass->cast_class == mono_defaults.enum_class) {
4633 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4634 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4635 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4636 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4638 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4639 /* Check that the object is a vector too */
4640 int bounds_reg = alloc_preg (cfg);
4641 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4646 /* the is_null_bb target simply copies the input register to the output */
4647 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4649 } else if (mono_class_is_nullable (klass)) {
4650 g_assert (!context_used);
4651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4652 /* the is_null_bb target simply copies the input register to the output */
4653 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4655 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4656 g_assert (!context_used);
4657 /* the remoting code is broken, access the class for now */
4658 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4659 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4661 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4662 cfg->exception_ptr = klass;
4665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4674 /* the is_null_bb target simply copies the input register to the output */
4675 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4680 MONO_START_BB (cfg, false_bb);
4682 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4685 MONO_START_BB (cfg, is_null_bb);
4687 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the internal CISINST opcode (remoting-aware isinst probe).
 * Takes an object reference SRC and a class KLASS; materializes an int result.
 * FIX(review): the interface path branched to false2_bb ("cannot determine")
 * with OP_PBNE_UN after comparing custom_type_info against 0, i.e. when the
 * proxy DID have custom type info — inverted. The non-interface path below
 * (and upstream mono) uses OP_PBEQ for the same NULL-info check; corrected.
 */
4693 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4695 /* This opcode takes as input an object reference and a class, and returns:
4696 0) if the object is an instance of the class,
4697 1) if the object is not instance of the class,
4698 2) if the object is a proxy whose type cannot be determined */
4701 #ifndef DISABLE_REMOTING
4702 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4704 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4706 int obj_reg = src->dreg;
4707 int dreg = alloc_ireg (cfg);
4709 #ifndef DISABLE_REMOTING
4710 int klass_reg = alloc_preg (cfg);
4713 NEW_BBLOCK (cfg, true_bb);
4714 NEW_BBLOCK (cfg, false_bb);
4715 NEW_BBLOCK (cfg, end_bb);
4716 #ifndef DISABLE_REMOTING
4717 NEW_BBLOCK (cfg, false2_bb);
4718 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
4721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4722 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4724 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4725 #ifndef DISABLE_REMOTING
4726 NEW_BBLOCK (cfg, interface_fail_bb);
4729 tmp_reg = alloc_preg (cfg);
4730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4731 #ifndef DISABLE_REMOTING
/* Interface check failed: see if the object is a transparent proxy. */
4732 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4733 MONO_START_BB (cfg, interface_fail_bb);
4734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4736 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4738 tmp_reg = alloc_preg (cfg);
4739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* No custom type info => the proxy's type cannot be determined (result 2). */
4741 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false2_bb);
4743 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4746 #ifndef DISABLE_REMOTING
4747 tmp_reg = alloc_preg (cfg);
4748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4751 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class instead of the vtable class. */
4752 tmp_reg = alloc_preg (cfg);
4753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4756 tmp_reg = alloc_preg (cfg);
4757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4761 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4764 MONO_START_BB (cfg, no_proxy_bb);
4766 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4768 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance, 2 = undeterminable proxy, 0 = instance. */
4772 MONO_START_BB (cfg, false_bb);
4774 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4775 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4777 #ifndef DISABLE_REMOTING
4778 MONO_START_BB (cfg, false2_bb);
4780 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4781 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4784 MONO_START_BB (cfg, true_bb);
4786 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4788 MONO_START_BB (cfg, end_bb);
4791 MONO_INST_NEW (cfg, ins, OP_ICONST);
4793 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR for the internal CCASTCLASS opcode (remoting-aware castclass).
 * Takes an object reference SRC and a class KLASS; materializes an int result
 * (see the comment below); throws InvalidCastException on a failed cast.
 */
4799 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4801 /* This opcode takes as input an object reference and a class, and returns:
4802 0) if the object is an instance of the class,
4803 1) if the object is a proxy whose type cannot be determined
4804 an InvalidCastException exception is thrown otherwise*/
4807 #ifndef DISABLE_REMOTING
4808 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4810 MonoBasicBlock *ok_result_bb;
4812 int obj_reg = src->dreg;
4813 int dreg = alloc_ireg (cfg);
4814 int tmp_reg = alloc_preg (cfg);
4816 #ifndef DISABLE_REMOTING
4817 int klass_reg = alloc_preg (cfg);
4818 NEW_BBLOCK (cfg, end_bb);
4821 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
4823 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4824 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a failing cast can produce a detailed exception message. */
4826 save_cast_details (cfg, klass, obj_reg, FALSE);
4828 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4829 #ifndef DISABLE_REMOTING
4830 NEW_BBLOCK (cfg, interface_fail_bb);
4832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4833 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4834 MONO_START_BB (cfg, interface_fail_bb);
4835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not an interface match: only a transparent proxy can still pass. */
4837 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4839 tmp_reg = alloc_preg (cfg);
4840 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
/* Proxy without custom type info cannot satisfy an interface cast. */
4842 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4844 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4845 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4847 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4848 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4852 #ifndef DISABLE_REMOTING
4853 NEW_BBLOCK (cfg, no_proxy_bb);
4855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4857 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the remote class. */
4859 tmp_reg = alloc_preg (cfg);
4860 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4863 tmp_reg = alloc_preg (cfg);
4864 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4868 NEW_BBLOCK (cfg, fail_1_bb);
4870 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4872 MONO_START_BB (cfg, fail_1_bb);
/* Proxy with custom info that failed the check: result 1 (undeterminable). */
4874 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4875 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4877 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check (throws on failure). */
4879 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4881 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4885 MONO_START_BB (cfg, ok_result_bb);
4887 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4889 #ifndef DISABLE_REMOTING
4890 MONO_START_BB (cfg, end_bb);
4894 MONO_INST_NEW (cfg, ins, OP_ICONST);
4896 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 * Emit an inlined Enum.HasFlag: loads the enum value from ENUM_THIS, ANDs it
 * with ENUM_FLAG, and compares the result back against ENUM_FLAG, producing
 * (value & flag) == flag as an int32.
 * NOTE(review): is_i4 is computed in lines elided from this extraction,
 * presumably from the underlying enum type's size — TODO confirm.
 */
4901 static G_GNUC_UNUSED MonoInst*
4902 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4904 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4905 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4908 switch (enum_type->type) {
4911 #if SIZEOF_REGISTER == 8
/* 32-bit path uses I-opcodes and iregs, 64-bit enum path uses L-opcodes and lregs. */
4923 MonoInst *load, *and, *cmp, *ceq;
4924 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4925 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4926 int dest_reg = alloc_ireg (cfg);
4928 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4929 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4930 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4931 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
/* The comparison result is always a 4-byte boolean on the eval stack. */
4933 ceq->type = STACK_I4;
/* Decompose immediately since these opcodes may not be supported natively. */
4936 load = mono_decompose_opcode (cfg, load);
4937 and = mono_decompose_opcode (cfg, and);
4938 cmp = mono_decompose_opcode (cfg, cmp);
4939 ceq = mono_decompose_opcode (cfg, ceq);
4947 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor: allocate the delegate object and
 * fill in its target, method, cached-code slot and invoke_impl fields.
 * METHOD is the delegate target; VIRTUAL selects the virtual-invoke path.
 */
4949 static G_GNUC_UNUSED MonoInst*
4950 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4954 gpointer trampoline;
4955 MonoInst *obj, *method_ins, *tramp_ins;
4959 if (virtual && !cfg->llvm_only) {
4960 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out if the backend has no virtual-invoke thunk for this signature. */
4963 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4967 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4971 if (cfg->llvm_only) {
4972 MonoInst *args [16];
4975 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4976 * the address of a gshared method. So use a JIT icall.
4977 * FIXME: Optimize this.
4981 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4982 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4987 /* Inline the contents of mono_delegate_ctor */
4989 /* Set target field */
4990 /* Optimize away setting of NULL target */
4991 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4992 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds a managed reference, so a write barrier is needed. */
4993 if (cfg->gen_write_barriers) {
4994 dreg = alloc_preg (cfg);
4995 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4996 emit_write_barrier (cfg, ptr, target);
5000 /* Set method field */
5001 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5002 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5005 * To avoid looking up the compiled code belonging to the target method
5006 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5007 * store it, and we fill it after the method has been compiled.
5009 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5010 MonoInst *code_slot_ins;
5013 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and cache the per-domain method -> code-slot mapping. */
5015 domain = mono_domain_get ();
5016 mono_domain_lock (domain);
5017 if (!domain_jit_info (domain)->method_code_hash)
5018 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5019 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5021 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5022 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5024 mono_domain_unlock (domain);
5026 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT cannot embed a native trampoline address; emit a patchable constant. */
5031 if (cfg->compile_aot) {
5032 MonoDelegateClassMethodPair *del_tramp;
5034 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5035 del_tramp->klass = klass;
5036 del_tramp->method = context_used ? NULL : method;
5037 del_tramp->is_virtual = virtual;
5038 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5041 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5043 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5044 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5047 /* Set invoke_impl field */
5049 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT path: tramp_ins points at a MonoDelegateTrampInfo; copy its fields. */
5051 dreg = alloc_preg (cfg);
5052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5053 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5055 dreg = alloc_preg (cfg);
5056 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5057 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5060 dreg = alloc_preg (cfg);
5061 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5062 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5064 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a call to the mono_array_new_va icall for a RANK-dimensional newarr/newobj,
 * with the dimension arguments already on SP. Returns the call instruction.
 */
5070 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5072 MonoJitICallInfo *info;
5074 /* Need to register the icall so it gets an icall wrapper */
5075 info = mono_get_array_new_va_icall (rank);
5077 cfg->flags |= MONO_CFG_HAS_VARARGS;
5079 /* mono_array_new_va () needs a vararg calling convention */
5080 cfg->disable_llvm = TRUE;
5082 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5083 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5087 * handle_constrained_gsharedvt_call:
5089 * Handle constrained calls where the receiver is a gsharedvt type.
5090 * Return the instruction representing the call. Set the cfg exception on failure.
5093 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5094 gboolean *ref_emit_widen)
5096 MonoInst *ins = NULL;
5097 gboolean emit_widen = *ref_emit_widen;
5100 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5101 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5102 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of signatures is supported; everything else bails out below. */
5104 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5105 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5106 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5107 MonoInst *args [16];
5110 * This case handles calls to
5111 * - object:ToString()/Equals()/GetHashCode(),
5112 * - System.IComparable<T>:CompareTo()
5113 * - System.IEquatable<T>:Equals ()
5114 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method, via rgctx when it is generic-context dependent. */
5118 if (mono_method_check_context_used (cmethod))
5119 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5121 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5122 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5124 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5125 if (fsig->hasthis && fsig->param_count) {
5126 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5127 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5128 ins->dreg = alloc_preg (cfg);
5129 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5130 MONO_ADD_INS (cfg->cbb, ins);
5133 if (mini_is_gsharedvt_type (fsig->params [0])) {
5134 int addr_reg, deref_arg_reg;
/* The icall needs to know whether to dereference the argument at runtime. */
5136 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5137 deref_arg_reg = alloc_preg (cfg);
5138 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5139 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5141 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5142 addr_reg = ins->dreg;
5143 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5145 EMIT_NEW_ICONST (cfg, args [3], 0);
5146 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5149 EMIT_NEW_ICONST (cfg, args [3], 0);
5150 EMIT_NEW_ICONST (cfg, args [4], 0);
5152 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox according to the return type. */
5155 if (mini_is_gsharedvt_type (fsig->ret)) {
5156 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5157 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Skip the MonoObject header to reach the unboxed value. */
5161 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5162 MONO_ADD_INS (cfg->cbb, add);
5164 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5165 MONO_ADD_INS (cfg->cbb, ins);
5166 /* ins represents the call result */
5169 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5172 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 * Emit the OP_LOAD_GOTADDR instruction that initializes cfg->got_var at the
 * start of the entry basic block, plus a dummy use in the exit block to keep
 * the variable live. No-op if there is no got_var or it is already allocated.
 */
5181 mono_emit_load_got_addr (MonoCompile *cfg)
5183 MonoInst *getaddr, *dummy_use;
5185 if (!cfg->got_var || cfg->got_var_allocated)
5188 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5189 getaddr->cil_code = cfg->header->code;
5190 getaddr->dreg = cfg->got_var->dreg;
5192 /* Add it to the start of the first bblock */
5193 if (cfg->bb_entry->code) {
/* Prepend rather than append: the GOT address must exist before any use. */
5194 getaddr->next = cfg->bb_entry->code;
5195 cfg->bb_entry->code = getaddr;
5198 MONO_ADD_INS (cfg->bb_entry, getaddr);
5200 cfg->got_var_allocated = TRUE;
5203 * Add a dummy use to keep the got_var alive, since real uses might
5204 * only be generated by the back ends.
5205 * Add it to end_bblock, so the variable's lifetime covers the whole
5207 * It would be better to make the usage of the got var explicit in all
5208 * cases when the backend needs it (i.e. calls, throw etc.), so this
5209 * wouldn't be needed.
5211 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5212 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit; overridable via MONO_INLINELIMIT
 * (see mono_method_check_inlining below). */
5215 static int inline_limit;
5216 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Rejects methods that are explicitly non-inlinable, synchronized,
 * marshal-by-ref, too large, soft-float incompatible, on the dont_inline list,
 * or whose class would require an unsafe cctor run.
 */
5219 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5221 MonoMethodHeaderSummary header;
5223 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5224 MonoMethodSignature *sig = mono_method_signature (method);
5228 if (cfg->disable_inline)
/* Cap recursion depth of nested inlining. */
5233 if (cfg->inline_depth > 10)
5236 if (!mono_method_get_header_summary (method, &header))
5239 /*runtime, icall and pinvoke are checked by summary call*/
5240 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5241 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5242 (mono_class_is_marshalbyref (method->klass)) ||
5246 /* also consider num_locals? */
5247 /* Do the size check early to avoid creating vtables */
5248 if (!inline_limit_inited) {
5249 if (g_getenv ("MONO_INLINELIMIT"))
5250 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5252 inline_limit = INLINE_LENGTH_LIMIT;
5253 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
5255 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5259 * if we can initialize the class of the method right away, we do,
5260 * otherwise we don't allow inlining if the class needs initialization,
5261 * since it would mean inserting a call to mono_runtime_class_init()
5262 * inside the inlined code
5264 if (!(cfg->opt & MONO_OPT_SHARED)) {
5265 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5266 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5267 vtable = mono_class_vtable (cfg->domain, method->klass);
5270 if (!cfg->compile_aot)
5271 mono_runtime_class_init (vtable);
5272 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5273 if (cfg->run_cctors && method->klass->has_cctor) {
5274 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5275 if (!method->klass->runtime_info)
5276 /* No vtable created yet */
5278 vtable = mono_class_vtable (cfg->domain, method->klass);
5281 /* This makes so that inline cannot trigger */
5282 /* .cctors: too many apps depend on them */
5283 /* running with a specific order... */
5284 if (! vtable->initialized)
5286 mono_runtime_class_init (vtable);
5288 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5289 if (!method->klass->runtime_info)
5290 /* No vtable created yet */
5292 vtable = mono_class_vtable (cfg->domain, method->klass);
5295 if (!vtable->initialized)
5300 * If we're compiling for shared code
5301 * the cctor will need to be run at aot method load time, for example,
5302 * or at the end of the compilation of the inlining method.
5304 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods taking/returning R4. */
5308 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5309 if (mono_arch_is_soft_float ()) {
5311 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5313 for (i = 0; i < sig->param_count; ++i)
5314 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5319 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 * Return whether a static-field access in METHOD on KLASS requires emitting a
 * class-initialization check. Skips the check when the vtable is already
 * initialized (JIT only), or when initialization is guaranteed by the caller.
 */
5326 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5328 if (!cfg->compile_aot) {
5330 if (vtable->initialized)
/* beforefieldinit classes: only the cctor-triggering method itself is exempt. */
5334 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5335 if (cfg->method == method)
5339 if (!mono_class_needs_cctor_run (klass, method))
5342 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5343 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit the address computation for a single-dimensional array element:
 * ARR->vector + INDEX * element_size, with an optional bounds check (BCHECK).
 * Returns the address instruction (STACK_MP).
 */
5350 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5354 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt variable-size elements take a separate path (element size via rgctx). */
5357 if (mini_is_gsharedvt_variable_klass (klass)) {
5360 mono_class_init (klass);
5361 size = mono_class_array_element_size (klass);
5364 mult_reg = alloc_preg (cfg);
5365 array_reg = arr->dreg;
5366 index_reg = index->dreg;
5368 #if SIZEOF_REGISTER == 8
5369 /* The array reg is 64 bits but the index reg is only 32 */
5370 if (COMPILE_LLVM (cfg)) {
5372 index2_reg = index_reg;
5374 index2_reg = alloc_preg (cfg);
5375 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5378 if (index->type == STACK_I8) {
5379 index2_reg = alloc_preg (cfg);
5380 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5382 index2_reg = index_reg;
5387 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold shift+add into a single LEA for power-of-two sizes. */
5389 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5390 if (size == 1 || size == 2 || size == 4 || size == 8) {
5391 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5393 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5394 ins->klass = mono_class_get_element_class (klass);
5395 ins->type = STACK_MP;
5401 add_reg = alloc_ireg_mp (cfg);
5404 MonoInst *rgctx_ins;
/* Variable-size element: fetch the element size from the gsharedvt rgctx. */
5407 g_assert (cfg->gshared);
5408 context_used = mini_class_check_context_used (cfg, klass);
5409 g_assert (context_used);
5410 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5411 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5415 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5416 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5417 ins->klass = mono_class_get_element_class (klass);
5418 ins->type = STACK_MP;
5419 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 * Emit the address computation for a 2-dimensional array element, including
 * per-dimension lower-bound adjustment and range checks against the
 * MonoArrayBounds entries. Returns the address instruction (STACK_MP).
 */
5425 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5427 int bounds_reg = alloc_preg (cfg);
5428 int add_reg = alloc_ireg_mp (cfg);
5429 int mult_reg = alloc_preg (cfg);
5430 int mult2_reg = alloc_preg (cfg);
5431 int low1_reg = alloc_preg (cfg);
5432 int low2_reg = alloc_preg (cfg);
5433 int high1_reg = alloc_preg (cfg);
5434 int high2_reg = alloc_preg (cfg);
5435 int realidx1_reg = alloc_preg (cfg);
5436 int realidx2_reg = alloc_preg (cfg);
5437 int sum_reg = alloc_preg (cfg);
5438 int index1, index2, tmpreg;
5442 mono_class_init (klass);
5443 size = mono_class_array_element_size (klass);
5445 index1 = index_ins1->dreg;
5446 index2 = index_ins2->dreg;
5448 #if SIZEOF_REGISTER == 8
5449 /* The array reg is 64 bits but the index reg is only 32 */
5450 if (COMPILE_LLVM (cfg)) {
5453 tmpreg = alloc_preg (cfg);
5454 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5456 tmpreg = alloc_preg (cfg);
5457 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5461 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5465 /* range checking */
5466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5467 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, then unsigned-compare vs length. */
5469 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5470 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5471 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5472 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5473 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5474 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5475 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: bounds entry sits one MonoArrayBounds further along. */
5477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5478 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5479 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5480 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5481 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5482 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5483 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector) */
5485 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5486 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5488 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5489 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5491 ins->type = STACK_MP;
5493 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit an element-address computation for the array Get/Set/Address helpers.
 * Uses the inline 1-D/2-D fast paths when possible, otherwise calls the
 * marshalled Address wrapper. IS_SET drops the trailing value parameter from
 * the rank computation.
 */
5499 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5503 MonoMethod *addr_method;
5505 MonoClass *eclass = cmethod->klass->element_class;
5507 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5510 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5512 /* emit_ldelema_2 depends on OP_LMUL */
5513 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5514 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5517 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the per-(rank, element_size) Address wrapper. */
5520 element_size = mono_class_array_element_size (eclass);
5521 addr_method = mono_marshal_get_array_address (rank, element_size);
5522 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor breakpoints. */
static MonoBreakPolicy
4528 always_insert_breakpoint (MonoMethod *method)
4530 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4533 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4536 * mono_set_break_policy:
4537 * policy_callback: the new callback function
4539 * Allow embedders to decide whether to actually obey breakpoint instructions
4540 * (both break IL instructions and Debugger.Break () method calls), for example
4541 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4542 * untrusted or semi-trusted code.
4544 * @policy_callback will be called every time a break point instruction needs to
4545 * be inserted with the method argument being the method that calls Debugger.Break()
4546 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4547 * if it wants the breakpoint to not be effective in the given method.
4548 * #MONO_BREAK_POLICY_ALWAYS is the default.
4551 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4553 if (policy_callback)
4554 break_policy_func = policy_callback;
/* NULL restores the default always-break policy. */
4556 break_policy_func = always_insert_breakpoint;
/* Consult the embedder-provided break policy for METHOD.
 * (Name misspelling "brekpoint" is historical; renaming would touch callers.) */
4560 should_insert_brekpoint (MonoMethod *method) {
4561 switch (break_policy_func (method)) {
4562 case MONO_BREAK_POLICY_ALWAYS:
4564 case MONO_BREAK_POLICY_NEVER:
4566 case MONO_BREAK_POLICY_ON_DBG:
4567 g_warning ("mdb no longer supported");
4570 g_warning ("Incorrect value returned from break policy callback");
5575 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/* Copies a value between an array element and a by-ref location, in the
 * direction selected by IS_SET; the element address comes from ldelema. */
5577 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5579 MonoInst *addr, *store, *load;
5580 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5582 /* the bounds check is already done by the callers */
5583 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: read from the by-ref argument, write into the array element. */
5585 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5586 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Storing a reference into the array needs a GC write barrier. */
5587 if (mini_type_is_reference (fsig->params [2]))
5588 emit_write_barrier (cfg, addr, load);
/* get: read the array element, write into the by-ref argument. */
5590 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5591 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if KLASS is a reference type for store/write-barrier purposes.
 * Thin wrapper over mini_type_is_reference on the class' byval type. */
5598 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5600 return mini_type_is_reference (&klass->byval_arg);
/* Emit IR for storing sp [2] into array sp [0] at index sp [1].
 * With safety_checks and a reference element type (and a possibly non-null
 * value), the covariance check is delegated to the virtual stelemref helper;
 * otherwise a direct store is emitted, with a constant-index fast path.
 * NOTE(review): several structural lines (braces, the iargs assignments for
 * the helper call, the else keywords) are not visible in this extract. */
5604 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Reference-type store that may need an array-covariance check: call the
 * shared virtual stelemref helper instead of emitting an inline store.
 * A store of a known-null constant (OP_PCONST/NULL) never needs the check. */
5606 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5607 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5608 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5609 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5610 MonoInst *iargs [3];
/* The helper is dispatched through object[]'s vtable; make sure it exists. */
5613 mono_class_setup_vtable (obj_array);
5614 g_assert (helper->slot);
5616 if (sp [0]->type != STACK_OBJ)
5618 if (sp [2]->type != STACK_OBJ)
5625 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size gsharedvt element: compute the address and emit a
 * variable-size store (OP_STOREV_MEMBASE). */
5629 if (mini_is_gsharedvt_variable_klass (klass)) {
5632 // FIXME-VT: OP_ICONST optimization
5633 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5634 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5635 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time and store with an
 * explicit bounds check against max_length. */
5636 } else if (sp [1]->opcode == OP_ICONST) {
5637 int array_reg = sp [0]->dreg;
5638 int index_reg = sp [1]->dreg;
5639 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5642 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5643 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address (bounds-checked iff
 * safety_checks), store, and emit a write barrier for reference elements. */
5645 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5646 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5647 if (generic_class_is_reference_type (cfg, klass))
5648 emit_write_barrier (cfg, addr, sp [2]);
/* Inline implementation of Array.UnsafeStore/UnsafeLoad: element access with
 * no bounds or covariance checks. For a store the element class comes from
 * the value parameter (params [2]); for a load, from the return type.
 * NOTE(review): the 'if (is_set)'/'else' lines selecting between the two
 * eklass assignments and the two paths are not visible in this extract. */
5655 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5660 eklass = mono_class_from_mono_type (fsig->params [2]);
5662 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path: reuse emit_array_store with safety_checks == FALSE. */
5665 return emit_array_store (cfg, eklass, args, FALSE);
/* Load path: address the element and load it, no checks. */
5667 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5668 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5674 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5677 int param_size, return_size;
5679 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5680 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5682 if (cfg->verbose_level > 3)
5683 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5685 //Don't allow mixing reference types with value types
5686 if (param_klass->valuetype != return_klass->valuetype) {
5687 if (cfg->verbose_level > 3)
5688 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5692 if (!param_klass->valuetype) {
5693 if (cfg->verbose_level > 3)
5694 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5699 if (param_klass->has_references || return_klass->has_references)
5702 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5703 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5704 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5705 if (cfg->verbose_level > 3)
5706 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5710 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5711 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5712 if (cfg->verbose_level > 3)
5713 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5717 param_size = mono_class_value_size (param_klass, &align);
5718 return_size = mono_class_value_size (return_klass, &align);
5720 //We can do it if sizes match
5721 if (param_size == return_size) {
5722 if (cfg->verbose_level > 3)
5723 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5727 //No simple way to handle struct if sizes don't match
5728 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5729 if (cfg->verbose_level > 3)
5730 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5735 * Same reg size category.
5736 * A quick note on why we don't require widening here.
5737 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5739 * Since the source value comes from a function argument, the JIT will already have
5740 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5742 if (param_size <= 4 && return_size <= 4) {
5743 if (cfg->verbose_level > 3)
5744 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/* Inline implementation of Array.UnsafeMov<S,R>: when the source and
 * destination types are move-compatible (see is_unsafe_mov_compatible), the
 * call reduces to passing the argument through unchanged.
 * NOTE(review): the return statements of both branches are not visible in
 * this extract. */
5752 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5754 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5755 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
//Valuetypes that are semantically equivalent or numbers that can be widened to
5758 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5761 //Arrays of valuetypes that are semantically equivalent
5762 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/* Try to replace a constructor call with an intrinsic instruction sequence.
 * SIMD intrinsics are attempted first (when the arch supports them and
 * MONO_OPT_SIMD is enabled), then native-types intrinsics.
 * NOTE(review): the braces, the return of 'ins' on the SIMD path and the
 * closing #endif are not visible in this extract. */
5769 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5771 #ifdef MONO_ARCH_SIMD_INTRINSICS
5772 MonoInst *ins = NULL;
5774 if (cfg->opt & MONO_OPT_SIMD) {
5775 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fallback: try the native-types (nint/nfloat) intrinsics. */
5781 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Append an OP_MEMORY_BARRIER instruction of the given kind (one of the
 * MONO_MEMORY_BARRIER_* constants) to the current basic block.
 * NOTE(review): the 'return ins;' line is not visible in this extract. */
5785 emit_memory_barrier (MonoCompile *cfg, int kind)
5787 MonoInst *ins = NULL;
5788 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5789 MONO_ADD_INS (cfg->cbb, ins);
5790 ins->backend.memory_barrier_kind = kind;
/* Intrinsics that are only emitted when compiling with the LLVM backend:
 * System.Math Sin/Cos/Sqrt/Abs(double) as unary FP opcodes, and Min/Max as
 * branchless integer opcodes when MONO_OPT_CMOV is on.
 * NOTE(review): the 'int opcode = 0;' declaration and the opcode
 * assignments for Sin/Cos/Sqrt/Abs and the signed Min/Max cases are not
 * visible in this extract. */
5796 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5798 MonoInst *ins = NULL;
5801 /* The LLVM backend supports these intrinsics */
5802 if (cmethod->klass == mono_defaults.math_class) {
5803 if (strcmp (cmethod->name, "Sin") == 0) {
5805 } else if (strcmp (cmethod->name, "Cos") == 0) {
5807 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5809 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one double argument, double result. */
5813 if (opcode && fsig->param_count == 1) {
5814 MONO_INST_NEW (cfg, ins, opcode);
5815 ins->type = STACK_R8;
5816 ins->dreg = mono_alloc_freg (cfg);
5817 ins->sreg1 = args [0]->dreg;
5818 MONO_ADD_INS (cfg->cbb, ins);
/* Branchless Min/Max, only profitable when conditional moves are enabled. */
5822 if (cfg->opt & MONO_OPT_CMOV) {
5823 if (strcmp (cmethod->name, "Min") == 0) {
5824 if (fsig->params [0]->type == MONO_TYPE_I4)
5826 if (fsig->params [0]->type == MONO_TYPE_U4)
5827 opcode = OP_IMIN_UN;
5828 else if (fsig->params [0]->type == MONO_TYPE_I8)
5830 else if (fsig->params [0]->type == MONO_TYPE_U8)
5831 opcode = OP_LMIN_UN;
5832 } else if (strcmp (cmethod->name, "Max") == 0) {
5833 if (fsig->params [0]->type == MONO_TYPE_I4)
5835 if (fsig->params [0]->type == MONO_TYPE_U4)
5836 opcode = OP_IMAX_UN;
5837 else if (fsig->params [0]->type == MONO_TYPE_I8)
5839 else if (fsig->params [0]->type == MONO_TYPE_U8)
5840 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: result width follows the argument type. */
5844 if (opcode && fsig->param_count == 2) {
5845 MONO_INST_NEW (cfg, ins, opcode);
5846 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5847 ins->dreg = mono_alloc_ireg (cfg);
5848 ins->sreg1 = args [0]->dreg;
5849 ins->sreg2 = args [1]->dreg;
5850 MONO_ADD_INS (cfg->cbb, ins);
/* Intrinsics that are safe to emit in shared (generic-sharing) code:
 * the System.Array UnsafeStore/UnsafeLoad/UnsafeMov internal methods.
 * Returns the replacement instruction, or falls through (presumably NULL —
 * the trailing return is not visible in this extract) when the method is
 * not one of these. */
5858 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5860 if (cmethod->klass == mono_defaults.array_class) {
5861 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5862 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5863 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5864 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5865 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5866 return emit_array_unsafe_mov (cfg, fsig, args);
5873 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5875 MonoInst *ins = NULL;
5877 static MonoClass *runtime_helpers_class = NULL;
5878 if (! runtime_helpers_class)
5879 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5880 "System.Runtime.CompilerServices", "RuntimeHelpers");
5882 if (cmethod->klass == mono_defaults.string_class) {
5883 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5884 int dreg = alloc_ireg (cfg);
5885 int index_reg = alloc_preg (cfg);
5886 int add_reg = alloc_preg (cfg);
5888 #if SIZEOF_REGISTER == 8
5889 /* The array reg is 64 bits but the index reg is only 32 */
5890 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5892 index_reg = args [1]->dreg;
5894 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5896 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5897 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5898 add_reg = ins->dreg;
5899 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5902 int mult_reg = alloc_preg (cfg);
5903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5904 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5906 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5908 type_from_op (cfg, ins, NULL, NULL);
5910 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5911 int dreg = alloc_ireg (cfg);
5912 /* Decompose later to allow more optimizations */
5913 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5914 ins->type = STACK_I4;
5915 ins->flags |= MONO_INST_FAULT;
5916 cfg->cbb->has_array_access = TRUE;
5917 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5922 } else if (cmethod->klass == mono_defaults.object_class) {
5924 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5925 int dreg = alloc_ireg_ref (cfg);
5926 int vt_reg = alloc_preg (cfg);
5927 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5928 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5929 type_from_op (cfg, ins, NULL, NULL);
5932 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5933 int dreg = alloc_ireg (cfg);
5934 int t1 = alloc_ireg (cfg);
5936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5937 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5938 ins->type = STACK_I4;
5941 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5942 MONO_INST_NEW (cfg, ins, OP_NOP);
5943 MONO_ADD_INS (cfg->cbb, ins);
5947 } else if (cmethod->klass == mono_defaults.array_class) {
5948 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5949 return emit_array_generic_access (cfg, fsig, args, FALSE);
5950 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5951 return emit_array_generic_access (cfg, fsig, args, TRUE);
5953 #ifndef MONO_BIG_ARRAYS
5955 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5958 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5959 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5960 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5961 int dreg = alloc_ireg (cfg);
5962 int bounds_reg = alloc_ireg_mp (cfg);
5963 MonoBasicBlock *end_bb, *szarray_bb;
5964 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5966 NEW_BBLOCK (cfg, end_bb);
5967 NEW_BBLOCK (cfg, szarray_bb);
5969 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5970 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5971 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5972 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5973 /* Non-szarray case */
5975 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5976 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5978 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5979 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5980 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5981 MONO_START_BB (cfg, szarray_bb);
5984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5985 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5987 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5988 MONO_START_BB (cfg, end_bb);
5990 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5991 ins->type = STACK_I4;
5997 if (cmethod->name [0] != 'g')
6000 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6001 int dreg = alloc_ireg (cfg);
6002 int vtable_reg = alloc_preg (cfg);
6003 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6004 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6005 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6006 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6007 type_from_op (cfg, ins, NULL, NULL);
6010 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6011 int dreg = alloc_ireg (cfg);
6013 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6014 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6015 type_from_op (cfg, ins, NULL, NULL);
6020 } else if (cmethod->klass == runtime_helpers_class) {
6022 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6023 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6027 } else if (cmethod->klass == mono_defaults.thread_class) {
6028 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6029 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6030 MONO_ADD_INS (cfg->cbb, ins);
6032 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6033 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6034 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6036 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6038 if (fsig->params [0]->type == MONO_TYPE_I1)
6039 opcode = OP_LOADI1_MEMBASE;
6040 else if (fsig->params [0]->type == MONO_TYPE_U1)
6041 opcode = OP_LOADU1_MEMBASE;
6042 else if (fsig->params [0]->type == MONO_TYPE_I2)
6043 opcode = OP_LOADI2_MEMBASE;
6044 else if (fsig->params [0]->type == MONO_TYPE_U2)
6045 opcode = OP_LOADU2_MEMBASE;
6046 else if (fsig->params [0]->type == MONO_TYPE_I4)
6047 opcode = OP_LOADI4_MEMBASE;
6048 else if (fsig->params [0]->type == MONO_TYPE_U4)
6049 opcode = OP_LOADU4_MEMBASE;
6050 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6051 opcode = OP_LOADI8_MEMBASE;
6052 else if (fsig->params [0]->type == MONO_TYPE_R4)
6053 opcode = OP_LOADR4_MEMBASE;
6054 else if (fsig->params [0]->type == MONO_TYPE_R8)
6055 opcode = OP_LOADR8_MEMBASE;
6056 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6057 opcode = OP_LOAD_MEMBASE;
6060 MONO_INST_NEW (cfg, ins, opcode);
6061 ins->inst_basereg = args [0]->dreg;
6062 ins->inst_offset = 0;
6063 MONO_ADD_INS (cfg->cbb, ins);
6065 switch (fsig->params [0]->type) {
6072 ins->dreg = mono_alloc_ireg (cfg);
6073 ins->type = STACK_I4;
6077 ins->dreg = mono_alloc_lreg (cfg);
6078 ins->type = STACK_I8;
6082 ins->dreg = mono_alloc_ireg (cfg);
6083 #if SIZEOF_REGISTER == 8
6084 ins->type = STACK_I8;
6086 ins->type = STACK_I4;
6091 ins->dreg = mono_alloc_freg (cfg);
6092 ins->type = STACK_R8;
6095 g_assert (mini_type_is_reference (fsig->params [0]));
6096 ins->dreg = mono_alloc_ireg_ref (cfg);
6097 ins->type = STACK_OBJ;
6101 if (opcode == OP_LOADI8_MEMBASE)
6102 ins = mono_decompose_opcode (cfg, ins);
6104 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6108 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6110 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6112 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6113 opcode = OP_STOREI1_MEMBASE_REG;
6114 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6115 opcode = OP_STOREI2_MEMBASE_REG;
6116 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6117 opcode = OP_STOREI4_MEMBASE_REG;
6118 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6119 opcode = OP_STOREI8_MEMBASE_REG;
6120 else if (fsig->params [0]->type == MONO_TYPE_R4)
6121 opcode = OP_STORER4_MEMBASE_REG;
6122 else if (fsig->params [0]->type == MONO_TYPE_R8)
6123 opcode = OP_STORER8_MEMBASE_REG;
6124 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6125 opcode = OP_STORE_MEMBASE_REG;
6128 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6130 MONO_INST_NEW (cfg, ins, opcode);
6131 ins->sreg1 = args [1]->dreg;
6132 ins->inst_destbasereg = args [0]->dreg;
6133 ins->inst_offset = 0;
6134 MONO_ADD_INS (cfg->cbb, ins);
6136 if (opcode == OP_STOREI8_MEMBASE_REG)
6137 ins = mono_decompose_opcode (cfg, ins);
6142 } else if (cmethod->klass->image == mono_defaults.corlib &&
6143 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6144 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6147 #if SIZEOF_REGISTER == 8
6148 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6149 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6150 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6151 ins->dreg = mono_alloc_preg (cfg);
6152 ins->sreg1 = args [0]->dreg;
6153 ins->type = STACK_I8;
6154 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6155 MONO_ADD_INS (cfg->cbb, ins);
6159 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6161 /* 64 bit reads are already atomic */
6162 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6163 load_ins->dreg = mono_alloc_preg (cfg);
6164 load_ins->inst_basereg = args [0]->dreg;
6165 load_ins->inst_offset = 0;
6166 load_ins->type = STACK_I8;
6167 MONO_ADD_INS (cfg->cbb, load_ins);
6169 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6176 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6177 MonoInst *ins_iconst;
6180 if (fsig->params [0]->type == MONO_TYPE_I4) {
6181 opcode = OP_ATOMIC_ADD_I4;
6182 cfg->has_atomic_add_i4 = TRUE;
6184 #if SIZEOF_REGISTER == 8
6185 else if (fsig->params [0]->type == MONO_TYPE_I8)
6186 opcode = OP_ATOMIC_ADD_I8;
6189 if (!mono_arch_opcode_supported (opcode))
6191 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6192 ins_iconst->inst_c0 = 1;
6193 ins_iconst->dreg = mono_alloc_ireg (cfg);
6194 MONO_ADD_INS (cfg->cbb, ins_iconst);
6196 MONO_INST_NEW (cfg, ins, opcode);
6197 ins->dreg = mono_alloc_ireg (cfg);
6198 ins->inst_basereg = args [0]->dreg;
6199 ins->inst_offset = 0;
6200 ins->sreg2 = ins_iconst->dreg;
6201 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6202 MONO_ADD_INS (cfg->cbb, ins);
6204 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6205 MonoInst *ins_iconst;
6208 if (fsig->params [0]->type == MONO_TYPE_I4) {
6209 opcode = OP_ATOMIC_ADD_I4;
6210 cfg->has_atomic_add_i4 = TRUE;
6212 #if SIZEOF_REGISTER == 8
6213 else if (fsig->params [0]->type == MONO_TYPE_I8)
6214 opcode = OP_ATOMIC_ADD_I8;
6217 if (!mono_arch_opcode_supported (opcode))
6219 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6220 ins_iconst->inst_c0 = -1;
6221 ins_iconst->dreg = mono_alloc_ireg (cfg);
6222 MONO_ADD_INS (cfg->cbb, ins_iconst);
6224 MONO_INST_NEW (cfg, ins, opcode);
6225 ins->dreg = mono_alloc_ireg (cfg);
6226 ins->inst_basereg = args [0]->dreg;
6227 ins->inst_offset = 0;
6228 ins->sreg2 = ins_iconst->dreg;
6229 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6230 MONO_ADD_INS (cfg->cbb, ins);
6232 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6235 if (fsig->params [0]->type == MONO_TYPE_I4) {
6236 opcode = OP_ATOMIC_ADD_I4;
6237 cfg->has_atomic_add_i4 = TRUE;
6239 #if SIZEOF_REGISTER == 8
6240 else if (fsig->params [0]->type == MONO_TYPE_I8)
6241 opcode = OP_ATOMIC_ADD_I8;
6244 if (!mono_arch_opcode_supported (opcode))
6246 MONO_INST_NEW (cfg, ins, opcode);
6247 ins->dreg = mono_alloc_ireg (cfg);
6248 ins->inst_basereg = args [0]->dreg;
6249 ins->inst_offset = 0;
6250 ins->sreg2 = args [1]->dreg;
6251 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6252 MONO_ADD_INS (cfg->cbb, ins);
6255 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6256 MonoInst *f2i = NULL, *i2f;
6257 guint32 opcode, f2i_opcode, i2f_opcode;
6258 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6259 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6261 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6262 fsig->params [0]->type == MONO_TYPE_R4) {
6263 opcode = OP_ATOMIC_EXCHANGE_I4;
6264 f2i_opcode = OP_MOVE_F_TO_I4;
6265 i2f_opcode = OP_MOVE_I4_TO_F;
6266 cfg->has_atomic_exchange_i4 = TRUE;
6268 #if SIZEOF_REGISTER == 8
6270 fsig->params [0]->type == MONO_TYPE_I8 ||
6271 fsig->params [0]->type == MONO_TYPE_R8 ||
6272 fsig->params [0]->type == MONO_TYPE_I) {
6273 opcode = OP_ATOMIC_EXCHANGE_I8;
6274 f2i_opcode = OP_MOVE_F_TO_I8;
6275 i2f_opcode = OP_MOVE_I8_TO_F;
6278 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6279 opcode = OP_ATOMIC_EXCHANGE_I4;
6280 cfg->has_atomic_exchange_i4 = TRUE;
6286 if (!mono_arch_opcode_supported (opcode))
6290 /* TODO: Decompose these opcodes instead of bailing here. */
6291 if (COMPILE_SOFT_FLOAT (cfg))
6294 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6295 f2i->dreg = mono_alloc_ireg (cfg);
6296 f2i->sreg1 = args [1]->dreg;
6297 if (f2i_opcode == OP_MOVE_F_TO_I4)
6298 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6299 MONO_ADD_INS (cfg->cbb, f2i);
6302 MONO_INST_NEW (cfg, ins, opcode);
6303 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6304 ins->inst_basereg = args [0]->dreg;
6305 ins->inst_offset = 0;
6306 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6307 MONO_ADD_INS (cfg->cbb, ins);
6309 switch (fsig->params [0]->type) {
6311 ins->type = STACK_I4;
6314 ins->type = STACK_I8;
6317 #if SIZEOF_REGISTER == 8
6318 ins->type = STACK_I8;
6320 ins->type = STACK_I4;
6325 ins->type = STACK_R8;
6328 g_assert (mini_type_is_reference (fsig->params [0]));
6329 ins->type = STACK_OBJ;
6334 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6335 i2f->dreg = mono_alloc_freg (cfg);
6336 i2f->sreg1 = ins->dreg;
6337 i2f->type = STACK_R8;
6338 if (i2f_opcode == OP_MOVE_I4_TO_F)
6339 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6340 MONO_ADD_INS (cfg->cbb, i2f);
6345 if (cfg->gen_write_barriers && is_ref)
6346 emit_write_barrier (cfg, args [0], args [1]);
6348 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6349 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6350 guint32 opcode, f2i_opcode, i2f_opcode;
6351 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6352 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6354 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6355 fsig->params [1]->type == MONO_TYPE_R4) {
6356 opcode = OP_ATOMIC_CAS_I4;
6357 f2i_opcode = OP_MOVE_F_TO_I4;
6358 i2f_opcode = OP_MOVE_I4_TO_F;
6359 cfg->has_atomic_cas_i4 = TRUE;
6361 #if SIZEOF_REGISTER == 8
6363 fsig->params [1]->type == MONO_TYPE_I8 ||
6364 fsig->params [1]->type == MONO_TYPE_R8 ||
6365 fsig->params [1]->type == MONO_TYPE_I) {
6366 opcode = OP_ATOMIC_CAS_I8;
6367 f2i_opcode = OP_MOVE_F_TO_I8;
6368 i2f_opcode = OP_MOVE_I8_TO_F;
6371 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6372 opcode = OP_ATOMIC_CAS_I4;
6373 cfg->has_atomic_cas_i4 = TRUE;
6379 if (!mono_arch_opcode_supported (opcode))
6383 /* TODO: Decompose these opcodes instead of bailing here. */
6384 if (COMPILE_SOFT_FLOAT (cfg))
6387 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6388 f2i_new->dreg = mono_alloc_ireg (cfg);
6389 f2i_new->sreg1 = args [1]->dreg;
6390 if (f2i_opcode == OP_MOVE_F_TO_I4)
6391 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6392 MONO_ADD_INS (cfg->cbb, f2i_new);
6394 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6395 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6396 f2i_cmp->sreg1 = args [2]->dreg;
6397 if (f2i_opcode == OP_MOVE_F_TO_I4)
6398 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6399 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6402 MONO_INST_NEW (cfg, ins, opcode);
6403 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6404 ins->sreg1 = args [0]->dreg;
6405 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6406 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6407 MONO_ADD_INS (cfg->cbb, ins);
6409 switch (fsig->params [1]->type) {
6411 ins->type = STACK_I4;
6414 ins->type = STACK_I8;
6417 #if SIZEOF_REGISTER == 8
6418 ins->type = STACK_I8;
6420 ins->type = STACK_I4;
6424 ins->type = cfg->r4_stack_type;
6427 ins->type = STACK_R8;
6430 g_assert (mini_type_is_reference (fsig->params [1]));
6431 ins->type = STACK_OBJ;
6436 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6437 i2f->dreg = mono_alloc_freg (cfg);
6438 i2f->sreg1 = ins->dreg;
6439 i2f->type = STACK_R8;
6440 if (i2f_opcode == OP_MOVE_I4_TO_F)
6441 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6442 MONO_ADD_INS (cfg->cbb, i2f);
6447 if (cfg->gen_write_barriers && is_ref)
6448 emit_write_barrier (cfg, args [0], args [1]);
6450 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6451 fsig->params [1]->type == MONO_TYPE_I4) {
6452 MonoInst *cmp, *ceq;
6454 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6457 /* int32 r = CAS (location, value, comparand); */
6458 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6459 ins->dreg = alloc_ireg (cfg);
6460 ins->sreg1 = args [0]->dreg;
6461 ins->sreg2 = args [1]->dreg;
6462 ins->sreg3 = args [2]->dreg;
6463 ins->type = STACK_I4;
6464 MONO_ADD_INS (cfg->cbb, ins);
6466 /* bool result = r == comparand; */
6467 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6468 cmp->sreg1 = ins->dreg;
6469 cmp->sreg2 = args [2]->dreg;
6470 cmp->type = STACK_I4;
6471 MONO_ADD_INS (cfg->cbb, cmp);
6473 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6474 ceq->dreg = alloc_ireg (cfg);
6475 ceq->type = STACK_I4;
6476 MONO_ADD_INS (cfg->cbb, ceq);
6478 /* *success = result; */
6479 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6481 cfg->has_atomic_cas_i4 = TRUE;
6483 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6484 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6488 } else if (cmethod->klass->image == mono_defaults.corlib &&
6489 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6490 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6493 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6495 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6496 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6498 if (fsig->params [0]->type == MONO_TYPE_I1)
6499 opcode = OP_ATOMIC_LOAD_I1;
6500 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6501 opcode = OP_ATOMIC_LOAD_U1;
6502 else if (fsig->params [0]->type == MONO_TYPE_I2)
6503 opcode = OP_ATOMIC_LOAD_I2;
6504 else if (fsig->params [0]->type == MONO_TYPE_U2)
6505 opcode = OP_ATOMIC_LOAD_U2;
6506 else if (fsig->params [0]->type == MONO_TYPE_I4)
6507 opcode = OP_ATOMIC_LOAD_I4;
6508 else if (fsig->params [0]->type == MONO_TYPE_U4)
6509 opcode = OP_ATOMIC_LOAD_U4;
6510 else if (fsig->params [0]->type == MONO_TYPE_R4)
6511 opcode = OP_ATOMIC_LOAD_R4;
6512 else if (fsig->params [0]->type == MONO_TYPE_R8)
6513 opcode = OP_ATOMIC_LOAD_R8;
6514 #if SIZEOF_REGISTER == 8
6515 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6516 opcode = OP_ATOMIC_LOAD_I8;
6517 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6518 opcode = OP_ATOMIC_LOAD_U8;
6520 else if (fsig->params [0]->type == MONO_TYPE_I)
6521 opcode = OP_ATOMIC_LOAD_I4;
6522 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6523 opcode = OP_ATOMIC_LOAD_U4;
6527 if (!mono_arch_opcode_supported (opcode))
6530 MONO_INST_NEW (cfg, ins, opcode);
6531 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6532 ins->sreg1 = args [0]->dreg;
6533 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6534 MONO_ADD_INS (cfg->cbb, ins);
6536 switch (fsig->params [0]->type) {
6537 case MONO_TYPE_BOOLEAN:
6544 ins->type = STACK_I4;
6548 ins->type = STACK_I8;
6552 #if SIZEOF_REGISTER == 8
6553 ins->type = STACK_I8;
6555 ins->type = STACK_I4;
6559 ins->type = cfg->r4_stack_type;
6562 ins->type = STACK_R8;
6565 g_assert (mini_type_is_reference (fsig->params [0]));
6566 ins->type = STACK_OBJ;
6572 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6574 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6576 if (fsig->params [0]->type == MONO_TYPE_I1)
6577 opcode = OP_ATOMIC_STORE_I1;
6578 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6579 opcode = OP_ATOMIC_STORE_U1;
6580 else if (fsig->params [0]->type == MONO_TYPE_I2)
6581 opcode = OP_ATOMIC_STORE_I2;
6582 else if (fsig->params [0]->type == MONO_TYPE_U2)
6583 opcode = OP_ATOMIC_STORE_U2;
6584 else if (fsig->params [0]->type == MONO_TYPE_I4)
6585 opcode = OP_ATOMIC_STORE_I4;
6586 else if (fsig->params [0]->type == MONO_TYPE_U4)
6587 opcode = OP_ATOMIC_STORE_U4;
6588 else if (fsig->params [0]->type == MONO_TYPE_R4)
6589 opcode = OP_ATOMIC_STORE_R4;
6590 else if (fsig->params [0]->type == MONO_TYPE_R8)
6591 opcode = OP_ATOMIC_STORE_R8;
6592 #if SIZEOF_REGISTER == 8
6593 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6594 opcode = OP_ATOMIC_STORE_I8;
6595 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6596 opcode = OP_ATOMIC_STORE_U8;
6598 else if (fsig->params [0]->type == MONO_TYPE_I)
6599 opcode = OP_ATOMIC_STORE_I4;
6600 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6601 opcode = OP_ATOMIC_STORE_U4;
6605 if (!mono_arch_opcode_supported (opcode))
6608 MONO_INST_NEW (cfg, ins, opcode);
6609 ins->dreg = args [0]->dreg;
6610 ins->sreg1 = args [1]->dreg;
6611 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6612 MONO_ADD_INS (cfg->cbb, ins);
6614 if (cfg->gen_write_barriers && is_ref)
6615 emit_write_barrier (cfg, args [0], args [1]);
6621 } else if (cmethod->klass->image == mono_defaults.corlib &&
6622 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6623 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6624 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6625 if (should_insert_brekpoint (cfg->method)) {
6626 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6628 MONO_INST_NEW (cfg, ins, OP_NOP);
6629 MONO_ADD_INS (cfg->cbb, ins);
6633 } else if (cmethod->klass->image == mono_defaults.corlib &&
6634 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6635 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6636 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6638 EMIT_NEW_ICONST (cfg, ins, 1);
6640 EMIT_NEW_ICONST (cfg, ins, 0);
6643 } else if (cmethod->klass->image == mono_defaults.corlib &&
6644 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6645 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6646 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6647 /* No stack walks are current available, so implement this as an intrinsic */
6648 MonoInst *assembly_ins;
6650 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6651 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6654 } else if (cmethod->klass == mono_defaults.math_class) {
6656 * There is general branchless code for Min/Max, but it does not work for
6658 * http://everything2.com/?node_id=1051618
6660 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6661 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6662 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6663 !strcmp (cmethod->klass->name, "Selector")) ||
6664 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6665 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6666 !strcmp (cmethod->klass->name, "Selector"))
6668 if (cfg->backend->have_objc_get_selector &&
6669 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6670 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6673 MonoJumpInfoToken *ji;
6676 cfg->disable_llvm = TRUE;
6678 if (args [0]->opcode == OP_GOT_ENTRY) {
6679 pi = args [0]->inst_p1;
6680 g_assert (pi->opcode == OP_PATCH_INFO);
6681 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6684 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6685 ji = args [0]->inst_p0;
6688 NULLIFY_INS (args [0]);
6691 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6692 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6693 ins->dreg = mono_alloc_ireg (cfg);
6695 ins->inst_p0 = mono_string_to_utf8 (s);
6696 MONO_ADD_INS (cfg->cbb, ins);
6701 #ifdef MONO_ARCH_SIMD_INTRINSICS
6702 if (cfg->opt & MONO_OPT_SIMD) {
6703 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6709 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6713 if (COMPILE_LLVM (cfg)) {
6714 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6719 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6723 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a JIT-internal replacement when one exists.
 * Currently only handles System.String.InternalAllocateStr, which is rerouted
 * to the GC's managed string allocator when allocation profiling and shared
 * (generic) code are both disabled.  Returns the replacement call instruction,
 * or NULL (in elided code) when no redirection applies.
 */
6726 inline static MonoInst*
6727 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6728 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6730 if (method->klass == mono_defaults.string_class) {
6731 /* managed string allocation support */
6732 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6733 MonoInst *iargs [2];
6734 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6735 MonoMethod *managed_alloc = NULL;
6737 g_assert (vtable); /* Should not fail since it is System.String */
6738 #ifndef MONO_CROSS_COMPILE
6739 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
6743 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6744 iargs [1] = args [0];
6745 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   During inlining, create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this') and store the corresponding value from the
 * evaluation stack SP into it, filling in cfg->args as a side effect.
 */
6752 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6754 MonoInst *store, *temp;
6757 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The 'this' argument has no entry in sig->params, so derive its type from the stack. */
6758 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6761 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6762 * would be different than the MonoInst's used to represent arguments, and
6763 * the ldelema implementation can't deal with that.
6764 * Solution: When ldelema is used on an inline argument, create a var for
6765 * it, emit ldelema on that var, and emit the saving code below in
6766 * inline_method () if needed.
6768 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6769 cfg->args [i] = temp;
6770 /* This uses cfg->args [i] which is set by the preceding line */
6771 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6772 store->cil_code = sp [0]->cil_code;
6777 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6778 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6780 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The variable is read once and cached in a static.
 */
6782 check_inline_called_method_name_limit (MonoMethod *called_method)
6785 static const char *limit = NULL;
6787 if (limit == NULL) {
6788 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6790 if (limit_string != NULL)
6791 limit = limit_string;
6796 if (limit [0] != '\0') {
6797 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: the method name must begin with the configured limit string. */
6799 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6800 g_free (called_method_name);
6802 //return (strncmp_result <= 0);
6803 return (strncmp_result == 0);
6810 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining when the caller's full name starts with
 * the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable.  Mirrors check_inline_called_method_name_limit () for the caller.
 */
6812 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6815 static const char *limit = NULL;
6817 if (limit == NULL) {
6818 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6819 if (limit_string != NULL) {
6820 limit = limit_string;
6826 if (limit [0] != '\0') {
6827 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: the caller's name must begin with the configured limit string. */
6829 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6830 g_free (caller_method_name);
6832 //return (strncmp_result <= 0);
6833 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes the virtual register DREG to the default value
 * of RTYPE: NULL for reference/pointer types, zero for integers and floats,
 * and a VZERO for value types.  The constants for R4/R8 live in statics so
 * the const instructions can point at them.
 */
6841 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6843 static double r8_0 = 0.0;
6844 static float r4_0 = 0.0;
6848 rtype = mini_get_underlying_type (rtype);
6852 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6853 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6854 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6855 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6856 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6857 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay in single precision, so use a dedicated R4 const. */
6858 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6859 ins->type = STACK_R4;
6860 ins->inst_p0 = (void*)&r4_0;
6862 MONO_ADD_INS (cfg->cbb, ins);
6863 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6864 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6865 ins->type = STACK_R8;
6866 ins->inst_p0 = (void*)&r8_0;
6868 MONO_ADD_INS (cfg->cbb, ins);
6869 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6870 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6871 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6872 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
/* Generic type variables constrained to valuetypes are zeroed like valuetypes. */
6873 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized reference. */
6875 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* initializations which keep
 * the IR valid (every vreg has a def) without generating real code.  Falls
 * back to emit_init_rvar () for types with no dummy opcode.
 */
6880 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6884 rtype = mini_get_underlying_type (rtype);
6888 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6889 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6890 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6891 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6892 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6893 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6894 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6895 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6896 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6897 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6898 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6899 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6900 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6901 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6903 emit_init_rvar (cfg, dreg, rtype);
6907 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float, the init
 * value is materialized in a fresh vreg and then stored, so the soft-float
 * decomposition sees a proper store.
 */
6909 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6911 MonoInst *var = cfg->locals [local];
6912 if (COMPILE_SOFT_FLOAT (cfg)) {
6914 int reg = alloc_dreg (cfg, var->type);
6915 emit_init_rvar (cfg, reg, type);
6916 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6919 emit_init_rvar (cfg, var->dreg, type);
6921 emit_dummy_init_rvar (cfg, var->dreg, type);
6928 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD into the current method.  Saves/restores the parts
 * of CFG that mono_method_to_ir () clobbers, converts the callee's IL between
 * the freshly created SBBLOCK/EBBLOCK pair, and on success links/merges those
 * blocks into the caller's CFG.  Returns the inlining cost (> 0) on success,
 * 0 when the inline was aborted.  On abort, the exception state is reset and
 * the newly added bblocks are discarded by restoring cfg->cbb.
 */
6931 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6932 guchar *ip, guint real_offset, gboolean inline_always)
6934 MonoInst *ins, *rvar = NULL;
6935 MonoMethodHeader *cheader;
6936 MonoBasicBlock *ebblock, *sbblock;
6938 MonoMethod *prev_inlined_method;
6939 MonoInst **prev_locals, **prev_args;
6940 MonoType **prev_arg_types;
6941 guint prev_real_offset;
6942 GHashTable *prev_cbb_hash;
6943 MonoBasicBlock **prev_cil_offset_to_bb;
6944 MonoBasicBlock *prev_cbb;
6945 unsigned char* prev_cil_start;
6946 guint32 prev_cil_offset_to_bb_len;
6947 MonoMethod *prev_current_method;
6948 MonoGenericContext *prev_generic_context;
6949 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6951 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6953 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6954 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6957 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6958 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6963 fsig = mono_method_signature (cmethod);
6965 if (cfg->verbose_level > 2)
6966 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6968 if (!cmethod->inline_info) {
6969 cfg->stat_inlineable_methods++;
6970 cmethod->inline_info = 1;
6973 /* allocate local variables */
6974 cheader = mono_method_get_header (cmethod);
6976 if (cheader == NULL || mono_loader_get_last_error ()) {
6977 MonoLoaderError *error = mono_loader_get_last_error ();
6980 mono_metadata_free_mh (cheader);
6981 if (inline_always && error)
6982 mono_cfg_set_exception (cfg, error->exception_type);
6984 mono_loader_clear_error ();
6988 /*Must verify before creating locals as it can cause the JIT to assert.*/
6989 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6990 mono_metadata_free_mh (cheader);
6994 /* allocate space to store the return value */
6995 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6996 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6999 prev_locals = cfg->locals;
7000 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7001 for (i = 0; i < cheader->num_locals; ++i)
7002 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7004 /* allocate start and end blocks */
7005 /* This is needed so if the inline is aborted, we can clean up */
7006 NEW_BBLOCK (cfg, sbblock);
7007 sbblock->real_offset = real_offset;
7009 NEW_BBLOCK (cfg, ebblock);
7010 ebblock->block_num = cfg->num_bblocks++;
7011 ebblock->real_offset = real_offset;
/* Save the caller's compilation state before recursing into the callee. */
7013 prev_args = cfg->args;
7014 prev_arg_types = cfg->arg_types;
7015 prev_inlined_method = cfg->inlined_method;
7016 cfg->inlined_method = cmethod;
7017 cfg->ret_var_set = FALSE;
7018 cfg->inline_depth ++;
7019 prev_real_offset = cfg->real_offset;
7020 prev_cbb_hash = cfg->cbb_hash;
7021 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7022 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7023 prev_cil_start = cfg->cil_start;
7024 prev_cbb = cfg->cbb;
7025 prev_current_method = cfg->current_method;
7026 prev_generic_context = cfg->generic_context;
7027 prev_ret_var_set = cfg->ret_var_set;
7028 prev_disable_inline = cfg->disable_inline;
7030 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL into IR between sbblock and ebblock. */
7033 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
7035 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
7037 cfg->inlined_method = prev_inlined_method;
7038 cfg->real_offset = prev_real_offset;
7039 cfg->cbb_hash = prev_cbb_hash;
7040 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7041 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7042 cfg->cil_start = prev_cil_start;
7043 cfg->locals = prev_locals;
7044 cfg->args = prev_args;
7045 cfg->arg_types = prev_arg_types;
7046 cfg->current_method = prev_current_method;
7047 cfg->generic_context = prev_generic_context;
7048 cfg->ret_var_set = prev_ret_var_set;
7049 cfg->disable_inline = prev_disable_inline;
7050 cfg->inline_depth --;
/* Accept the inline if it was cheap enough, forced, or marked AggressiveInlining. */
7052 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7053 if (cfg->verbose_level > 2)
7054 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7056 cfg->stat_inlined_methods++;
7058 /* always add some code to avoid block split failures */
7059 MONO_INST_NEW (cfg, ins, OP_NOP);
7060 MONO_ADD_INS (prev_cbb, ins);
7062 prev_cbb->next_bb = sbblock;
7063 link_bblock (cfg, prev_cbb, sbblock);
7066 * Get rid of the begin and end bblocks if possible to aid local
7069 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7071 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7072 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7074 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7075 MonoBasicBlock *prev = ebblock->in_bb [0];
7076 mono_merge_basic_blocks (cfg, prev, ebblock);
7078 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7079 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7080 cfg->cbb = prev_cbb;
7084 * It's possible that the rvar is set in some prev bblock, but not in others.
7090 for (i = 0; i < ebblock->in_count; ++i) {
7091 bb = ebblock->in_bb [i];
7093 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7096 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7106 * If the inlined method contains only a throw, then the ret var is not
7107 * set, so set it to a dummy value.
7110 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7112 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7115 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7118 if (cfg->verbose_level > 2)
7119 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7120 cfg->exception_type = MONO_EXCEPTION_NONE;
7121 mono_loader_clear_error ();
7123 /* This gets rid of the newly added bblocks */
7124 cfg->cbb = prev_cbb;
7126 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7131 * Some of these comments may well be out-of-date.
7132 * Design decisions: we do a single pass over the IL code (and we do bblock
7133 * splitting/merging in the few cases when it's required: a back jump to an IL
7134 * address that was not already seen as bblock starting point).
7135 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7136 * Complex operations are decomposed in simpler ones right away. We need to let the
7137 * arch-specific code peek and poke inside this process somehow (except when the
7138 * optimizations can take advantage of the full semantic info of coarse opcodes).
7139 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7140 * MonoInst->opcode initially is the IL opcode or some simplification of that
7141 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7142 * opcode with value bigger than OP_LAST.
7143 * At this point the IR can be handed over to an interpreter, a dumb code generator
7144 * or to the optimizing code generator that will translate it to SSA form.
7146 * Profiling directed optimizations.
7147 * We may compile by default with few or no optimizations and instrument the code
7148 * or the user may indicate what methods to optimize the most either in a config file
7149 * or through repeated runs where the compiler applies offline the optimizations to
7150 * each method and then decides if it was worth it.
7153 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7154 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7155 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7156 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7157 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7158 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7159 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7160 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7162 /* offset from br.s -> br like opcodes */
7163 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP does not start a different basic block
 * than BB, i.e. no other bblock begins at that offset.
 */
7166 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7168 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7170 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream between START and END and create a MonoBasicBlock for
 * every branch target and fall-through point (via GET_BBLOCK).  Also marks
 * the bblock containing a CEE_THROW as out-of-line so it can be moved to a
 * cold section.
 */
7174 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7176 unsigned char *ip = start;
7177 unsigned char *target;
7180 MonoBasicBlock *bblock;
7181 const MonoOpcode *opcode;
7184 cli_addr = ip - start;
7185 i = mono_opcode_value ((const guint8 **)&ip, end);
7188 opcode = &mono_opcodes [i];
/* Advance over the opcode's inline operand; branches also create target bblocks. */
7189 switch (opcode->argument) {
7190 case MonoInlineNone:
7193 case MonoInlineString:
7194 case MonoInlineType:
7195 case MonoInlineField:
7196 case MonoInlineMethod:
7199 case MonoShortInlineR:
7206 case MonoShortInlineVar:
7207 case MonoShortInlineI:
7210 case MonoShortInlineBrTarget:
7211 target = start + cli_addr + 2 + (signed char)ip [1];
7212 GET_BBLOCK (cfg, bblock, target);
7215 GET_BBLOCK (cfg, bblock, ip);
7217 case MonoInlineBrTarget:
7218 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7219 GET_BBLOCK (cfg, bblock, target);
7222 GET_BBLOCK (cfg, bblock, ip);
7224 case MonoInlineSwitch: {
7225 guint32 n = read32 (ip + 1);
/* The switch's fall-through address follows the n 32-bit targets. */
7228 cli_addr += 5 + 4 * n;
7229 target = start + cli_addr;
7230 GET_BBLOCK (cfg, bblock, target);
7232 for (j = 0; j < n; ++j) {
7233 target = start + cli_addr + (gint32)read32 (ip);
7234 GET_BBLOCK (cfg, bblock, target);
7244 g_assert_not_reached ();
7247 if (i == CEE_THROW) {
7248 unsigned char *bb_start = ip - 1;
7250 /* Find the start of the bblock containing the throw */
7252 while ((bb_start >= start) && !bblock) {
7253 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7257 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types.  For wrapper methods the token indexes the wrapper
 * data; the result is then inflated with CONTEXT when needed.
 */
7267 static inline MonoMethod *
7268 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7272 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7273 method = mono_method_get_wrapper_data (m, token);
7276 method = mono_class_inflate_generic_method_checked (method, context, &error);
7277 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7280 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared code,
 * methods on open constructed types are rejected (handled in elided code).
 */
7286 static inline MonoMethod *
7287 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7289 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7291 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT, and initialize the class.  Wrapper methods look the class up in
 * their wrapper data instead of the metadata tables.
 */
7297 static inline MonoClass*
7298 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7303 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7304 klass = mono_method_get_wrapper_data (method, token);
7306 klass = mono_class_inflate_generic_class (klass, context);
7308 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7309 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7312 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature (from wrapper data or metadata)
 * and inflate it with CONTEXT.
 */
7316 static inline MonoMethodSignature*
7317 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7319 MonoMethodSignature *fsig;
7321 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7322 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7324 fsig = mono_metadata_parse_signature (method->klass->image, token);
7328 fsig = mono_inflate_generic_signature(fsig, context, &error);
7330 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (and lazily cache) the SecurityManager.ThrowException method,
 * used to raise CoreCLR security exceptions from JITted code.
 */
7336 throw_exception (void)
7338 static MonoMethod *method = NULL;
7341 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7342 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current point.
 */
7349 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7351 MonoMethod *thrower = throw_exception ();
7354 EMIT_NEW_PCONST (cfg, args [0], ex);
7355 mono_emit_method_call (cfg, thrower, args, NULL);
7359 * Return the original method if a wrapper is specified. We can only access
7360 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Unwrap METHOD to the managed method it wraps, so its custom attributes
 * can be inspected.  Non-wrappers are returned as-is; native-to-managed
 * wrappers are also returned as-is (they behave like Critical code).
 */
7363 get_original_method (MonoMethod *method)
7365 if (method->wrapper_type == MONO_WRAPPER_NONE)
7368 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7369 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7372 /* in other cases we need to find the original method */
7373 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: emit a throw if CALLER may not access FIELD.
 */
7377 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7379 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7380 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7382 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: emit a throw if CALLER may not call CALLEE.
 */
7386 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7388 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7389 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7391 emit_throw_exception (cfg, ex);
7395 * Check that the IL instructions at ip are the array initialization
7396 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL pattern
 * following a newarr and, when the element type allows a raw copy, return a
 * pointer to the static field data (or its RVA for AOT) together with the
 * element size and the field token.  Returns NULL when the pattern or the
 * element type does not match.
 */
7399 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7402 * newarr[System.Int32]
7404 * ldtoken field valuetype ...
7405 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7407 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7409 guint32 token = read32 (ip + 7);
7410 guint32 field_token = read32 (ip + 2);
7411 guint32 field_index = field_token & 0xffffff;
7413 const char *data_ptr;
7415 MonoMethod *cmethod;
7416 MonoClass *dummy_class;
7417 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7421 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7425 *out_field_token = field_token;
7427 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray call qualifies. */
7430 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7432 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7433 case MONO_TYPE_BOOLEAN:
7437 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7438 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7439 case MONO_TYPE_CHAR:
/* The field must be large enough to hold len elements of the given size. */
7456 if (size > mono_type_size (field->type, &dummy_align))
7459 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7460 if (!image_is_dynamic (method->klass->image)) {
7461 field_index = read32 (ip + 2) & 0xffffff;
7462 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7463 data_ptr = mono_image_rva_map (method->klass->image, rva);
7464 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7465 /* for aot code we do the lookup on load */
7466 if (aot && data_ptr)
7467 return GUINT_TO_POINTER (rva);
7469 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7471 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending IL instruction at IP.
 */
7479 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7481 char *method_fname = mono_method_full_name (method, TRUE);
7483 MonoMethodHeader *header = mono_method_get_header (method);
7485 if (header->code_size == 0)
7486 method_code = g_strdup ("method body is empty.");
7488 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7489 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7490 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7491 g_free (method_fname);
7492 g_free (method_code);
/* The header is freed later, together with the rest of the compile's headers. */
7493 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG, registering it as a GC root
 * so it survives until the compile aborts.
 */
7497 set_exception_object (MonoCompile *cfg, MonoException *exception)
7499 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7500 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7501 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack top *SP into local N.  When the value is a
 * freshly emitted constant, simply retarget its dreg to the local's register
 * instead of emitting a move.
 */
7505 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7508 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7509 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7510 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7511 /* Optimize reg-reg moves away */
7513 * Can't optimize other opcodes, since sp[0] might point to
7514 * the last ins of a decomposed opcode.
7516 sp [0]->dreg = (cfg)->locals [n]->dreg;
7518 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7523 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Try to fold an ldloca immediately followed by initobj into a direct
 * local initialization, avoiding the address-taken local which inhibits many
 * optimizations.  Returns the new IP past the consumed instructions, or
 * (in elided code) NULL when the pattern does not match.
 */
7526 static inline unsigned char *
7527 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7537 local = read16 (ip + 2);
7541 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7542 /* From the INITOBJ case */
7543 token = read32 (ip + 2);
7544 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7545 CHECK_TYPELOAD (klass);
7546 type = mini_get_underlying_type (&klass->byval_arg);
7547 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return TRUE if KLASS is System.Exception or derives from it, walking up
 * the parent chain.
 */
7555 is_exception_class (MonoClass *klass)
7558 if (klass == mono_defaults.exception_class)
7560 klass = klass->parent;
7566 * is_jit_optimizer_disabled:
7568 * Determine whether M's assembly has a DebuggableAttribute with the
7569 * IsJITOptimizerDisabled flag set.
7572 is_jit_optimizer_disabled (MonoMethod *m)
7574 MonoAssembly *ass = m->klass->image->assembly;
7575 MonoCustomAttrInfo* attrs;
7576 static MonoClass *klass;
7578 gboolean val = FALSE;
/* Use the cached per-assembly answer when it has already been computed. */
7581 if (ass->jit_optimizer_disabled_inited)
7582 return ass->jit_optimizer_disabled;
7585 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type available: record FALSE and publish it. */
7588 ass->jit_optimizer_disabled = FALSE;
7589 mono_memory_barrier ();
7590 ass->jit_optimizer_disabled_inited = TRUE;
7594 attrs = mono_custom_attrs_from_assembly (ass);
7596 for (i = 0; i < attrs->num_attrs; ++i) {
7597 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7599 MonoMethodSignature *sig;
7601 if (!attr->ctor || attr->ctor->klass != klass)
7603 /* Decode the attribute. See reflection.c */
7604 p = (const char*)attr->data;
7605 g_assert (read16 (p) == 0x0001);
7608 // FIXME: Support named parameters
7609 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute (bool, bool) constructor form is decoded. */
7610 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7612 /* Two boolean arguments */
7616 mono_custom_attrs_free (attrs);
/* Publish the computed value; the barrier orders it before the inited flag. */
7619 ass->jit_optimizer_disabled = val;
7620 mono_memory_barrier ();
7621 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD with signature FSIG can be
 * compiled as a real tail call.  Starts from the arch-specific answer and
 * then vetoes cases where the callee could observe the caller's dying stack
 * frame or where the calling convention does not allow it.
 */
7627 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7629 gboolean supported_tail_call;
7632 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7634 for (i = 0; i < fsig->param_count; ++i) {
7635 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7636 /* These can point to the current method's stack */
7637 supported_tail_call = FALSE;
7639 if (fsig->hasthis && cmethod->klass->valuetype)
7640 /* this might point to the current method's stack */
7641 supported_tail_call = FALSE;
7642 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7643 supported_tail_call = FALSE;
7644 if (cfg->method->save_lmf)
7645 supported_tail_call = FALSE;
7646 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7647 supported_tail_call = FALSE;
7648 if (call_opcode != CEE_CALL)
7649 supported_tail_call = FALSE;
7651 /* Debugging support */
7653 if (supported_tail_call) {
7654 if (!mono_debug_count ())
7655 supported_tail_call = FALSE;
7659 return supported_tail_call;
7665 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the call to CMETHOD (a constructor) for a NEWOBJ opcode.  Depending
 * on the situation this becomes an intrinsic, an inlined body, a gsharedvt
 * indirect call, an rgctx-based indirect call, or a plain direct call; a
 * vtable/mrgctx argument is prepared first when generic sharing requires it.
 */
7668 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7669 MonoInst **sp, guint8 *ip, int *inline_costs)
7671 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7673 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7674 mono_method_is_generic_sharable (cmethod, TRUE)) {
7675 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7676 mono_class_vtable (cfg->domain, cmethod->klass);
7677 CHECK_TYPELOAD (cmethod->klass);
/* Method-level generic instantiation: pass the method's rgctx. */
7679 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7680 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Class-level sharing: look up the vtable through the rgctx. */
7683 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7684 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7686 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7688 CHECK_TYPELOAD (cmethod->klass);
7689 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7694 /* Avoid virtual calls to ctors if possible */
7695 if (mono_class_is_marshalbyref (cmethod->klass))
7696 callvirt_this_arg = sp [0];
7698 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7699 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7700 CHECK_CFG_EXCEPTION;
7701 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7702 mono_method_check_inlining (cfg, cmethod) &&
7703 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7706 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7707 cfg->real_offset += 5;
/* Subtract the cost of the call instruction the inline replaced. */
7709 *inline_costs += costs - 5;
7711 INLINE_FAILURE ("inline failure");
7712 // FIXME-VT: Clean this up
7713 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7714 GSHAREDVT_FAILURE(*ip);
7715 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7717 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/* gsharedvt signature: call through the out trampoline. */
7720 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7721 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7722 } else if (context_used &&
7723 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7724 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7725 MonoInst *cmethod_addr;
7727 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7729 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7730 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7732 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7734 INLINE_FAILURE ("ctor call");
7735 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7736 callvirt_this_arg, NULL, vtable_arg);
7743 emit_setret (MonoCompile *cfg, MonoInst *val)
7745 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7748 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7751 if (!cfg->vret_addr) {
7752 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7754 EMIT_NEW_RETLOADA (cfg, ret_addr);
7756 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7757 ins->klass = mono_class_from_mono_type (ret_type);
7760 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7761 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7762 MonoInst *iargs [1];
7766 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7767 mono_arch_emit_setret (cfg, cfg->method, conv);
7769 mono_arch_emit_setret (cfg, cfg->method, val);
7772 mono_arch_emit_setret (cfg, cfg->method, val);
7777 static MonoMethodSignature*
7778 sig_to_rgctx_sig (MonoMethodSignature *sig)
7780 // FIXME: memory allocation
7781 MonoMethodSignature *res;
7784 res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
7785 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7786 res->param_count = sig->param_count + 1;
7787 for (i = 0; i < sig->param_count; ++i)
7788 res->params [i] = sig->params [i];
7789 res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7794 * mono_method_to_ir:
7796 * Translate the .net IL into linear IR.
7799 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7800 MonoInst *return_var, MonoInst **inline_args,
7801 guint inline_offset, gboolean is_virtual_call)
7804 MonoInst *ins, **sp, **stack_start;
7805 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7806 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7807 MonoMethod *cmethod, *method_definition;
7808 MonoInst **arg_array;
7809 MonoMethodHeader *header;
7811 guint32 token, ins_flag;
7813 MonoClass *constrained_class = NULL;
7814 unsigned char *ip, *end, *target, *err_pos;
7815 MonoMethodSignature *sig;
7816 MonoGenericContext *generic_context = NULL;
7817 MonoGenericContainer *generic_container = NULL;
7818 MonoType **param_types;
7819 int i, n, start_new_bblock, dreg;
7820 int num_calls = 0, inline_costs = 0;
7821 int breakpoint_id = 0;
7823 GSList *class_inits = NULL;
7824 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7826 gboolean init_locals, seq_points, skip_dead_blocks;
7827 gboolean sym_seq_points = FALSE;
7828 MonoDebugMethodInfo *minfo;
7829 MonoBitSet *seq_point_locs = NULL;
7830 MonoBitSet *seq_point_set_locs = NULL;
7832 cfg->disable_inline = is_jit_optimizer_disabled (method);
7834 /* serialization and xdomain stuff may need access to private fields and methods */
7835 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7836 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7837 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7838 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7839 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7840 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7842 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7843 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7844 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7845 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7846 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7848 image = method->klass->image;
7849 header = mono_method_get_header (method);
7851 MonoLoaderError *error;
7853 if ((error = mono_loader_get_last_error ())) {
7854 mono_cfg_set_exception (cfg, error->exception_type);
7856 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7857 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7859 goto exception_exit;
7861 generic_container = mono_method_get_generic_container (method);
7862 sig = mono_method_signature (method);
7863 num_args = sig->hasthis + sig->param_count;
7864 ip = (unsigned char*)header->code;
7865 cfg->cil_start = ip;
7866 end = ip + header->code_size;
7867 cfg->stat_cil_code_size += header->code_size;
7869 seq_points = cfg->gen_seq_points && cfg->method == method;
7871 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7872 /* We could hit a seq point before attaching to the JIT (#8338) */
7876 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7877 minfo = mono_debug_lookup_method (method);
7879 MonoSymSeqPoint *sps;
7880 int i, n_il_offsets;
7882 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7883 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7884 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7885 sym_seq_points = TRUE;
7886 for (i = 0; i < n_il_offsets; ++i) {
7887 if (sps [i].il_offset < header->code_size)
7888 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7891 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7892 /* Methods without line number info like auto-generated property accessors */
7893 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7894 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7895 sym_seq_points = TRUE;
7900 * Methods without init_locals set could cause asserts in various passes
7901 * (#497220). To work around this, we emit dummy initialization opcodes
7902 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7903 * on some platforms.
7905 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7906 init_locals = header->init_locals;
7910 method_definition = method;
7911 while (method_definition->is_inflated) {
7912 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7913 method_definition = imethod->declaring;
7916 /* SkipVerification is not allowed if core-clr is enabled */
7917 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7919 dont_verify_stloc = TRUE;
7922 if (sig->is_inflated)
7923 generic_context = mono_method_get_context (method);
7924 else if (generic_container)
7925 generic_context = &generic_container->context;
7926 cfg->generic_context = generic_context;
7929 g_assert (!sig->has_type_parameters);
7931 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7932 g_assert (method->is_inflated);
7933 g_assert (mono_method_get_context (method)->method_inst);
7935 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7936 g_assert (sig->generic_param_count);
7938 if (cfg->method == method) {
7939 cfg->real_offset = 0;
7941 cfg->real_offset = inline_offset;
7944 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7945 cfg->cil_offset_to_bb_len = header->code_size;
7947 cfg->current_method = method;
7949 if (cfg->verbose_level > 2)
7950 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7952 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7954 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7955 for (n = 0; n < sig->param_count; ++n)
7956 param_types [n + sig->hasthis] = sig->params [n];
7957 cfg->arg_types = param_types;
7959 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7960 if (cfg->method == method) {
7962 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7963 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7966 NEW_BBLOCK (cfg, start_bblock);
7967 cfg->bb_entry = start_bblock;
7968 start_bblock->cil_code = NULL;
7969 start_bblock->cil_length = 0;
7972 NEW_BBLOCK (cfg, end_bblock);
7973 cfg->bb_exit = end_bblock;
7974 end_bblock->cil_code = NULL;
7975 end_bblock->cil_length = 0;
7976 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7977 g_assert (cfg->num_bblocks == 2);
7979 arg_array = cfg->args;
7981 if (header->num_clauses) {
7982 cfg->spvars = g_hash_table_new (NULL, NULL);
7983 cfg->exvars = g_hash_table_new (NULL, NULL);
7985 /* handle exception clauses */
7986 for (i = 0; i < header->num_clauses; ++i) {
7987 MonoBasicBlock *try_bb;
7988 MonoExceptionClause *clause = &header->clauses [i];
7989 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7991 try_bb->real_offset = clause->try_offset;
7992 try_bb->try_start = TRUE;
7993 try_bb->region = ((i + 1) << 8) | clause->flags;
7994 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7995 tblock->real_offset = clause->handler_offset;
7996 tblock->flags |= BB_EXCEPTION_HANDLER;
7999 * Linking the try block with the EH block hinders inlining as we won't be able to
8000 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8002 if (COMPILE_LLVM (cfg))
8003 link_bblock (cfg, try_bb, tblock);
8005 if (*(ip + clause->handler_offset) == CEE_POP)
8006 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8008 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8009 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8010 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8011 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8012 MONO_ADD_INS (tblock, ins);
8014 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8015 /* finally clauses already have a seq point */
8016 /* seq points for filter clauses are emitted below */
8017 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8018 MONO_ADD_INS (tblock, ins);
8021 /* todo: is a fault block unsafe to optimize? */
8022 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8023 tblock->flags |= BB_EXCEPTION_UNSAFE;
8026 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8028 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8030 /* catch and filter blocks get the exception object on the stack */
8031 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8032 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8034 /* mostly like handle_stack_args (), but just sets the input args */
8035 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8036 tblock->in_scount = 1;
8037 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8038 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8042 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8043 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8044 if (!cfg->compile_llvm) {
8045 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8046 ins->dreg = tblock->in_stack [0]->dreg;
8047 MONO_ADD_INS (tblock, ins);
8050 MonoInst *dummy_use;
8053 * Add a dummy use for the exvar so its liveness info will be
8056 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8059 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8060 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8061 MONO_ADD_INS (tblock, ins);
8064 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8065 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8066 tblock->flags |= BB_EXCEPTION_HANDLER;
8067 tblock->real_offset = clause->data.filter_offset;
8068 tblock->in_scount = 1;
8069 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8070 /* The filter block shares the exvar with the handler block */
8071 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8072 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8073 MONO_ADD_INS (tblock, ins);
8077 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8078 clause->data.catch_class &&
8080 mono_class_check_context_used (clause->data.catch_class)) {
8082 * In shared generic code with catch
8083 * clauses containing type variables
8084 * the exception handling code has to
8085 * be able to get to the rgctx.
8086 * Therefore we have to make sure that
8087 * the vtable/mrgctx argument (for
8088 * static or generic methods) or the
8089 * "this" argument (for non-static
8090 * methods) are live.
8092 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8093 mini_method_get_context (method)->method_inst ||
8094 method->klass->valuetype) {
8095 mono_get_vtable_var (cfg);
8097 MonoInst *dummy_use;
8099 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8104 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8105 cfg->cbb = start_bblock;
8106 cfg->args = arg_array;
8107 mono_save_args (cfg, sig, inline_args);
8110 /* FIRST CODE BLOCK */
8111 NEW_BBLOCK (cfg, tblock);
8112 tblock->cil_code = ip;
8116 ADD_BBLOCK (cfg, tblock);
8118 if (cfg->method == method) {
8119 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8120 if (breakpoint_id) {
8121 MONO_INST_NEW (cfg, ins, OP_BREAK);
8122 MONO_ADD_INS (cfg->cbb, ins);
8126 /* we use a separate basic block for the initialization code */
8127 NEW_BBLOCK (cfg, init_localsbb);
8128 cfg->bb_init = init_localsbb;
8129 init_localsbb->real_offset = cfg->real_offset;
8130 start_bblock->next_bb = init_localsbb;
8131 init_localsbb->next_bb = cfg->cbb;
8132 link_bblock (cfg, start_bblock, init_localsbb);
8133 link_bblock (cfg, init_localsbb, cfg->cbb);
8135 cfg->cbb = init_localsbb;
8137 if (cfg->gsharedvt && cfg->method == method) {
8138 MonoGSharedVtMethodInfo *info;
8139 MonoInst *var, *locals_var;
8142 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8143 info->method = cfg->method;
8144 info->count_entries = 16;
8145 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8146 cfg->gsharedvt_info = info;
8148 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8149 /* prevent it from being register allocated */
8150 //var->flags |= MONO_INST_VOLATILE;
8151 cfg->gsharedvt_info_var = var;
8153 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8154 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8156 /* Allocate locals */
8157 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8158 /* prevent it from being register allocated */
8159 //locals_var->flags |= MONO_INST_VOLATILE;
8160 cfg->gsharedvt_locals_var = locals_var;
8162 dreg = alloc_ireg (cfg);
8163 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8165 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8166 ins->dreg = locals_var->dreg;
8168 MONO_ADD_INS (cfg->cbb, ins);
8169 cfg->gsharedvt_locals_var_ins = ins;
8171 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8174 ins->flags |= MONO_INST_INIT;
8178 if (mono_security_core_clr_enabled ()) {
8179 /* check if this is native code, e.g. an icall or a p/invoke */
8180 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8181 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8183 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8184 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8186 /* if this ia a native call then it can only be JITted from platform code */
8187 if ((icall || pinvk) && method->klass && method->klass->image) {
8188 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8189 MonoException *ex = icall ? mono_get_exception_security () :
8190 mono_get_exception_method_access ();
8191 emit_throw_exception (cfg, ex);
8198 CHECK_CFG_EXCEPTION;
8200 if (header->code_size == 0)
8203 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8208 if (cfg->method == method)
8209 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8211 for (n = 0; n < header->num_locals; ++n) {
8212 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8217 /* We force the vtable variable here for all shared methods
8218 for the possibility that they might show up in a stack
8219 trace where their exact instantiation is needed. */
8220 if (cfg->gshared && method == cfg->method) {
8221 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8222 mini_method_get_context (method)->method_inst ||
8223 method->klass->valuetype) {
8224 mono_get_vtable_var (cfg);
8226 /* FIXME: Is there a better way to do this?
8227 We need the variable live for the duration
8228 of the whole method. */
8229 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8233 /* add a check for this != NULL to inlined methods */
8234 if (is_virtual_call) {
8237 NEW_ARGLOAD (cfg, arg_ins, 0);
8238 MONO_ADD_INS (cfg->cbb, arg_ins);
8239 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8242 skip_dead_blocks = !dont_verify;
8243 if (skip_dead_blocks) {
8244 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8249 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8250 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8253 start_new_bblock = 0;
8255 if (cfg->method == method)
8256 cfg->real_offset = ip - header->code;
8258 cfg->real_offset = inline_offset;
8263 if (start_new_bblock) {
8264 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8265 if (start_new_bblock == 2) {
8266 g_assert (ip == tblock->cil_code);
8268 GET_BBLOCK (cfg, tblock, ip);
8270 cfg->cbb->next_bb = tblock;
8272 start_new_bblock = 0;
8273 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8274 if (cfg->verbose_level > 3)
8275 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8276 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8280 g_slist_free (class_inits);
8283 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8284 link_bblock (cfg, cfg->cbb, tblock);
8285 if (sp != stack_start) {
8286 handle_stack_args (cfg, stack_start, sp - stack_start);
8288 CHECK_UNVERIFIABLE (cfg);
8290 cfg->cbb->next_bb = tblock;
8292 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8293 if (cfg->verbose_level > 3)
8294 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8295 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8298 g_slist_free (class_inits);
8303 if (skip_dead_blocks) {
8304 int ip_offset = ip - header->code;
8306 if (ip_offset == bb->end)
8310 int op_size = mono_opcode_size (ip, end);
8311 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8313 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8315 if (ip_offset + op_size == bb->end) {
8316 MONO_INST_NEW (cfg, ins, OP_NOP);
8317 MONO_ADD_INS (cfg->cbb, ins);
8318 start_new_bblock = 1;
8326 * Sequence points are points where the debugger can place a breakpoint.
8327 * Currently, we generate these automatically at points where the IL
8330 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8332 * Make methods interruptable at the beginning, and at the targets of
8333 * backward branches.
8334 * Also, do this at the start of every bblock in methods with clauses too,
8335 * to be able to handle instructions with inprecise control flow like
8337 * Backward branches are handled at the end of method-to-ir ().
8339 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8340 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8342 /* Avoid sequence points on empty IL like .volatile */
8343 // FIXME: Enable this
8344 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8345 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8346 if ((sp != stack_start) && !sym_seq_point)
8347 ins->flags |= MONO_INST_NONEMPTY_STACK;
8348 MONO_ADD_INS (cfg->cbb, ins);
8351 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8354 cfg->cbb->real_offset = cfg->real_offset;
8356 if ((cfg->method == method) && cfg->coverage_info) {
8357 guint32 cil_offset = ip - header->code;
8358 cfg->coverage_info->data [cil_offset].cil_code = ip;
8360 /* TODO: Use an increment here */
8361 #if defined(TARGET_X86)
8362 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8363 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8365 MONO_ADD_INS (cfg->cbb, ins);
8367 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8368 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8372 if (cfg->verbose_level > 3)
8373 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8377 if (seq_points && !sym_seq_points && sp != stack_start) {
8379 * The C# compiler uses these nops to notify the JIT that it should
8380 * insert seq points.
8382 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8383 MONO_ADD_INS (cfg->cbb, ins);
8385 if (cfg->keep_cil_nops)
8386 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8388 MONO_INST_NEW (cfg, ins, OP_NOP);
8390 MONO_ADD_INS (cfg->cbb, ins);
8393 if (should_insert_brekpoint (cfg->method)) {
8394 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8396 MONO_INST_NEW (cfg, ins, OP_NOP);
8399 MONO_ADD_INS (cfg->cbb, ins);
8405 CHECK_STACK_OVF (1);
8406 n = (*ip)-CEE_LDARG_0;
8408 EMIT_NEW_ARGLOAD (cfg, ins, n);
8416 CHECK_STACK_OVF (1);
8417 n = (*ip)-CEE_LDLOC_0;
8419 EMIT_NEW_LOCLOAD (cfg, ins, n);
8428 n = (*ip)-CEE_STLOC_0;
8431 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8433 emit_stloc_ir (cfg, sp, header, n);
8440 CHECK_STACK_OVF (1);
8443 EMIT_NEW_ARGLOAD (cfg, ins, n);
8449 CHECK_STACK_OVF (1);
8452 NEW_ARGLOADA (cfg, ins, n);
8453 MONO_ADD_INS (cfg->cbb, ins);
8463 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8465 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8470 CHECK_STACK_OVF (1);
8473 EMIT_NEW_LOCLOAD (cfg, ins, n);
8477 case CEE_LDLOCA_S: {
8478 unsigned char *tmp_ip;
8480 CHECK_STACK_OVF (1);
8481 CHECK_LOCAL (ip [1]);
8483 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8489 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8498 CHECK_LOCAL (ip [1]);
8499 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8501 emit_stloc_ir (cfg, sp, header, ip [1]);
8506 CHECK_STACK_OVF (1);
8507 EMIT_NEW_PCONST (cfg, ins, NULL);
8508 ins->type = STACK_OBJ;
8513 CHECK_STACK_OVF (1);
8514 EMIT_NEW_ICONST (cfg, ins, -1);
8527 CHECK_STACK_OVF (1);
8528 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8534 CHECK_STACK_OVF (1);
8536 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8542 CHECK_STACK_OVF (1);
8543 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8549 CHECK_STACK_OVF (1);
8550 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8551 ins->type = STACK_I8;
8552 ins->dreg = alloc_dreg (cfg, STACK_I8);
8554 ins->inst_l = (gint64)read64 (ip);
8555 MONO_ADD_INS (cfg->cbb, ins);
8561 gboolean use_aotconst = FALSE;
8563 #ifdef TARGET_POWERPC
8564 /* FIXME: Clean this up */
8565 if (cfg->compile_aot)
8566 use_aotconst = TRUE;
8569 /* FIXME: we should really allocate this only late in the compilation process */
8570 f = mono_domain_alloc (cfg->domain, sizeof (float));
8572 CHECK_STACK_OVF (1);
8578 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8580 dreg = alloc_freg (cfg);
8581 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8582 ins->type = cfg->r4_stack_type;
8584 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8585 ins->type = cfg->r4_stack_type;
8586 ins->dreg = alloc_dreg (cfg, STACK_R8);
8588 MONO_ADD_INS (cfg->cbb, ins);
8598 gboolean use_aotconst = FALSE;
8600 #ifdef TARGET_POWERPC
8601 /* FIXME: Clean this up */
8602 if (cfg->compile_aot)
8603 use_aotconst = TRUE;
8606 /* FIXME: we should really allocate this only late in the compilation process */
8607 d = mono_domain_alloc (cfg->domain, sizeof (double));
8609 CHECK_STACK_OVF (1);
8615 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8617 dreg = alloc_freg (cfg);
8618 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8619 ins->type = STACK_R8;
8621 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8622 ins->type = STACK_R8;
8623 ins->dreg = alloc_dreg (cfg, STACK_R8);
8625 MONO_ADD_INS (cfg->cbb, ins);
8634 MonoInst *temp, *store;
8636 CHECK_STACK_OVF (1);
8640 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8641 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8643 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8646 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8659 if (sp [0]->type == STACK_R8)
8660 /* we need to pop the value from the x86 FP stack */
8661 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8666 MonoMethodSignature *fsig;
8669 INLINE_FAILURE ("jmp");
8670 GSHAREDVT_FAILURE (*ip);
8673 if (stack_start != sp)
8675 token = read32 (ip + 1);
8676 /* FIXME: check the signature matches */
8677 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8679 if (!cmethod || mono_loader_get_last_error ())
8682 if (cfg->gshared && mono_method_check_context_used (cmethod))
8683 GENERIC_SHARING_FAILURE (CEE_JMP);
8685 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8687 fsig = mono_method_signature (cmethod);
8688 n = fsig->param_count + fsig->hasthis;
8689 if (cfg->llvm_only) {
8692 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8693 for (i = 0; i < n; ++i)
8694 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8695 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8697 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8698 * have to emit a normal return since llvm expects it.
8701 emit_setret (cfg, ins);
8702 MONO_INST_NEW (cfg, ins, OP_BR);
8703 ins->inst_target_bb = end_bblock;
8704 MONO_ADD_INS (cfg->cbb, ins);
8705 link_bblock (cfg, cfg->cbb, end_bblock);
8708 } else if (cfg->backend->have_op_tail_call) {
8709 /* Handle tail calls similarly to calls */
8712 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8713 call->method = cmethod;
8714 call->tail_call = TRUE;
8715 call->signature = mono_method_signature (cmethod);
8716 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8717 call->inst.inst_p0 = cmethod;
8718 for (i = 0; i < n; ++i)
8719 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8721 mono_arch_emit_call (cfg, call);
8722 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8723 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8725 for (i = 0; i < num_args; ++i)
8726 /* Prevent arguments from being optimized away */
8727 arg_array [i]->flags |= MONO_INST_VOLATILE;
8729 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8730 ins = (MonoInst*)call;
8731 ins->inst_p0 = cmethod;
8732 MONO_ADD_INS (cfg->cbb, ins);
8736 start_new_bblock = 1;
8741 MonoMethodSignature *fsig;
8744 token = read32 (ip + 1);
8748 //GSHAREDVT_FAILURE (*ip);
8753 fsig = mini_get_signature (method, token, generic_context);
8755 if (method->dynamic && fsig->pinvoke) {
8759 * This is a call through a function pointer using a pinvoke
8760 * signature. Have to create a wrapper and call that instead.
8761 * FIXME: This is very slow, need to create a wrapper at JIT time
8762 * instead based on the signature.
8764 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8765 EMIT_NEW_PCONST (cfg, args [1], fsig);
8767 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8770 n = fsig->param_count + fsig->hasthis;
8774 //g_assert (!virtual || fsig->hasthis);
8778 inline_costs += 10 * num_calls++;
8781 * Making generic calls out of gsharedvt methods.
8782 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8783 * patching gshared method addresses into a gsharedvt method.
8785 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8787 * We pass the address to the gsharedvt trampoline in the rgctx reg
8789 MonoInst *callee = addr;
8791 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8793 GSHAREDVT_FAILURE (*ip);
8795 addr = emit_get_rgctx_sig (cfg, context_used,
8796 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8797 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8801 /* Prevent inlining of methods with indirect calls */
8802 INLINE_FAILURE ("indirect call");
8804 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8809 * Instead of emitting an indirect call, emit a direct call
8810 * with the contents of the aotconst as the patch info.
8812 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8813 info_type = addr->inst_c1;
8814 info_data = addr->inst_p0;
8816 info_type = addr->inst_right->inst_c1;
8817 info_data = addr->inst_right->inst_left;
8820 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8821 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8826 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8830 /* End of call, INS should contain the result of the call, if any */
8832 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8834 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8837 CHECK_CFG_EXCEPTION;
8841 constrained_class = NULL;
8845 case CEE_CALLVIRT: {
8846 MonoInst *addr = NULL;
8847 MonoMethodSignature *fsig = NULL;
8849 int virtual = *ip == CEE_CALLVIRT;
8850 gboolean pass_imt_from_rgctx = FALSE;
8851 MonoInst *imt_arg = NULL;
8852 MonoInst *keep_this_alive = NULL;
8853 gboolean pass_vtable = FALSE;
8854 gboolean pass_mrgctx = FALSE;
8855 MonoInst *vtable_arg = NULL;
8856 gboolean check_this = FALSE;
8857 gboolean supported_tail_call = FALSE;
8858 gboolean tail_call = FALSE;
8859 gboolean need_seq_point = FALSE;
8860 guint32 call_opcode = *ip;
8861 gboolean emit_widen = TRUE;
8862 gboolean push_res = TRUE;
8863 gboolean skip_ret = FALSE;
8864 gboolean delegate_invoke = FALSE;
8865 gboolean direct_icall = FALSE;
8866 gboolean constrained_partial_call = FALSE;
8867 MonoMethod *cil_method;
8870 token = read32 (ip + 1);
8874 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8875 cil_method = cmethod;
8877 if (constrained_class) {
8878 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8879 if (!mini_is_gsharedvt_klass (constrained_class)) {
8880 g_assert (!cmethod->klass->valuetype);
8881 if (!mini_type_is_reference (&constrained_class->byval_arg))
8882 constrained_partial_call = TRUE;
8886 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8887 if (cfg->verbose_level > 2)
8888 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8889 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8890 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8892 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8896 if (cfg->verbose_level > 2)
8897 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8899 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8901 * This is needed since get_method_constrained can't find
8902 * the method in klass representing a type var.
8903 * The type var is guaranteed to be a reference type in this
8906 if (!mini_is_gsharedvt_klass (constrained_class))
8907 g_assert (!cmethod->klass->valuetype);
8909 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8915 if (!cmethod || mono_loader_get_last_error ())
8917 if (!dont_verify && !cfg->skip_visibility) {
8918 MonoMethod *target_method = cil_method;
8919 if (method->is_inflated) {
8920 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8922 if (!mono_method_can_access_method (method_definition, target_method) &&
8923 !mono_method_can_access_method (method, cil_method))
8924 METHOD_ACCESS_FAILURE (method, cil_method);
8927 if (mono_security_core_clr_enabled ())
8928 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8930 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8931 /* MS.NET seems to silently convert this to a callvirt */
8936 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8937 * converts to a callvirt.
8939 * tests/bug-515884.il is an example of this behavior
8941 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8942 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8943 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8947 if (!cmethod->klass->inited)
8948 if (!mono_class_init (cmethod->klass))
8949 TYPE_LOAD_ERROR (cmethod->klass);
8951 fsig = mono_method_signature (cmethod);
8954 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8955 mini_class_is_system_array (cmethod->klass)) {
8956 array_rank = cmethod->klass->rank;
8957 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8958 direct_icall = TRUE;
8959 } else if (fsig->pinvoke) {
8960 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8961 fsig = mono_method_signature (wrapper);
8962 } else if (constrained_class) {
8964 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8968 /* See code below */
8969 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8970 MonoBasicBlock *tbb;
8972 GET_BBLOCK (cfg, tbb, ip + 5);
8973 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8975 * We want to extend the try block to cover the call, but we can't do it if the
8976 * call is made directly since it's followed by an exception check.
8978 direct_icall = FALSE;
8982 mono_save_token_info (cfg, image, token, cil_method);
8984 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8985 need_seq_point = TRUE;
8987 /* Don't support calls made using type arguments for now */
8989 if (cfg->gsharedvt) {
8990 if (mini_is_gsharedvt_signature (fsig))
8991 GSHAREDVT_FAILURE (*ip);
8995 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8996 g_assert_not_reached ();
8998 n = fsig->param_count + fsig->hasthis;
9000 if (!cfg->gshared && cmethod->klass->generic_container)
9004 g_assert (!mono_method_check_context_used (cmethod));
9008 //g_assert (!virtual || fsig->hasthis);
9012 if (constrained_class) {
9013 if (mini_is_gsharedvt_klass (constrained_class)) {
9014 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9015 /* The 'Own method' case below */
9016 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9017 /* 'The type parameter is instantiated as a reference type' case below. */
9019 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9020 CHECK_CFG_EXCEPTION;
9027 * We have the `constrained.' prefix opcode.
9029 if (constrained_partial_call) {
9030 gboolean need_box = TRUE;
9033 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9034 * called method is not known at compile time either. The called method could end up being
9035 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9036 * to box the receiver.
9037 * A simple solution would be to box always and make a normal virtual call, but that would
9038 * be bad performance wise.
9040 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9042 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9047 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9048 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9049 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9050 ins->klass = constrained_class;
9051 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9052 CHECK_CFG_EXCEPTION;
9053 } else if (need_box) {
9055 MonoBasicBlock *is_ref_bb, *end_bb;
9056 MonoInst *nonbox_call;
9059 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9061 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9062 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9064 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9066 NEW_BBLOCK (cfg, is_ref_bb);
9067 NEW_BBLOCK (cfg, end_bb);
9069 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9074 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9076 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9079 MONO_START_BB (cfg, is_ref_bb);
9080 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9081 ins->klass = constrained_class;
9082 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9083 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9085 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9087 MONO_START_BB (cfg, end_bb);
9090 nonbox_call->dreg = ins->dreg;
9093 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9094 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9095 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9098 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9100 * The type parameter is instantiated as a valuetype,
9101 * but that type doesn't override the method we're
9102 * calling, so we need to box `this'.
9104 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9105 ins->klass = constrained_class;
9106 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9107 CHECK_CFG_EXCEPTION;
9108 } else if (!constrained_class->valuetype) {
9109 int dreg = alloc_ireg_ref (cfg);
9112 * The type parameter is instantiated as a reference
9113 * type. We have a managed pointer on the stack, so
9114 * we need to dereference it here.
9116 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9117 ins->type = STACK_OBJ;
9120 if (cmethod->klass->valuetype) {
9123 /* Interface method */
9126 mono_class_setup_vtable (constrained_class);
9127 CHECK_TYPELOAD (constrained_class);
9128 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9130 TYPE_LOAD_ERROR (constrained_class);
9131 slot = mono_method_get_vtable_slot (cmethod);
9133 TYPE_LOAD_ERROR (cmethod->klass);
9134 cmethod = constrained_class->vtable [ioffset + slot];
9136 if (cmethod->klass == mono_defaults.enum_class) {
9137 /* Enum implements some interfaces, so treat this as the first case */
9138 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9139 ins->klass = constrained_class;
9140 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9141 CHECK_CFG_EXCEPTION;
9146 constrained_class = NULL;
9149 if (check_call_signature (cfg, fsig, sp))
9152 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9153 delegate_invoke = TRUE;
9155 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9156 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9157 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9165 * If the callee is a shared method, then its static cctor
9166 * might not get called after the call was patched.
9168 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9169 emit_class_init (cfg, cmethod->klass);
9170 CHECK_TYPELOAD (cmethod->klass);
9173 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9176 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9178 context_used = mini_method_check_context_used (cfg, cmethod);
9180 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9181 /* Generic method interface
9182 calls are resolved via a
9183 helper function and don't
9185 if (!cmethod_context || !cmethod_context->method_inst)
9186 pass_imt_from_rgctx = TRUE;
9190 * If a shared method calls another
9191 * shared method then the caller must
9192 * have a generic sharing context
9193 * because the magic trampoline
9194 * requires it. FIXME: We shouldn't
9195 * have to force the vtable/mrgctx
9196 * variable here. Instead there
9197 * should be a flag in the cfg to
9198 * request a generic sharing context.
9201 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9202 mono_get_vtable_var (cfg);
9207 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9209 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9211 CHECK_TYPELOAD (cmethod->klass);
9212 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9217 g_assert (!vtable_arg);
9219 if (!cfg->compile_aot) {
9221 * emit_get_rgctx_method () calls mono_class_vtable () so check
9222 * for type load errors before.
9224 mono_class_setup_vtable (cmethod->klass);
9225 CHECK_TYPELOAD (cmethod->klass);
9228 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9230 /* !marshalbyref is needed to properly handle generic methods + remoting */
9231 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9232 MONO_METHOD_IS_FINAL (cmethod)) &&
9233 !mono_class_is_marshalbyref (cmethod->klass)) {
9240 if (pass_imt_from_rgctx) {
9241 g_assert (!pass_vtable);
9243 imt_arg = emit_get_rgctx_method (cfg, context_used,
9244 cmethod, MONO_RGCTX_INFO_METHOD);
9248 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9250 /* Calling virtual generic methods */
9251 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9252 !(MONO_METHOD_IS_FINAL (cmethod) &&
9253 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9254 fsig->generic_param_count &&
9255 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9257 MonoInst *this_temp, *this_arg_temp, *store;
9258 MonoInst *iargs [4];
9260 g_assert (fsig->is_inflated);
9262 /* Prevent inlining of methods that contain indirect calls */
9263 INLINE_FAILURE ("virtual generic call");
9265 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9266 GSHAREDVT_FAILURE (*ip);
9268 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9269 g_assert (!imt_arg);
9271 g_assert (cmethod->is_inflated);
9272 imt_arg = emit_get_rgctx_method (cfg, context_used,
9273 cmethod, MONO_RGCTX_INFO_METHOD);
9274 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9276 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9277 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9278 MONO_ADD_INS (cfg->cbb, store);
9280 /* FIXME: This should be a managed pointer */
9281 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9283 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9284 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9285 cmethod, MONO_RGCTX_INFO_METHOD);
9286 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9287 addr = mono_emit_jit_icall (cfg,
9288 mono_helper_compile_generic_method, iargs);
9290 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9292 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9299 * Implement a workaround for the inherent races involved in locking:
9305 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9306 * try block, the Exit () won't be executed, see:
9307 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9308 * To work around this, we extend such try blocks to include the last x bytes
9309 * of the Monitor.Enter () call.
9311 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9312 MonoBasicBlock *tbb;
9314 GET_BBLOCK (cfg, tbb, ip + 5);
9316 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9317 * from Monitor.Enter like ArgumentNullException.
9319 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9320 /* Mark this bblock as needing to be extended */
9321 tbb->extend_try_block = TRUE;
9325 /* Conversion to a JIT intrinsic */
9326 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9327 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9328 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9335 if ((cfg->opt & MONO_OPT_INLINE) &&
9336 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9337 mono_method_check_inlining (cfg, cmethod)) {
9339 gboolean always = FALSE;
9341 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9342 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9343 /* Prevent inlining of methods that call wrappers */
9344 INLINE_FAILURE ("wrapper call");
9345 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9349 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9351 cfg->real_offset += 5;
9353 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9354 /* *sp is already set by inline_method */
9359 inline_costs += costs;
9365 /* Tail recursion elimination */
9366 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9367 gboolean has_vtargs = FALSE;
9370 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9371 INLINE_FAILURE ("tail call");
9373 /* keep it simple */
9374 for (i = fsig->param_count - 1; i >= 0; i--) {
9375 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9380 for (i = 0; i < n; ++i)
9381 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9382 MONO_INST_NEW (cfg, ins, OP_BR);
9383 MONO_ADD_INS (cfg->cbb, ins);
9384 tblock = start_bblock->out_bb [0];
9385 link_bblock (cfg, cfg->cbb, tblock);
9386 ins->inst_target_bb = tblock;
9387 start_new_bblock = 1;
9389 /* skip the CEE_RET, too */
9390 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9397 inline_costs += 10 * num_calls++;
9400 * Making generic calls out of gsharedvt methods.
9401 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9402 * patching gshared method addresses into a gsharedvt method.
9404 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9405 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9406 MonoRgctxInfoType info_type;
9409 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9410 //GSHAREDVT_FAILURE (*ip);
9411 // disable for possible remoting calls
9412 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9413 GSHAREDVT_FAILURE (*ip);
9414 if (fsig->generic_param_count) {
9415 /* virtual generic call */
9416 g_assert (!imt_arg);
9417 /* Same as the virtual generic case above */
9418 imt_arg = emit_get_rgctx_method (cfg, context_used,
9419 cmethod, MONO_RGCTX_INFO_METHOD);
9420 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9422 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9423 /* This can happen when we call a fully instantiated iface method */
9424 imt_arg = emit_get_rgctx_method (cfg, context_used,
9425 cmethod, MONO_RGCTX_INFO_METHOD);
9430 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9431 keep_this_alive = sp [0];
9433 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9434 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9436 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9437 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9439 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9443 /* Generic sharing */
9446 * Use this if the callee is gsharedvt sharable too, since
9447 * at runtime we might find an instantiation so the call cannot
9448 * be patched (the 'no_patch' code path in mini-trampolines.c).
9450 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9451 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9452 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9453 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9454 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9455 INLINE_FAILURE ("gshared");
9457 g_assert (cfg->gshared && cmethod);
9461 * We are compiling a call to a
9462 * generic method from shared code,
9463 * which means that we have to look up
9464 * the method in the rgctx and do an
9468 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9470 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9471 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9475 /* Direct calls to icalls */
9477 MonoMethod *wrapper;
9480 /* Inline the wrapper */
9481 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9483 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9484 g_assert (costs > 0);
9485 cfg->real_offset += 5;
9487 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9488 /* *sp is already set by inline_method */
9493 inline_costs += costs;
9502 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9503 MonoInst *val = sp [fsig->param_count];
9505 if (val->type == STACK_OBJ) {
9506 MonoInst *iargs [2];
9511 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9514 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9515 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9516 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9517 emit_write_barrier (cfg, addr, val);
9518 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9519 GSHAREDVT_FAILURE (*ip);
9520 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9521 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9523 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9524 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9525 if (!cmethod->klass->element_class->valuetype && !readonly)
9526 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9527 CHECK_TYPELOAD (cmethod->klass);
9530 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9533 g_assert_not_reached ();
9540 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9544 /* Tail prefix / tail call optimization */
9546 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9547 /* FIXME: runtime generic context pointer for jumps? */
9548 /* FIXME: handle this for generic sharing eventually */
9549 if ((ins_flag & MONO_INST_TAILCALL) &&
9550 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9551 supported_tail_call = TRUE;
9553 if (supported_tail_call) {
9556 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9557 INLINE_FAILURE ("tail call");
9559 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9561 if (cfg->backend->have_op_tail_call) {
9562 /* Handle tail calls similarly to normal calls */
9565 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9567 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9568 call->tail_call = TRUE;
9569 call->method = cmethod;
9570 call->signature = mono_method_signature (cmethod);
9573 * We implement tail calls by storing the actual arguments into the
9574 * argument variables, then emitting a CEE_JMP.
9576 for (i = 0; i < n; ++i) {
9577 /* Prevent argument from being register allocated */
9578 arg_array [i]->flags |= MONO_INST_VOLATILE;
9579 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9581 ins = (MonoInst*)call;
9582 ins->inst_p0 = cmethod;
9583 ins->inst_p1 = arg_array [0];
9584 MONO_ADD_INS (cfg->cbb, ins);
9585 link_bblock (cfg, cfg->cbb, end_bblock);
9586 start_new_bblock = 1;
9588 // FIXME: Eliminate unreachable epilogs
9591 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9592 * only reachable from this call.
9594 GET_BBLOCK (cfg, tblock, ip + 5);
9595 if (tblock == cfg->cbb || tblock->in_count == 0)
9604 * Synchronized wrappers.
9605 * It's hard to determine where to replace a method with its synchronized
9606 * wrapper without causing an infinite recursion. The current solution is
9607 * to add the synchronized wrapper in the trampolines, and to
9608 * change the called method to a dummy wrapper, and resolve that wrapper
9609 * to the real method in mono_jit_compile_method ().
9611 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9612 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9613 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9614 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9618 * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg,
9619 * (i.e. it's a vtype method), and there is no way for the caller to know this at compile time.
9620 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9621 * based on whether there is an rgctx or not.
9623 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9624 MonoInst *args_buf [16], *icall_args [16];
9626 MonoBasicBlock *rgctx_bb, *end_bb;
9627 MonoInst *call1, *call2, *call_target;
9628 MonoMethodSignature *rgctx_sig;
9629 int rgctx_reg, tmp_reg;
9631 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9633 NEW_BBLOCK (cfg, rgctx_bb);
9634 NEW_BBLOCK (cfg, end_bb);
9636 // FIXME: Optimize this
9638 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9640 icall_args [0] = sp [0];
9641 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9643 icall_args [2] = imt_arg;
9645 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9646 icall_args [2] = ins;
9649 rgctx_reg = alloc_preg (cfg);
9650 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9651 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9652 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9654 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9656 // FIXME: Only do this if needed (generic calls)
9658 // Check whether to pass an rgctx
9659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9660 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9661 /* Non rgctx case */
9662 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9663 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9665 MONO_START_BB (cfg, rgctx_bb);
9666 /* Make a call with an rgctx */
9667 if (fsig->param_count + 2 < 16)
9670 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9672 for (i = 0; i < fsig->param_count; ++i)
9673 args [i + 1] = sp [i + 1];
9674 tmp_reg = alloc_preg (cfg);
9675 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9676 rgctx_sig = sig_to_rgctx_sig (fsig);
9677 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9678 call2->dreg = call1->dreg;
9679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9681 MONO_START_BB (cfg, end_bb);
9687 INLINE_FAILURE ("call");
9688 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9689 imt_arg, vtable_arg);
9691 if (tail_call && !cfg->llvm_only) {
9692 link_bblock (cfg, cfg->cbb, end_bblock);
9693 start_new_bblock = 1;
9695 // FIXME: Eliminate unreachable epilogs
9698 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9699 * only reachable from this call.
9701 GET_BBLOCK (cfg, tblock, ip + 5);
9702 if (tblock == cfg->cbb || tblock->in_count == 0)
9709 /* End of call, INS should contain the result of the call, if any */
9711 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9714 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9719 if (keep_this_alive) {
9720 MonoInst *dummy_use;
9722 /* See mono_emit_method_call_full () */
9723 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9726 CHECK_CFG_EXCEPTION;
9730 g_assert (*ip == CEE_RET);
9734 constrained_class = NULL;
9736 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9740 if (cfg->method != method) {
9741 /* return from inlined method */
9743 * If in_count == 0, that means the ret is unreachable due to
9744 * being preceded by a throw. In that case, inline_method () will
9745 * handle setting the return value
9746 * (test case: test_0_inline_throw ()).
9748 if (return_var && cfg->cbb->in_count) {
9749 MonoType *ret_type = mono_method_signature (method)->ret;
9755 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9758 //g_assert (returnvar != -1);
9759 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9760 cfg->ret_var_set = TRUE;
9763 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9765 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9769 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9771 if (seq_points && !sym_seq_points) {
9773 * Place a seq point here too even though the IL stack is not
9774 * empty, so a step over on
9777 * will work correctly.
9779 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9780 MONO_ADD_INS (cfg->cbb, ins);
9783 g_assert (!return_var);
9787 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9790 emit_setret (cfg, *sp);
9793 if (sp != stack_start)
9795 MONO_INST_NEW (cfg, ins, OP_BR);
9797 ins->inst_target_bb = end_bblock;
9798 MONO_ADD_INS (cfg->cbb, ins);
9799 link_bblock (cfg, cfg->cbb, end_bblock);
9800 start_new_bblock = 1;
9804 MONO_INST_NEW (cfg, ins, OP_BR);
9806 target = ip + 1 + (signed char)(*ip);
9808 GET_BBLOCK (cfg, tblock, target);
9809 link_bblock (cfg, cfg->cbb, tblock);
9810 ins->inst_target_bb = tblock;
9811 if (sp != stack_start) {
9812 handle_stack_args (cfg, stack_start, sp - stack_start);
9814 CHECK_UNVERIFIABLE (cfg);
9816 MONO_ADD_INS (cfg->cbb, ins);
9817 start_new_bblock = 1;
9818 inline_costs += BRANCH_COST;
9832 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9834 target = ip + 1 + *(signed char*)ip;
9840 inline_costs += BRANCH_COST;
9844 MONO_INST_NEW (cfg, ins, OP_BR);
9847 target = ip + 4 + (gint32)read32(ip);
9849 GET_BBLOCK (cfg, tblock, target);
9850 link_bblock (cfg, cfg->cbb, tblock);
9851 ins->inst_target_bb = tblock;
9852 if (sp != stack_start) {
9853 handle_stack_args (cfg, stack_start, sp - stack_start);
9855 CHECK_UNVERIFIABLE (cfg);
9858 MONO_ADD_INS (cfg->cbb, ins);
9860 start_new_bblock = 1;
9861 inline_costs += BRANCH_COST;
9868 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9869 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9870 guint32 opsize = is_short ? 1 : 4;
9872 CHECK_OPSIZE (opsize);
9874 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9877 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9882 GET_BBLOCK (cfg, tblock, target);
9883 link_bblock (cfg, cfg->cbb, tblock);
9884 GET_BBLOCK (cfg, tblock, ip);
9885 link_bblock (cfg, cfg->cbb, tblock);
9887 if (sp != stack_start) {
9888 handle_stack_args (cfg, stack_start, sp - stack_start);
9889 CHECK_UNVERIFIABLE (cfg);
9892 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9893 cmp->sreg1 = sp [0]->dreg;
9894 type_from_op (cfg, cmp, sp [0], NULL);
9897 #if SIZEOF_REGISTER == 4
9898 if (cmp->opcode == OP_LCOMPARE_IMM) {
9899 /* Convert it to OP_LCOMPARE */
9900 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9901 ins->type = STACK_I8;
9902 ins->dreg = alloc_dreg (cfg, STACK_I8);
9904 MONO_ADD_INS (cfg->cbb, ins);
9905 cmp->opcode = OP_LCOMPARE;
9906 cmp->sreg2 = ins->dreg;
9909 MONO_ADD_INS (cfg->cbb, cmp);
9911 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9912 type_from_op (cfg, ins, sp [0], NULL);
9913 MONO_ADD_INS (cfg->cbb, ins);
9914 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9915 GET_BBLOCK (cfg, tblock, target);
9916 ins->inst_true_bb = tblock;
9917 GET_BBLOCK (cfg, tblock, ip);
9918 ins->inst_false_bb = tblock;
9919 start_new_bblock = 2;
9922 inline_costs += BRANCH_COST;
9937 MONO_INST_NEW (cfg, ins, *ip);
9939 target = ip + 4 + (gint32)read32(ip);
9945 inline_costs += BRANCH_COST;
9949 MonoBasicBlock **targets;
9950 MonoBasicBlock *default_bblock;
9951 MonoJumpInfoBBTable *table;
9952 int offset_reg = alloc_preg (cfg);
9953 int target_reg = alloc_preg (cfg);
9954 int table_reg = alloc_preg (cfg);
9955 int sum_reg = alloc_preg (cfg);
9956 gboolean use_op_switch;
9960 n = read32 (ip + 1);
9963 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9967 CHECK_OPSIZE (n * sizeof (guint32));
9968 target = ip + n * sizeof (guint32);
9970 GET_BBLOCK (cfg, default_bblock, target);
9971 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9973 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9974 for (i = 0; i < n; ++i) {
9975 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9976 targets [i] = tblock;
9977 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9981 if (sp != stack_start) {
9983 * Link the current bb with the targets as well, so handle_stack_args
9984 * will set their in_stack correctly.
9986 link_bblock (cfg, cfg->cbb, default_bblock);
9987 for (i = 0; i < n; ++i)
9988 link_bblock (cfg, cfg->cbb, targets [i]);
9990 handle_stack_args (cfg, stack_start, sp - stack_start);
9992 CHECK_UNVERIFIABLE (cfg);
9994 /* Undo the links */
9995 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9996 for (i = 0; i < n; ++i)
9997 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10003 for (i = 0; i < n; ++i)
10004 link_bblock (cfg, cfg->cbb, targets [i]);
10006 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10007 table->table = targets;
10008 table->table_size = n;
10010 use_op_switch = FALSE;
10012 /* ARM implements SWITCH statements differently */
10013 /* FIXME: Make it use the generic implementation */
10014 if (!cfg->compile_aot)
10015 use_op_switch = TRUE;
10018 if (COMPILE_LLVM (cfg))
10019 use_op_switch = TRUE;
10021 cfg->cbb->has_jump_table = 1;
10023 if (use_op_switch) {
10024 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10025 ins->sreg1 = src1->dreg;
10026 ins->inst_p0 = table;
10027 ins->inst_many_bb = targets;
10028 ins->klass = GUINT_TO_POINTER (n);
10029 MONO_ADD_INS (cfg->cbb, ins);
10031 if (sizeof (gpointer) == 8)
10032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10034 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10036 #if SIZEOF_REGISTER == 8
10037 /* The upper word might not be zero, and we add it to a 64 bit address later */
10038 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10041 if (cfg->compile_aot) {
10042 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10044 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10045 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10046 ins->inst_p0 = table;
10047 ins->dreg = table_reg;
10048 MONO_ADD_INS (cfg->cbb, ins);
10051 /* FIXME: Use load_memindex */
10052 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10053 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10054 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10056 start_new_bblock = 1;
10057 inline_costs += (BRANCH_COST * 2);
10070 case CEE_LDIND_REF:
10077 dreg = alloc_freg (cfg);
10080 dreg = alloc_lreg (cfg);
10082 case CEE_LDIND_REF:
10083 dreg = alloc_ireg_ref (cfg);
10086 dreg = alloc_preg (cfg);
10089 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10090 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10091 if (*ip == CEE_LDIND_R4)
10092 ins->type = cfg->r4_stack_type;
10093 ins->flags |= ins_flag;
10094 MONO_ADD_INS (cfg->cbb, ins);
10096 if (ins_flag & MONO_INST_VOLATILE) {
10097 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10098 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10103 case CEE_STIND_REF:
10114 if (ins_flag & MONO_INST_VOLATILE) {
10115 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10116 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10119 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10120 ins->flags |= ins_flag;
10123 MONO_ADD_INS (cfg->cbb, ins);
10125 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10126 emit_write_barrier (cfg, sp [0], sp [1]);
10135 MONO_INST_NEW (cfg, ins, (*ip));
10137 ins->sreg1 = sp [0]->dreg;
10138 ins->sreg2 = sp [1]->dreg;
10139 type_from_op (cfg, ins, sp [0], sp [1]);
10141 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10143 /* Use the immediate opcodes if possible */
10144 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10145 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10146 if (imm_opcode != -1) {
10147 ins->opcode = imm_opcode;
10148 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10151 NULLIFY_INS (sp [1]);
10155 MONO_ADD_INS ((cfg)->cbb, (ins));
10157 *sp++ = mono_decompose_opcode (cfg, ins);
10174 MONO_INST_NEW (cfg, ins, (*ip));
10176 ins->sreg1 = sp [0]->dreg;
10177 ins->sreg2 = sp [1]->dreg;
10178 type_from_op (cfg, ins, sp [0], sp [1]);
10180 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10181 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10183 /* FIXME: Pass opcode to is_inst_imm */
10185 /* Use the immediate opcodes if possible */
10186 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10189 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10190 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10191 /* Keep emulated opcodes which are optimized away later */
10192 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10193 imm_opcode = mono_op_to_op_imm (ins->opcode);
10196 if (imm_opcode != -1) {
10197 ins->opcode = imm_opcode;
10198 if (sp [1]->opcode == OP_I8CONST) {
10199 #if SIZEOF_REGISTER == 8
10200 ins->inst_imm = sp [1]->inst_l;
10202 ins->inst_ls_word = sp [1]->inst_ls_word;
10203 ins->inst_ms_word = sp [1]->inst_ms_word;
10207 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10210 /* Might be followed by an instruction added by add_widen_op */
10211 if (sp [1]->next == NULL)
10212 NULLIFY_INS (sp [1]);
10215 MONO_ADD_INS ((cfg)->cbb, (ins));
10217 *sp++ = mono_decompose_opcode (cfg, ins);
10230 case CEE_CONV_OVF_I8:
10231 case CEE_CONV_OVF_U8:
10232 case CEE_CONV_R_UN:
10235 /* Special case this earlier so we have long constants in the IR */
10236 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10237 int data = sp [-1]->inst_c0;
10238 sp [-1]->opcode = OP_I8CONST;
10239 sp [-1]->type = STACK_I8;
10240 #if SIZEOF_REGISTER == 8
10241 if ((*ip) == CEE_CONV_U8)
10242 sp [-1]->inst_c0 = (guint32)data;
10244 sp [-1]->inst_c0 = data;
10246 sp [-1]->inst_ls_word = data;
10247 if ((*ip) == CEE_CONV_U8)
10248 sp [-1]->inst_ms_word = 0;
10250 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10252 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10259 case CEE_CONV_OVF_I4:
10260 case CEE_CONV_OVF_I1:
10261 case CEE_CONV_OVF_I2:
10262 case CEE_CONV_OVF_I:
10263 case CEE_CONV_OVF_U:
10266 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10267 ADD_UNOP (CEE_CONV_OVF_I8);
10274 case CEE_CONV_OVF_U1:
10275 case CEE_CONV_OVF_U2:
10276 case CEE_CONV_OVF_U4:
10279 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10280 ADD_UNOP (CEE_CONV_OVF_U8);
10287 case CEE_CONV_OVF_I1_UN:
10288 case CEE_CONV_OVF_I2_UN:
10289 case CEE_CONV_OVF_I4_UN:
10290 case CEE_CONV_OVF_I8_UN:
10291 case CEE_CONV_OVF_U1_UN:
10292 case CEE_CONV_OVF_U2_UN:
10293 case CEE_CONV_OVF_U4_UN:
10294 case CEE_CONV_OVF_U8_UN:
10295 case CEE_CONV_OVF_I_UN:
10296 case CEE_CONV_OVF_U_UN:
10303 CHECK_CFG_EXCEPTION;
10307 case CEE_ADD_OVF_UN:
10309 case CEE_MUL_OVF_UN:
10311 case CEE_SUB_OVF_UN:
10317 GSHAREDVT_FAILURE (*ip);
10320 token = read32 (ip + 1);
10321 klass = mini_get_class (method, token, generic_context);
10322 CHECK_TYPELOAD (klass);
10324 if (generic_class_is_reference_type (cfg, klass)) {
10325 MonoInst *store, *load;
10326 int dreg = alloc_ireg_ref (cfg);
10328 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10329 load->flags |= ins_flag;
10330 MONO_ADD_INS (cfg->cbb, load);
10332 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10333 store->flags |= ins_flag;
10334 MONO_ADD_INS (cfg->cbb, store);
10336 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10337 emit_write_barrier (cfg, sp [0], sp [1]);
10339 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10345 int loc_index = -1;
10351 token = read32 (ip + 1);
10352 klass = mini_get_class (method, token, generic_context);
10353 CHECK_TYPELOAD (klass);
10355 /* Optimize the common ldobj+stloc combination */
10358 loc_index = ip [6];
10365 loc_index = ip [5] - CEE_STLOC_0;
10372 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10373 CHECK_LOCAL (loc_index);
10375 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10376 ins->dreg = cfg->locals [loc_index]->dreg;
10377 ins->flags |= ins_flag;
10380 if (ins_flag & MONO_INST_VOLATILE) {
10381 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10382 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10388 /* Optimize the ldobj+stobj combination */
10389 /* The reference case ends up being a load+store anyway */
10390 /* Skip this if the operation is volatile. */
10391 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10396 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10403 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10404 ins->flags |= ins_flag;
10407 if (ins_flag & MONO_INST_VOLATILE) {
10408 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10409 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10418 CHECK_STACK_OVF (1);
10420 n = read32 (ip + 1);
10422 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10423 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10424 ins->type = STACK_OBJ;
10427 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10428 MonoInst *iargs [1];
10429 char *str = mono_method_get_wrapper_data (method, n);
10431 if (cfg->compile_aot)
10432 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10434 EMIT_NEW_PCONST (cfg, iargs [0], str);
10435 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10437 if (cfg->opt & MONO_OPT_SHARED) {
10438 MonoInst *iargs [3];
10440 if (cfg->compile_aot) {
10441 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10443 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10444 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10445 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10446 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10447 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10449 if (cfg->cbb->out_of_line) {
10450 MonoInst *iargs [2];
10452 if (image == mono_defaults.corlib) {
10454 * Avoid relocations in AOT and save some space by using a
10455 * version of helper_ldstr specialized to mscorlib.
10457 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10458 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10460 /* Avoid creating the string object */
10461 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10462 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10463 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10467 if (cfg->compile_aot) {
10468 NEW_LDSTRCONST (cfg, ins, image, n);
10470 MONO_ADD_INS (cfg->cbb, ins);
10473 NEW_PCONST (cfg, ins, NULL);
10474 ins->type = STACK_OBJ;
10475 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10477 OUT_OF_MEMORY_FAILURE;
10480 MONO_ADD_INS (cfg->cbb, ins);
10489 MonoInst *iargs [2];
10490 MonoMethodSignature *fsig;
10493 MonoInst *vtable_arg = NULL;
10496 token = read32 (ip + 1);
10497 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10498 if (!cmethod || mono_loader_get_last_error ())
10500 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10503 mono_save_token_info (cfg, image, token, cmethod);
10505 if (!mono_class_init (cmethod->klass))
10506 TYPE_LOAD_ERROR (cmethod->klass);
10508 context_used = mini_method_check_context_used (cfg, cmethod);
10510 if (mono_security_core_clr_enabled ())
10511 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10513 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10514 emit_class_init (cfg, cmethod->klass);
10515 CHECK_TYPELOAD (cmethod->klass);
10519 if (cfg->gsharedvt) {
10520 if (mini_is_gsharedvt_variable_signature (sig))
10521 GSHAREDVT_FAILURE (*ip);
10525 n = fsig->param_count;
10529 * Generate smaller code for the common newobj <exception> instruction in
10530 * argument checking code.
10532 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10533 is_exception_class (cmethod->klass) && n <= 2 &&
10534 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10535 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10536 MonoInst *iargs [3];
10540 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10543 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10546 iargs [1] = sp [0];
10547 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10550 iargs [1] = sp [0];
10551 iargs [2] = sp [1];
10552 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10555 g_assert_not_reached ();
10563 /* move the args to allow room for 'this' in the first position */
10569 /* check_call_signature () requires sp[0] to be set */
10570 this_ins.type = STACK_OBJ;
10571 sp [0] = &this_ins;
10572 if (check_call_signature (cfg, fsig, sp))
10577 if (mini_class_is_system_array (cmethod->klass)) {
10578 *sp = emit_get_rgctx_method (cfg, context_used,
10579 cmethod, MONO_RGCTX_INFO_METHOD);
10581 /* Avoid varargs in the common case */
10582 if (fsig->param_count == 1)
10583 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10584 else if (fsig->param_count == 2)
10585 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10586 else if (fsig->param_count == 3)
10587 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10588 else if (fsig->param_count == 4)
10589 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10591 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10592 } else if (cmethod->string_ctor) {
10593 g_assert (!context_used);
10594 g_assert (!vtable_arg);
10595 /* we simply pass a null pointer */
10596 EMIT_NEW_PCONST (cfg, *sp, NULL);
10597 /* now call the string ctor */
10598 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10600 if (cmethod->klass->valuetype) {
10601 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10602 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10603 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10608 * The code generated by mini_emit_virtual_call () expects
10609 * iargs [0] to be a boxed instance, but luckily the vcall
10610 * will be transformed into a normal call there.
10612 } else if (context_used) {
10613 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10616 MonoVTable *vtable = NULL;
10618 if (!cfg->compile_aot)
10619 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10620 CHECK_TYPELOAD (cmethod->klass);
10623 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10624 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10625 * As a workaround, we call class cctors before allocating objects.
10627 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10628 emit_class_init (cfg, cmethod->klass);
10629 if (cfg->verbose_level > 2)
10630 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10631 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10634 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10637 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10640 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10642 /* Now call the actual ctor */
10643 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10644 CHECK_CFG_EXCEPTION;
10647 if (alloc == NULL) {
10649 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10650 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10658 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10659 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10662 case CEE_CASTCLASS:
10666 token = read32 (ip + 1);
10667 klass = mini_get_class (method, token, generic_context);
10668 CHECK_TYPELOAD (klass);
10669 if (sp [0]->type != STACK_OBJ)
10672 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10673 CHECK_CFG_EXCEPTION;
10682 token = read32 (ip + 1);
10683 klass = mini_get_class (method, token, generic_context);
10684 CHECK_TYPELOAD (klass);
10685 if (sp [0]->type != STACK_OBJ)
10688 context_used = mini_class_check_context_used (cfg, klass);
10690 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10691 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10692 MonoInst *args [3];
10699 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10702 idx = get_castclass_cache_idx (cfg);
10703 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10705 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10708 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10709 MonoMethod *mono_isinst;
10710 MonoInst *iargs [1];
10713 mono_isinst = mono_marshal_get_isinst (klass);
10714 iargs [0] = sp [0];
10716 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10717 iargs, ip, cfg->real_offset, TRUE);
10718 CHECK_CFG_EXCEPTION;
10719 g_assert (costs > 0);
10722 cfg->real_offset += 5;
10726 inline_costs += costs;
10729 ins = handle_isinst (cfg, klass, *sp, context_used);
10730 CHECK_CFG_EXCEPTION;
10736 case CEE_UNBOX_ANY: {
10737 MonoInst *res, *addr;
10742 token = read32 (ip + 1);
10743 klass = mini_get_class (method, token, generic_context);
10744 CHECK_TYPELOAD (klass);
10746 mono_save_token_info (cfg, image, token, klass);
10748 context_used = mini_class_check_context_used (cfg, klass);
10750 if (mini_is_gsharedvt_klass (klass)) {
10751 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10753 } else if (generic_class_is_reference_type (cfg, klass)) {
10754 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10755 CHECK_CFG_EXCEPTION;
10756 } else if (mono_class_is_nullable (klass)) {
10757 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10759 addr = handle_unbox (cfg, klass, sp, context_used);
10761 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10772 MonoClass *enum_class;
10773 MonoMethod *has_flag;
10779 token = read32 (ip + 1);
10780 klass = mini_get_class (method, token, generic_context);
10781 CHECK_TYPELOAD (klass);
10783 mono_save_token_info (cfg, image, token, klass);
10785 context_used = mini_class_check_context_used (cfg, klass);
10787 if (generic_class_is_reference_type (cfg, klass)) {
10793 if (klass == mono_defaults.void_class)
10795 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10797 /* frequent check in generic code: box (struct), brtrue */
10802 * <push int/long ptr>
10805 * constrained. MyFlags
10806 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10808 * If we find this sequence and the operand types on box and constrained
10809 * are equal, we can emit a specialized instruction sequence instead of
10810 * the very slow HasFlag () call.
10812 if ((cfg->opt & MONO_OPT_INTRINS) &&
10813 /* Cheap checks first. */
10814 ip + 5 + 6 + 5 < end &&
10815 ip [5] == CEE_PREFIX1 &&
10816 ip [6] == CEE_CONSTRAINED_ &&
10817 ip [11] == CEE_CALLVIRT &&
10818 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10819 mono_class_is_enum (klass) &&
10820 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10821 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10822 has_flag->klass == mono_defaults.enum_class &&
10823 !strcmp (has_flag->name, "HasFlag") &&
10824 has_flag->signature->hasthis &&
10825 has_flag->signature->param_count == 1) {
10826 CHECK_TYPELOAD (enum_class);
10828 if (enum_class == klass) {
10829 MonoInst *enum_this, *enum_flag;
10834 enum_this = sp [0];
10835 enum_flag = sp [1];
10837 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10842 // FIXME: LLVM can't handle the inconsistent bb linking
10843 if (!mono_class_is_nullable (klass) &&
10844 !mini_is_gsharedvt_klass (klass) &&
10845 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10846 (ip [5] == CEE_BRTRUE ||
10847 ip [5] == CEE_BRTRUE_S ||
10848 ip [5] == CEE_BRFALSE ||
10849 ip [5] == CEE_BRFALSE_S)) {
10850 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10852 MonoBasicBlock *true_bb, *false_bb;
10856 if (cfg->verbose_level > 3) {
10857 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10858 printf ("<box+brtrue opt>\n");
10863 case CEE_BRFALSE_S:
10866 target = ip + 1 + (signed char)(*ip);
10873 target = ip + 4 + (gint)(read32 (ip));
10877 g_assert_not_reached ();
10881 * We need to link both bblocks, since it is needed for handling stack
10882 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10883 * Branching to only one of them would lead to inconsistencies, so
10884 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10886 GET_BBLOCK (cfg, true_bb, target);
10887 GET_BBLOCK (cfg, false_bb, ip);
10889 mono_link_bblock (cfg, cfg->cbb, true_bb);
10890 mono_link_bblock (cfg, cfg->cbb, false_bb);
10892 if (sp != stack_start) {
10893 handle_stack_args (cfg, stack_start, sp - stack_start);
10895 CHECK_UNVERIFIABLE (cfg);
10898 if (COMPILE_LLVM (cfg)) {
10899 dreg = alloc_ireg (cfg);
10900 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10901 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10903 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10905 /* The JIT can't eliminate the iconst+compare */
10906 MONO_INST_NEW (cfg, ins, OP_BR);
10907 ins->inst_target_bb = is_true ? true_bb : false_bb;
10908 MONO_ADD_INS (cfg->cbb, ins);
10911 start_new_bblock = 1;
10915 *sp++ = handle_box (cfg, val, klass, context_used);
10917 CHECK_CFG_EXCEPTION;
10926 token = read32 (ip + 1);
10927 klass = mini_get_class (method, token, generic_context);
10928 CHECK_TYPELOAD (klass);
10930 mono_save_token_info (cfg, image, token, klass);
10932 context_used = mini_class_check_context_used (cfg, klass);
10934 if (mono_class_is_nullable (klass)) {
10937 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10938 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10942 ins = handle_unbox (cfg, klass, sp, context_used);
10955 MonoClassField *field;
10956 #ifndef DISABLE_REMOTING
10960 gboolean is_instance;
10962 gpointer addr = NULL;
10963 gboolean is_special_static;
10965 MonoInst *store_val = NULL;
10966 MonoInst *thread_ins;
10969 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10971 if (op == CEE_STFLD) {
10974 store_val = sp [1];
10979 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10981 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10984 if (op == CEE_STSFLD) {
10987 store_val = sp [0];
10992 token = read32 (ip + 1);
10993 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10994 field = mono_method_get_wrapper_data (method, token);
10995 klass = field->parent;
10998 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11001 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11002 FIELD_ACCESS_FAILURE (method, field);
11003 mono_class_init (klass);
11005 /* if the class is Critical then transparent code cannot access it's fields */
11006 if (!is_instance && mono_security_core_clr_enabled ())
11007 ensure_method_is_allowed_to_access_field (cfg, method, field);
11009 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11010 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11011 if (mono_security_core_clr_enabled ())
11012 ensure_method_is_allowed_to_access_field (cfg, method, field);
11015 ftype = mono_field_get_type (field);
11018 * LDFLD etc. is usable on static fields as well, so convert those cases to
11021 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11033 g_assert_not_reached ();
11035 is_instance = FALSE;
11038 context_used = mini_class_check_context_used (cfg, klass);
11040 /* INSTANCE CASE */
11042 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11043 if (op == CEE_STFLD) {
11044 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11046 #ifndef DISABLE_REMOTING
11047 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11048 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11049 MonoInst *iargs [5];
11051 GSHAREDVT_FAILURE (op);
11053 iargs [0] = sp [0];
11054 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11055 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11056 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11058 iargs [4] = sp [1];
11060 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11061 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11062 iargs, ip, cfg->real_offset, TRUE);
11063 CHECK_CFG_EXCEPTION;
11064 g_assert (costs > 0);
11066 cfg->real_offset += 5;
11068 inline_costs += costs;
11070 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11077 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11079 if (mini_is_gsharedvt_klass (klass)) {
11080 MonoInst *offset_ins;
11082 context_used = mini_class_check_context_used (cfg, klass);
11084 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11085 /* The value is offset by 1 */
11086 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11087 dreg = alloc_ireg_mp (cfg);
11088 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11089 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11090 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11092 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11094 if (sp [0]->opcode != OP_LDADDR)
11095 store->flags |= MONO_INST_FAULT;
11097 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11098 /* insert call to write barrier */
11102 dreg = alloc_ireg_mp (cfg);
11103 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11104 emit_write_barrier (cfg, ptr, sp [1]);
11107 store->flags |= ins_flag;
11114 #ifndef DISABLE_REMOTING
11115 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11116 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11117 MonoInst *iargs [4];
11119 GSHAREDVT_FAILURE (op);
11121 iargs [0] = sp [0];
11122 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11123 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11124 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11125 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11126 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11127 iargs, ip, cfg->real_offset, TRUE);
11128 CHECK_CFG_EXCEPTION;
11129 g_assert (costs > 0);
11131 cfg->real_offset += 5;
11135 inline_costs += costs;
11137 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11143 if (sp [0]->type == STACK_VTYPE) {
11146 /* Have to compute the address of the variable */
11148 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11150 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11152 g_assert (var->klass == klass);
11154 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11158 if (op == CEE_LDFLDA) {
11159 if (sp [0]->type == STACK_OBJ) {
11160 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11161 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11164 dreg = alloc_ireg_mp (cfg);
11166 if (mini_is_gsharedvt_klass (klass)) {
11167 MonoInst *offset_ins;
11169 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11170 /* The value is offset by 1 */
11171 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11172 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11174 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11176 ins->klass = mono_class_from_mono_type (field->type);
11177 ins->type = STACK_MP;
11182 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11184 if (mini_is_gsharedvt_klass (klass)) {
11185 MonoInst *offset_ins;
11187 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11188 /* The value is offset by 1 */
11189 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11190 dreg = alloc_ireg_mp (cfg);
11191 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11192 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11194 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11196 load->flags |= ins_flag;
11197 if (sp [0]->opcode != OP_LDADDR)
11198 load->flags |= MONO_INST_FAULT;
11210 context_used = mini_class_check_context_used (cfg, klass);
11212 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11215 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11216 * to be called here.
11218 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11219 mono_class_vtable (cfg->domain, klass);
11220 CHECK_TYPELOAD (klass);
11222 mono_domain_lock (cfg->domain);
11223 if (cfg->domain->special_static_fields)
11224 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11225 mono_domain_unlock (cfg->domain);
11227 is_special_static = mono_class_field_is_special_static (field);
11229 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11230 thread_ins = mono_get_thread_intrinsic (cfg);
11234 /* Generate IR to compute the field address */
11235 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11237 * Fast access to TLS data
11238 * Inline version of get_thread_static_data () in
11242 int idx, static_data_reg, array_reg, dreg;
11244 GSHAREDVT_FAILURE (op);
11246 MONO_ADD_INS (cfg->cbb, thread_ins);
11247 static_data_reg = alloc_ireg (cfg);
11248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11250 if (cfg->compile_aot) {
11251 int offset_reg, offset2_reg, idx_reg;
11253 /* For TLS variables, this will return the TLS offset */
11254 EMIT_NEW_SFLDACONST (cfg, ins, field);
11255 offset_reg = ins->dreg;
11256 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11257 idx_reg = alloc_ireg (cfg);
11258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11259 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11260 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11261 array_reg = alloc_ireg (cfg);
11262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11263 offset2_reg = alloc_ireg (cfg);
11264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11266 dreg = alloc_ireg (cfg);
11267 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11269 offset = (gsize)addr & 0x7fffffff;
11270 idx = offset & 0x3f;
11272 array_reg = alloc_ireg (cfg);
11273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11274 dreg = alloc_ireg (cfg);
11275 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11277 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11278 (cfg->compile_aot && is_special_static) ||
11279 (context_used && is_special_static)) {
11280 MonoInst *iargs [2];
11282 g_assert (field->parent);
11283 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11284 if (context_used) {
11285 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11286 field, MONO_RGCTX_INFO_CLASS_FIELD);
11288 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11290 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11291 } else if (context_used) {
11292 MonoInst *static_data;
11295 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11296 method->klass->name_space, method->klass->name, method->name,
11297 depth, field->offset);
11300 if (mono_class_needs_cctor_run (klass, method))
11301 emit_class_init (cfg, klass);
11304 * The pointer we're computing here is
11306 * super_info.static_data + field->offset
11308 static_data = emit_get_rgctx_klass (cfg, context_used,
11309 klass, MONO_RGCTX_INFO_STATIC_DATA);
11311 if (mini_is_gsharedvt_klass (klass)) {
11312 MonoInst *offset_ins;
11314 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11315 /* The value is offset by 1 */
11316 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11317 dreg = alloc_ireg_mp (cfg);
11318 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11319 } else if (field->offset == 0) {
11322 int addr_reg = mono_alloc_preg (cfg);
11323 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11325 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11326 MonoInst *iargs [2];
11328 g_assert (field->parent);
11329 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11330 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11331 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11333 MonoVTable *vtable = NULL;
11335 if (!cfg->compile_aot)
11336 vtable = mono_class_vtable (cfg->domain, klass);
11337 CHECK_TYPELOAD (klass);
11340 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11341 if (!(g_slist_find (class_inits, klass))) {
11342 emit_class_init (cfg, klass);
11343 if (cfg->verbose_level > 2)
11344 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11345 class_inits = g_slist_prepend (class_inits, klass);
11348 if (cfg->run_cctors) {
11350 /* This makes so that inline cannot trigger */
11351 /* .cctors: too many apps depend on them */
11352 /* running with a specific order... */
11354 if (! vtable->initialized)
11355 INLINE_FAILURE ("class init");
11356 ex = mono_runtime_class_init_full (vtable, FALSE);
11358 set_exception_object (cfg, ex);
11359 goto exception_exit;
11363 if (cfg->compile_aot)
11364 EMIT_NEW_SFLDACONST (cfg, ins, field);
11367 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11369 EMIT_NEW_PCONST (cfg, ins, addr);
11372 MonoInst *iargs [1];
11373 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11374 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11378 /* Generate IR to do the actual load/store operation */
11380 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11381 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11382 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11385 if (op == CEE_LDSFLDA) {
11386 ins->klass = mono_class_from_mono_type (ftype);
11387 ins->type = STACK_PTR;
11389 } else if (op == CEE_STSFLD) {
11392 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11393 store->flags |= ins_flag;
11395 gboolean is_const = FALSE;
11396 MonoVTable *vtable = NULL;
11397 gpointer addr = NULL;
11399 if (!context_used) {
11400 vtable = mono_class_vtable (cfg->domain, klass);
11401 CHECK_TYPELOAD (klass);
11403 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11404 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11405 int ro_type = ftype->type;
11407 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11408 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11409 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11412 GSHAREDVT_FAILURE (op);
11414 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11417 case MONO_TYPE_BOOLEAN:
11419 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11423 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11426 case MONO_TYPE_CHAR:
11428 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11432 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11437 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11441 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11446 case MONO_TYPE_PTR:
11447 case MONO_TYPE_FNPTR:
11448 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11449 type_to_eval_stack_type ((cfg), field->type, *sp);
11452 case MONO_TYPE_STRING:
11453 case MONO_TYPE_OBJECT:
11454 case MONO_TYPE_CLASS:
11455 case MONO_TYPE_SZARRAY:
11456 case MONO_TYPE_ARRAY:
11457 if (!mono_gc_is_moving ()) {
11458 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11459 type_to_eval_stack_type ((cfg), field->type, *sp);
11467 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11472 case MONO_TYPE_VALUETYPE:
11482 CHECK_STACK_OVF (1);
11484 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11485 load->flags |= ins_flag;
11491 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11492 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11493 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11504 token = read32 (ip + 1);
11505 klass = mini_get_class (method, token, generic_context);
11506 CHECK_TYPELOAD (klass);
11507 if (ins_flag & MONO_INST_VOLATILE) {
11508 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11509 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11511 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11512 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11513 ins->flags |= ins_flag;
11514 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11515 generic_class_is_reference_type (cfg, klass)) {
11516 /* insert call to write barrier */
11517 emit_write_barrier (cfg, sp [0], sp [1]);
11529 const char *data_ptr;
11531 guint32 field_token;
11537 token = read32 (ip + 1);
11539 klass = mini_get_class (method, token, generic_context);
11540 CHECK_TYPELOAD (klass);
11542 context_used = mini_class_check_context_used (cfg, klass);
11544 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11545 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11546 ins->sreg1 = sp [0]->dreg;
11547 ins->type = STACK_I4;
11548 ins->dreg = alloc_ireg (cfg);
11549 MONO_ADD_INS (cfg->cbb, ins);
11550 *sp = mono_decompose_opcode (cfg, ins);
11553 if (context_used) {
11554 MonoInst *args [3];
11555 MonoClass *array_class = mono_array_class_get (klass, 1);
11556 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11558 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11561 args [0] = emit_get_rgctx_klass (cfg, context_used,
11562 array_class, MONO_RGCTX_INFO_VTABLE);
11567 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11569 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11571 if (cfg->opt & MONO_OPT_SHARED) {
11572 /* Decompose now to avoid problems with references to the domainvar */
11573 MonoInst *iargs [3];
11575 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11576 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11577 iargs [2] = sp [0];
11579 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11581 /* Decompose later since it is needed by abcrem */
11582 MonoClass *array_type = mono_array_class_get (klass, 1);
11583 mono_class_vtable (cfg->domain, array_type);
11584 CHECK_TYPELOAD (array_type);
11586 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11587 ins->dreg = alloc_ireg_ref (cfg);
11588 ins->sreg1 = sp [0]->dreg;
11589 ins->inst_newa_class = klass;
11590 ins->type = STACK_OBJ;
11591 ins->klass = array_type;
11592 MONO_ADD_INS (cfg->cbb, ins);
11593 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11594 cfg->cbb->has_array_access = TRUE;
11596 /* Needed so mono_emit_load_get_addr () gets called */
11597 mono_get_got_var (cfg);
11607 * we inline/optimize the initialization sequence if possible.
11608 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11609 * for small sizes open code the memcpy
11610 * ensure the rva field is big enough
11612 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11613 MonoMethod *memcpy_method = get_memcpy_method ();
11614 MonoInst *iargs [3];
11615 int add_reg = alloc_ireg_mp (cfg);
11617 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11618 if (cfg->compile_aot) {
11619 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11621 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11623 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11624 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11633 if (sp [0]->type != STACK_OBJ)
11636 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11637 ins->dreg = alloc_preg (cfg);
11638 ins->sreg1 = sp [0]->dreg;
11639 ins->type = STACK_I4;
11640 /* This flag will be inherited by the decomposition */
11641 ins->flags |= MONO_INST_FAULT;
11642 MONO_ADD_INS (cfg->cbb, ins);
11643 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11644 cfg->cbb->has_array_access = TRUE;
11652 if (sp [0]->type != STACK_OBJ)
11655 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11657 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11658 CHECK_TYPELOAD (klass);
11659 /* we need to make sure that this array is exactly the type it needs
11660 * to be for correctness. the wrappers are lax with their usage
11661 * so we need to ignore them here
11663 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11664 MonoClass *array_class = mono_array_class_get (klass, 1);
11665 mini_emit_check_array_type (cfg, sp [0], array_class);
11666 CHECK_TYPELOAD (array_class);
11670 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11675 case CEE_LDELEM_I1:
11676 case CEE_LDELEM_U1:
11677 case CEE_LDELEM_I2:
11678 case CEE_LDELEM_U2:
11679 case CEE_LDELEM_I4:
11680 case CEE_LDELEM_U4:
11681 case CEE_LDELEM_I8:
11683 case CEE_LDELEM_R4:
11684 case CEE_LDELEM_R8:
11685 case CEE_LDELEM_REF: {
11691 if (*ip == CEE_LDELEM) {
11693 token = read32 (ip + 1);
11694 klass = mini_get_class (method, token, generic_context);
11695 CHECK_TYPELOAD (klass);
11696 mono_class_init (klass);
11699 klass = array_access_to_klass (*ip);
11701 if (sp [0]->type != STACK_OBJ)
11704 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11706 if (mini_is_gsharedvt_variable_klass (klass)) {
11707 // FIXME-VT: OP_ICONST optimization
11708 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11709 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11710 ins->opcode = OP_LOADV_MEMBASE;
11711 } else if (sp [1]->opcode == OP_ICONST) {
11712 int array_reg = sp [0]->dreg;
11713 int index_reg = sp [1]->dreg;
11714 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11716 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11717 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11719 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11720 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11723 if (*ip == CEE_LDELEM)
11730 case CEE_STELEM_I1:
11731 case CEE_STELEM_I2:
11732 case CEE_STELEM_I4:
11733 case CEE_STELEM_I8:
11734 case CEE_STELEM_R4:
11735 case CEE_STELEM_R8:
11736 case CEE_STELEM_REF:
11741 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11743 if (*ip == CEE_STELEM) {
11745 token = read32 (ip + 1);
11746 klass = mini_get_class (method, token, generic_context);
11747 CHECK_TYPELOAD (klass);
11748 mono_class_init (klass);
11751 klass = array_access_to_klass (*ip);
11753 if (sp [0]->type != STACK_OBJ)
11756 emit_array_store (cfg, klass, sp, TRUE);
11758 if (*ip == CEE_STELEM)
11765 case CEE_CKFINITE: {
11769 if (cfg->llvm_only) {
11770 MonoInst *iargs [1];
11772 iargs [0] = sp [0];
11773 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11775 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11776 ins->sreg1 = sp [0]->dreg;
11777 ins->dreg = alloc_freg (cfg);
11778 ins->type = STACK_R8;
11779 MONO_ADD_INS (cfg->cbb, ins);
11781 *sp++ = mono_decompose_opcode (cfg, ins);
11787 case CEE_REFANYVAL: {
11788 MonoInst *src_var, *src;
11790 int klass_reg = alloc_preg (cfg);
11791 int dreg = alloc_preg (cfg);
11793 GSHAREDVT_FAILURE (*ip);
11796 MONO_INST_NEW (cfg, ins, *ip);
11799 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11800 CHECK_TYPELOAD (klass);
11802 context_used = mini_class_check_context_used (cfg, klass);
11805 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11807 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11808 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11809 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11811 if (context_used) {
11812 MonoInst *klass_ins;
11814 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11815 klass, MONO_RGCTX_INFO_KLASS);
11818 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11819 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11821 mini_emit_class_check (cfg, klass_reg, klass);
11823 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11824 ins->type = STACK_MP;
11825 ins->klass = klass;
11830 case CEE_MKREFANY: {
11831 MonoInst *loc, *addr;
11833 GSHAREDVT_FAILURE (*ip);
11836 MONO_INST_NEW (cfg, ins, *ip);
11839 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11840 CHECK_TYPELOAD (klass);
11842 context_used = mini_class_check_context_used (cfg, klass);
11844 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11845 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11847 if (context_used) {
11848 MonoInst *const_ins;
11849 int type_reg = alloc_preg (cfg);
11851 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11852 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11855 } else if (cfg->compile_aot) {
11856 int const_reg = alloc_preg (cfg);
11857 int type_reg = alloc_preg (cfg);
11859 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11864 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11865 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11867 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11869 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11870 ins->type = STACK_VTYPE;
11871 ins->klass = mono_defaults.typed_reference_class;
11876 case CEE_LDTOKEN: {
11878 MonoClass *handle_class;
11880 CHECK_STACK_OVF (1);
11883 n = read32 (ip + 1);
11885 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11886 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11887 handle = mono_method_get_wrapper_data (method, n);
11888 handle_class = mono_method_get_wrapper_data (method, n + 1);
11889 if (handle_class == mono_defaults.typehandle_class)
11890 handle = &((MonoClass*)handle)->byval_arg;
11893 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11898 mono_class_init (handle_class);
11899 if (cfg->gshared) {
11900 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11901 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11902 /* This case handles ldtoken
11903 of an open type, like for
11906 } else if (handle_class == mono_defaults.typehandle_class) {
11907 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11908 } else if (handle_class == mono_defaults.fieldhandle_class)
11909 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11910 else if (handle_class == mono_defaults.methodhandle_class)
11911 context_used = mini_method_check_context_used (cfg, handle);
11913 g_assert_not_reached ();
11916 if ((cfg->opt & MONO_OPT_SHARED) &&
11917 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11918 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11919 MonoInst *addr, *vtvar, *iargs [3];
11920 int method_context_used;
11922 method_context_used = mini_method_check_context_used (cfg, method);
11924 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11926 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11927 EMIT_NEW_ICONST (cfg, iargs [1], n);
11928 if (method_context_used) {
11929 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11930 method, MONO_RGCTX_INFO_METHOD);
11931 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11933 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11934 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11936 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11940 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11942 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11943 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11944 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11945 (cmethod->klass == mono_defaults.systemtype_class) &&
11946 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11947 MonoClass *tclass = mono_class_from_mono_type (handle);
11949 mono_class_init (tclass);
11950 if (context_used) {
11951 ins = emit_get_rgctx_klass (cfg, context_used,
11952 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11953 } else if (cfg->compile_aot) {
11954 if (method->wrapper_type) {
11955 mono_error_init (&error); //got to do it since there are multiple conditionals below
11956 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11957 /* Special case for static synchronized wrappers */
11958 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11960 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11961 /* FIXME: n is not a normal token */
11963 EMIT_NEW_PCONST (cfg, ins, NULL);
11966 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11969 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11971 ins->type = STACK_OBJ;
11972 ins->klass = cmethod->klass;
11975 MonoInst *addr, *vtvar;
11977 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11979 if (context_used) {
11980 if (handle_class == mono_defaults.typehandle_class) {
11981 ins = emit_get_rgctx_klass (cfg, context_used,
11982 mono_class_from_mono_type (handle),
11983 MONO_RGCTX_INFO_TYPE);
11984 } else if (handle_class == mono_defaults.methodhandle_class) {
11985 ins = emit_get_rgctx_method (cfg, context_used,
11986 handle, MONO_RGCTX_INFO_METHOD);
11987 } else if (handle_class == mono_defaults.fieldhandle_class) {
11988 ins = emit_get_rgctx_field (cfg, context_used,
11989 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11991 g_assert_not_reached ();
11993 } else if (cfg->compile_aot) {
11994 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11996 EMIT_NEW_PCONST (cfg, ins, handle);
11998 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11999 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12000 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12010 MONO_INST_NEW (cfg, ins, OP_THROW);
12012 ins->sreg1 = sp [0]->dreg;
12014 cfg->cbb->out_of_line = TRUE;
12015 MONO_ADD_INS (cfg->cbb, ins);
12016 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12017 MONO_ADD_INS (cfg->cbb, ins);
12020 link_bblock (cfg, cfg->cbb, end_bblock);
12021 start_new_bblock = 1;
12022 /* This can complicate code generation for llvm since the return value might not be defined */
12023 if (COMPILE_LLVM (cfg))
12024 INLINE_FAILURE ("throw");
12026 case CEE_ENDFINALLY:
12027 /* mono_save_seq_point_info () depends on this */
12028 if (sp != stack_start)
12029 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12030 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12031 MONO_ADD_INS (cfg->cbb, ins);
12033 start_new_bblock = 1;
12036 * Control will leave the method so empty the stack, otherwise
12037 * the next basic block will start with a nonempty stack.
12039 while (sp != stack_start) {
12044 case CEE_LEAVE_S: {
12047 if (*ip == CEE_LEAVE) {
12049 target = ip + 5 + (gint32)read32(ip + 1);
12052 target = ip + 2 + (signed char)(ip [1]);
12055 /* empty the stack */
12056 while (sp != stack_start) {
12061 * If this leave statement is in a catch block, check for a
12062 * pending exception, and rethrow it if necessary.
12063 * We avoid doing this in runtime invoke wrappers, since those are called
12064 * by native code which excepts the wrapper to catch all exceptions.
12066 for (i = 0; i < header->num_clauses; ++i) {
12067 MonoExceptionClause *clause = &header->clauses [i];
12070 * Use <= in the final comparison to handle clauses with multiple
12071 * leave statements, like in bug #78024.
12072 * The ordering of the exception clauses guarantees that we find the
12073 * innermost clause.
12075 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12077 MonoBasicBlock *dont_throw;
12082 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12085 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12087 NEW_BBLOCK (cfg, dont_throw);
12090 * Currently, we always rethrow the abort exception, despite the
12091 * fact that this is not correct. See thread6.cs for an example.
12092 * But propagating the abort exception is more important than
12093 * getting the sematics right.
12095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12096 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12097 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12099 MONO_START_BB (cfg, dont_throw);
12104 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12107 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12109 MonoExceptionClause *clause;
12111 for (tmp = handlers; tmp; tmp = tmp->next) {
12112 clause = tmp->data;
12113 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12115 link_bblock (cfg, cfg->cbb, tblock);
12116 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12117 ins->inst_target_bb = tblock;
12118 ins->inst_eh_block = clause;
12119 MONO_ADD_INS (cfg->cbb, ins);
12120 cfg->cbb->has_call_handler = 1;
12121 if (COMPILE_LLVM (cfg)) {
12122 MonoBasicBlock *target_bb;
12125 * Link the finally bblock with the target, since it will
12126 * conceptually branch there.
12127 * FIXME: Have to link the bblock containing the endfinally.
12129 GET_BBLOCK (cfg, target_bb, target);
12130 link_bblock (cfg, tblock, target_bb);
12133 g_list_free (handlers);
12136 MONO_INST_NEW (cfg, ins, OP_BR);
12137 MONO_ADD_INS (cfg->cbb, ins);
12138 GET_BBLOCK (cfg, tblock, target);
12139 link_bblock (cfg, cfg->cbb, tblock);
12140 ins->inst_target_bb = tblock;
12142 start_new_bblock = 1;
12144 if (*ip == CEE_LEAVE)
12153 * Mono specific opcodes
12155 case MONO_CUSTOM_PREFIX: {
12157 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12161 case CEE_MONO_ICALL: {
12163 MonoJitICallInfo *info;
12165 token = read32 (ip + 2);
12166 func = mono_method_get_wrapper_data (method, token);
12167 info = mono_find_jit_icall_by_addr (func);
12169 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12172 CHECK_STACK (info->sig->param_count);
12173 sp -= info->sig->param_count;
12175 ins = mono_emit_jit_icall (cfg, info->func, sp);
12176 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12180 inline_costs += 10 * num_calls++;
12184 case CEE_MONO_LDPTR_CARD_TABLE:
12185 case CEE_MONO_LDPTR_NURSERY_START:
12186 case CEE_MONO_LDPTR_NURSERY_BITS:
12187 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12188 CHECK_STACK_OVF (1);
12191 case CEE_MONO_LDPTR_CARD_TABLE:
12192 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12194 case CEE_MONO_LDPTR_NURSERY_START:
12195 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12197 case CEE_MONO_LDPTR_NURSERY_BITS:
12198 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12200 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12201 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12207 inline_costs += 10 * num_calls++;
12210 case CEE_MONO_LDPTR: {
12213 CHECK_STACK_OVF (1);
12215 token = read32 (ip + 2);
12217 ptr = mono_method_get_wrapper_data (method, token);
12218 EMIT_NEW_PCONST (cfg, ins, ptr);
12221 inline_costs += 10 * num_calls++;
12222 /* Can't embed random pointers into AOT code */
12226 case CEE_MONO_JIT_ICALL_ADDR: {
12227 MonoJitICallInfo *callinfo;
12230 CHECK_STACK_OVF (1);
12232 token = read32 (ip + 2);
12234 ptr = mono_method_get_wrapper_data (method, token);
12235 callinfo = mono_find_jit_icall_by_addr (ptr);
12236 g_assert (callinfo);
12237 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12240 inline_costs += 10 * num_calls++;
12243 case CEE_MONO_ICALL_ADDR: {
12244 MonoMethod *cmethod;
12247 CHECK_STACK_OVF (1);
12249 token = read32 (ip + 2);
12251 cmethod = mono_method_get_wrapper_data (method, token);
12253 if (cfg->compile_aot) {
12254 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12256 ptr = mono_lookup_internal_call (cmethod);
12258 EMIT_NEW_PCONST (cfg, ins, ptr);
12264 case CEE_MONO_VTADDR: {
12265 MonoInst *src_var, *src;
12271 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12272 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12277 case CEE_MONO_NEWOBJ: {
12278 MonoInst *iargs [2];
12280 CHECK_STACK_OVF (1);
12282 token = read32 (ip + 2);
12283 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12284 mono_class_init (klass);
12285 NEW_DOMAINCONST (cfg, iargs [0]);
12286 MONO_ADD_INS (cfg->cbb, iargs [0]);
12287 NEW_CLASSCONST (cfg, iargs [1], klass);
12288 MONO_ADD_INS (cfg->cbb, iargs [1]);
12289 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12291 inline_costs += 10 * num_calls++;
12294 case CEE_MONO_OBJADDR:
12297 MONO_INST_NEW (cfg, ins, OP_MOVE);
12298 ins->dreg = alloc_ireg_mp (cfg);
12299 ins->sreg1 = sp [0]->dreg;
12300 ins->type = STACK_MP;
12301 MONO_ADD_INS (cfg->cbb, ins);
12305 case CEE_MONO_LDNATIVEOBJ:
12307 * Similar to LDOBJ, but instead load the unmanaged
12308 * representation of the vtype to the stack.
12313 token = read32 (ip + 2);
12314 klass = mono_method_get_wrapper_data (method, token);
12315 g_assert (klass->valuetype);
12316 mono_class_init (klass);
12319 MonoInst *src, *dest, *temp;
12322 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12323 temp->backend.is_pinvoke = 1;
12324 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12325 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12327 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12328 dest->type = STACK_VTYPE;
12329 dest->klass = klass;
12335 case CEE_MONO_RETOBJ: {
12337 * Same as RET, but return the native representation of a vtype
12340 g_assert (cfg->ret);
12341 g_assert (mono_method_signature (method)->pinvoke);
12346 token = read32 (ip + 2);
12347 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12349 if (!cfg->vret_addr) {
12350 g_assert (cfg->ret_var_is_local);
12352 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12354 EMIT_NEW_RETLOADA (cfg, ins);
12356 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12358 if (sp != stack_start)
12361 MONO_INST_NEW (cfg, ins, OP_BR);
12362 ins->inst_target_bb = end_bblock;
12363 MONO_ADD_INS (cfg->cbb, ins);
12364 link_bblock (cfg, cfg->cbb, end_bblock);
12365 start_new_bblock = 1;
12369 case CEE_MONO_CISINST:
12370 case CEE_MONO_CCASTCLASS: {
12375 token = read32 (ip + 2);
12376 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12377 if (ip [1] == CEE_MONO_CISINST)
12378 ins = handle_cisinst (cfg, klass, sp [0]);
12380 ins = handle_ccastclass (cfg, klass, sp [0]);
12385 case CEE_MONO_SAVE_LMF:
12386 case CEE_MONO_RESTORE_LMF:
12389 case CEE_MONO_CLASSCONST:
12390 CHECK_STACK_OVF (1);
12392 token = read32 (ip + 2);
12393 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12396 inline_costs += 10 * num_calls++;
12398 case CEE_MONO_NOT_TAKEN:
12399 cfg->cbb->out_of_line = TRUE;
12402 case CEE_MONO_TLS: {
12405 CHECK_STACK_OVF (1);
12407 key = (gint32)read32 (ip + 2);
12408 g_assert (key < TLS_KEY_NUM);
12410 ins = mono_create_tls_get (cfg, key);
12412 if (cfg->compile_aot) {
12414 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12415 ins->dreg = alloc_preg (cfg);
12416 ins->type = STACK_PTR;
12418 g_assert_not_reached ();
12421 ins->type = STACK_PTR;
12422 MONO_ADD_INS (cfg->cbb, ins);
12427 case CEE_MONO_DYN_CALL: {
12428 MonoCallInst *call;
12430 /* It would be easier to call a trampoline, but that would put an
12431 * extra frame on the stack, confusing exception handling. So
12432 * implement it inline using an opcode for now.
12435 if (!cfg->dyn_call_var) {
12436 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12437 /* prevent it from being register allocated */
12438 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12441 /* Has to use a call inst since it local regalloc expects it */
12442 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12443 ins = (MonoInst*)call;
12445 ins->sreg1 = sp [0]->dreg;
12446 ins->sreg2 = sp [1]->dreg;
12447 MONO_ADD_INS (cfg->cbb, ins);
12449 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12452 inline_costs += 10 * num_calls++;
12456 case CEE_MONO_MEMORY_BARRIER: {
12458 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12462 case CEE_MONO_JIT_ATTACH: {
12463 MonoInst *args [16], *domain_ins;
12464 MonoInst *ad_ins, *jit_tls_ins;
12465 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12467 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12469 EMIT_NEW_PCONST (cfg, ins, NULL);
12470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12472 ad_ins = mono_get_domain_intrinsic (cfg);
12473 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12475 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12476 NEW_BBLOCK (cfg, next_bb);
12477 NEW_BBLOCK (cfg, call_bb);
12479 if (cfg->compile_aot) {
12480 /* AOT code is only used in the root domain */
12481 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12483 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12485 MONO_ADD_INS (cfg->cbb, ad_ins);
12486 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12487 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12489 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12491 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12493 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12494 MONO_START_BB (cfg, call_bb);
12497 if (cfg->compile_aot) {
12498 /* AOT code is only used in the root domain */
12499 EMIT_NEW_PCONST (cfg, args [0], NULL);
12501 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12503 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12504 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12507 MONO_START_BB (cfg, next_bb);
12511 case CEE_MONO_JIT_DETACH: {
12512 MonoInst *args [16];
12514 /* Restore the original domain */
12515 dreg = alloc_ireg (cfg);
12516 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12517 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12522 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12528 case CEE_PREFIX1: {
12531 case CEE_ARGLIST: {
12532 /* somewhat similar to LDTOKEN */
12533 MonoInst *addr, *vtvar;
12534 CHECK_STACK_OVF (1);
12535 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12537 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12538 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12540 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12541 ins->type = STACK_VTYPE;
12542 ins->klass = mono_defaults.argumenthandle_class;
12552 MonoInst *cmp, *arg1, *arg2;
12560 * The following transforms:
12561 * CEE_CEQ into OP_CEQ
12562 * CEE_CGT into OP_CGT
12563 * CEE_CGT_UN into OP_CGT_UN
12564 * CEE_CLT into OP_CLT
12565 * CEE_CLT_UN into OP_CLT_UN
12567 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12569 MONO_INST_NEW (cfg, ins, cmp->opcode);
12570 cmp->sreg1 = arg1->dreg;
12571 cmp->sreg2 = arg2->dreg;
12572 type_from_op (cfg, cmp, arg1, arg2);
12574 add_widen_op (cfg, cmp, &arg1, &arg2);
12575 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12576 cmp->opcode = OP_LCOMPARE;
12577 else if (arg1->type == STACK_R4)
12578 cmp->opcode = OP_RCOMPARE;
12579 else if (arg1->type == STACK_R8)
12580 cmp->opcode = OP_FCOMPARE;
12582 cmp->opcode = OP_ICOMPARE;
12583 MONO_ADD_INS (cfg->cbb, cmp);
12584 ins->type = STACK_I4;
12585 ins->dreg = alloc_dreg (cfg, ins->type);
12586 type_from_op (cfg, ins, arg1, arg2);
12588 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12590 * The backends expect the fceq opcodes to do the
12593 ins->sreg1 = cmp->sreg1;
12594 ins->sreg2 = cmp->sreg2;
12597 MONO_ADD_INS (cfg->cbb, ins);
12603 MonoInst *argconst;
12604 MonoMethod *cil_method;
12606 CHECK_STACK_OVF (1);
12608 n = read32 (ip + 2);
12609 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12610 if (!cmethod || mono_loader_get_last_error ())
12612 mono_class_init (cmethod->klass);
12614 mono_save_token_info (cfg, image, n, cmethod);
12616 context_used = mini_method_check_context_used (cfg, cmethod);
12618 cil_method = cmethod;
12619 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12620 METHOD_ACCESS_FAILURE (method, cil_method);
12622 if (mono_security_core_clr_enabled ())
12623 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12626 * Optimize the common case of ldftn+delegate creation
12628 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12629 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12630 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12631 MonoInst *target_ins, *handle_ins;
12632 MonoMethod *invoke;
12633 int invoke_context_used;
12635 invoke = mono_get_delegate_invoke (ctor_method->klass);
12636 if (!invoke || !mono_method_signature (invoke))
12639 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12641 target_ins = sp [-1];
12643 if (mono_security_core_clr_enabled ())
12644 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12646 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12647 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12648 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12650 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12654 /* FIXME: SGEN support */
12655 if (invoke_context_used == 0) {
12657 if (cfg->verbose_level > 3)
12658 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12659 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12662 CHECK_CFG_EXCEPTION;
12672 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12673 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12677 inline_costs += 10 * num_calls++;
12680 case CEE_LDVIRTFTN: {
12681 MonoInst *args [2];
12685 n = read32 (ip + 2);
12686 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12687 if (!cmethod || mono_loader_get_last_error ())
12689 mono_class_init (cmethod->klass);
12691 context_used = mini_method_check_context_used (cfg, cmethod);
12693 if (mono_security_core_clr_enabled ())
12694 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12697 * Optimize the common case of ldvirtftn+delegate creation
12699 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12700 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12701 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12702 MonoInst *target_ins, *handle_ins;
12703 MonoMethod *invoke;
12704 int invoke_context_used;
12705 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12707 invoke = mono_get_delegate_invoke (ctor_method->klass);
12708 if (!invoke || !mono_method_signature (invoke))
12711 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12713 target_ins = sp [-1];
12715 if (mono_security_core_clr_enabled ())
12716 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12718 /* FIXME: SGEN support */
12719 if (invoke_context_used == 0 || cfg->llvm_only) {
12721 if (cfg->verbose_level > 3)
12722 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12723 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12726 CHECK_CFG_EXCEPTION;
12739 args [1] = emit_get_rgctx_method (cfg, context_used,
12740 cmethod, MONO_RGCTX_INFO_METHOD);
12743 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12745 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12748 inline_costs += 10 * num_calls++;
12752 CHECK_STACK_OVF (1);
12754 n = read16 (ip + 2);
12756 EMIT_NEW_ARGLOAD (cfg, ins, n);
12761 CHECK_STACK_OVF (1);
12763 n = read16 (ip + 2);
12765 NEW_ARGLOADA (cfg, ins, n);
12766 MONO_ADD_INS (cfg->cbb, ins);
12774 n = read16 (ip + 2);
12776 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12778 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12782 CHECK_STACK_OVF (1);
12784 n = read16 (ip + 2);
12786 EMIT_NEW_LOCLOAD (cfg, ins, n);
12791 unsigned char *tmp_ip;
12792 CHECK_STACK_OVF (1);
12794 n = read16 (ip + 2);
12797 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12803 EMIT_NEW_LOCLOADA (cfg, ins, n);
12812 n = read16 (ip + 2);
12814 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12816 emit_stloc_ir (cfg, sp, header, n);
12823 if (sp != stack_start)
12825 if (cfg->method != method)
12827 * Inlining this into a loop in a parent could lead to
12828 * stack overflows which is different behavior than the
12829 * non-inlined case, thus disable inlining in this case.
12831 INLINE_FAILURE("localloc");
12833 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12834 ins->dreg = alloc_preg (cfg);
12835 ins->sreg1 = sp [0]->dreg;
12836 ins->type = STACK_PTR;
12837 MONO_ADD_INS (cfg->cbb, ins);
12839 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12841 ins->flags |= MONO_INST_INIT;
12846 case CEE_ENDFILTER: {
12847 MonoExceptionClause *clause, *nearest;
12852 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12854 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12855 ins->sreg1 = (*sp)->dreg;
12856 MONO_ADD_INS (cfg->cbb, ins);
12857 start_new_bblock = 1;
12861 for (cc = 0; cc < header->num_clauses; ++cc) {
12862 clause = &header->clauses [cc];
12863 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12864 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12865 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12868 g_assert (nearest);
12869 if ((ip - header->code) != nearest->handler_offset)
12874 case CEE_UNALIGNED_:
12875 ins_flag |= MONO_INST_UNALIGNED;
12876 /* FIXME: record alignment? we can assume 1 for now */
12880 case CEE_VOLATILE_:
12881 ins_flag |= MONO_INST_VOLATILE;
12885 ins_flag |= MONO_INST_TAILCALL;
12886 cfg->flags |= MONO_CFG_HAS_TAIL;
12887 /* Can't inline tail calls at this time */
12888 inline_costs += 100000;
12895 token = read32 (ip + 2);
12896 klass = mini_get_class (method, token, generic_context);
12897 CHECK_TYPELOAD (klass);
12898 if (generic_class_is_reference_type (cfg, klass))
12899 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12901 mini_emit_initobj (cfg, *sp, NULL, klass);
12905 case CEE_CONSTRAINED_:
12907 token = read32 (ip + 2);
12908 constrained_class = mini_get_class (method, token, generic_context);
12909 CHECK_TYPELOAD (constrained_class);
12913 case CEE_INITBLK: {
12914 MonoInst *iargs [3];
12918 /* Skip optimized paths for volatile operations. */
12919 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12920 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12921 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12922 /* emit_memset only works when val == 0 */
12923 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12926 iargs [0] = sp [0];
12927 iargs [1] = sp [1];
12928 iargs [2] = sp [2];
12929 if (ip [1] == CEE_CPBLK) {
12931 * FIXME: It's unclear whether we should be emitting both the acquire
12932 * and release barriers for cpblk. It is technically both a load and
12933 * store operation, so it seems like that's the sensible thing to do.
12935 * FIXME: We emit full barriers on both sides of the operation for
12936 * simplicity. We should have a separate atomic memcpy method instead.
12938 MonoMethod *memcpy_method = get_memcpy_method ();
12940 if (ins_flag & MONO_INST_VOLATILE)
12941 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12943 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12944 call->flags |= ins_flag;
12946 if (ins_flag & MONO_INST_VOLATILE)
12947 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12949 MonoMethod *memset_method = get_memset_method ();
12950 if (ins_flag & MONO_INST_VOLATILE) {
12951 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12952 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12954 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12955 call->flags |= ins_flag;
12966 ins_flag |= MONO_INST_NOTYPECHECK;
12968 ins_flag |= MONO_INST_NORANGECHECK;
12969 /* we ignore the no-nullcheck for now since we
12970 * really do it explicitly only when doing callvirt->call
12974 case CEE_RETHROW: {
12976 int handler_offset = -1;
12978 for (i = 0; i < header->num_clauses; ++i) {
12979 MonoExceptionClause *clause = &header->clauses [i];
12980 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12981 handler_offset = clause->handler_offset;
12986 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12988 if (handler_offset == -1)
12991 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12992 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12993 ins->sreg1 = load->dreg;
12994 MONO_ADD_INS (cfg->cbb, ins);
12996 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12997 MONO_ADD_INS (cfg->cbb, ins);
13000 link_bblock (cfg, cfg->cbb, end_bblock);
13001 start_new_bblock = 1;
13009 CHECK_STACK_OVF (1);
13011 token = read32 (ip + 2);
13012 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13013 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13016 val = mono_type_size (type, &ialign);
13018 MonoClass *klass = mini_get_class (method, token, generic_context);
13019 CHECK_TYPELOAD (klass);
13021 val = mono_type_size (&klass->byval_arg, &ialign);
13023 if (mini_is_gsharedvt_klass (klass))
13024 GSHAREDVT_FAILURE (*ip);
13026 EMIT_NEW_ICONST (cfg, ins, val);
13031 case CEE_REFANYTYPE: {
13032 MonoInst *src_var, *src;
13034 GSHAREDVT_FAILURE (*ip);
13040 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13042 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13043 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13044 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13049 case CEE_READONLY_:
13062 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13072 g_warning ("opcode 0x%02x not handled", *ip);
13076 if (start_new_bblock != 1)
13079 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13080 if (cfg->cbb->next_bb) {
13081 /* This could already be set because of inlining, #693905 */
13082 MonoBasicBlock *bb = cfg->cbb;
13084 while (bb->next_bb)
13086 bb->next_bb = end_bblock;
13088 cfg->cbb->next_bb = end_bblock;
13091 if (cfg->method == method && cfg->domainvar) {
13093 MonoInst *get_domain;
13095 cfg->cbb = init_localsbb;
13097 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13098 MONO_ADD_INS (cfg->cbb, get_domain);
13100 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13102 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13103 MONO_ADD_INS (cfg->cbb, store);
13106 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13107 if (cfg->compile_aot)
13108 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13109 mono_get_got_var (cfg);
13112 if (cfg->method == method && cfg->got_var)
13113 mono_emit_load_got_addr (cfg);
13115 if (init_localsbb) {
13116 cfg->cbb = init_localsbb;
13118 for (i = 0; i < header->num_locals; ++i) {
13119 emit_init_local (cfg, i, header->locals [i], init_locals);
13123 if (cfg->init_ref_vars && cfg->method == method) {
13124 /* Emit initialization for ref vars */
13125 // FIXME: Avoid duplication initialization for IL locals.
13126 for (i = 0; i < cfg->num_varinfo; ++i) {
13127 MonoInst *ins = cfg->varinfo [i];
13129 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13130 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13134 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13135 cfg->cbb = init_localsbb;
13136 emit_push_lmf (cfg);
13139 cfg->cbb = init_localsbb;
13140 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13143 MonoBasicBlock *bb;
13146 * Make seq points at backward branch targets interruptable.
13148 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13149 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13150 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13153 /* Add a sequence point for method entry/exit events */
13154 if (seq_points && cfg->gen_sdb_seq_points) {
13155 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13156 MONO_ADD_INS (init_localsbb, ins);
13157 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13158 MONO_ADD_INS (cfg->bb_exit, ins);
13162 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13163 * the code they refer to was dead (#11880).
13165 if (sym_seq_points) {
13166 for (i = 0; i < header->code_size; ++i) {
13167 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13170 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13171 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13178 if (cfg->method == method) {
13179 MonoBasicBlock *bb;
13180 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13181 bb->region = mono_find_block_region (cfg, bb->real_offset);
13183 mono_create_spvar_for_region (cfg, bb->region);
13184 if (cfg->verbose_level > 2)
13185 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13189 if (inline_costs < 0) {
13192 /* Method is too large */
13193 mname = mono_method_full_name (method, TRUE);
13194 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13195 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13199 if ((cfg->verbose_level > 2) && (cfg->method == method))
13200 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13205 g_assert (!mono_error_ok (&cfg->error));
13209 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13213 set_exception_type_from_invalid_il (cfg, method, ip);
13217 g_slist_free (class_inits);
13218 mono_basic_block_free (original_bb);
13219 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13220 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13221 if (cfg->exception_type)
13224 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source memory store opcode (*_MEMBASE_REG) to the
 * corresponding immediate-source form (*_MEMBASE_IMM).  Used when the value
 * being stored is known to be a constant, so the store can encode it inline.
 *   NOTE(review): the return-type line, the enclosing switch statement and
 * the default label are elided in this excerpt; only the case arms are shown.
 */
13228 store_membase_reg_to_store_membase_imm (int opcode)
13231 case OP_STORE_MEMBASE_REG:
13232 return OP_STORE_MEMBASE_IMM;
13233 case OP_STOREI1_MEMBASE_REG:
13234 return OP_STOREI1_MEMBASE_IMM;
13235 case OP_STOREI2_MEMBASE_REG:
13236 return OP_STOREI2_MEMBASE_IMM;
13237 case OP_STOREI4_MEMBASE_REG:
13238 return OP_STOREI4_MEMBASE_IMM;
13239 case OP_STOREI8_MEMBASE_REG:
13240 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode is a caller bug: abort rather than mis-translate. */
13242 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to its register+immediate
 * variant (e.g. OP_IADD -> OP_IADD_IMM), so a constant operand can be folded
 * directly into the instruction.
 *   NOTE(review): the switch header and most case labels are elided in this
 * excerpt; the source case for each visible return is inferred from the
 * *_IMM opcode name and should be confirmed against the full file.
 */
13249 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU operations. */
13253 return OP_IADD_IMM;
13255 return OP_ISUB_IMM;
13257 return OP_IDIV_IMM;
13259 return OP_IDIV_UN_IMM;
13261 return OP_IREM_IMM;
13263 return OP_IREM_UN_IMM;
13265 return OP_IMUL_IMM;
13267 return OP_IAND_IMM;
13271 return OP_IXOR_IMM;
13273 return OP_ISHL_IMM;
13275 return OP_ISHR_IMM;
13277 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU operations. */
13280 return OP_LADD_IMM;
13282 return OP_LSUB_IMM;
13284 return OP_LAND_IMM;
13288 return OP_LXOR_IMM;
13290 return OP_LSHL_IMM;
13292 return OP_LSHR_IMM;
13294 return OP_LSHR_UN_IMM;
/* 64-bit remainder has an immediate form only on 64-bit registers. */
13295 #if SIZEOF_REGISTER == 8
13297 return OP_LREM_IMM;
/* Comparisons. */
13301 return OP_COMPARE_IMM;
13303 return OP_ICOMPARE_IMM;
13305 return OP_LCOMPARE_IMM;
/* Memory stores: fold the stored constant into the instruction. */
13307 case OP_STORE_MEMBASE_REG:
13308 return OP_STORE_MEMBASE_IMM;
13309 case OP_STOREI1_MEMBASE_REG:
13310 return OP_STOREI1_MEMBASE_IMM;
13311 case OP_STOREI2_MEMBASE_REG:
13312 return OP_STOREI2_MEMBASE_IMM;
13313 case OP_STOREI4_MEMBASE_REG:
13314 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate encodings. */
13316 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13318 return OP_X86_PUSH_IMM;
13319 case OP_X86_COMPARE_MEMBASE_REG:
13320 return OP_X86_COMPARE_MEMBASE_IMM;
13322 #if defined(TARGET_AMD64)
13323 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13324 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* An indirect call through a constant target becomes a direct call. */
13326 case OP_VOIDCALL_REG:
13327 return OP_VOIDCALL;
/* localloc with a constant size. */
13335 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL ldind.* (load indirect) opcode to the JIT's base+offset load
 * opcode (OP_LOAD*_MEMBASE).  Both ldind.i and ldind.ref map to the
 * pointer-sized OP_LOAD_MEMBASE.
 *   NOTE(review): the switch header and most case labels are elided in this
 * excerpt; only CEE_LDIND_REF is visible, the rest are inferred from the
 * returned opcode names.
 */
13342 ldind_to_load_membase (int opcode)
13346 return OP_LOADI1_MEMBASE;
13348 return OP_LOADU1_MEMBASE;
13350 return OP_LOADI2_MEMBASE;
13352 return OP_LOADU2_MEMBASE;
13354 return OP_LOADI4_MEMBASE;
13356 return OP_LOADU4_MEMBASE;
13358 return OP_LOAD_MEMBASE;
13359 case CEE_LDIND_REF:
13360 return OP_LOAD_MEMBASE;
13362 return OP_LOADI8_MEMBASE;
13364 return OP_LOADR4_MEMBASE;
13366 return OP_LOADR8_MEMBASE;
/* Unknown ldind variant: abort. */
13368 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL stind.* (store indirect) opcode to the JIT's base+offset
 * register store opcode (OP_STORE*_MEMBASE_REG).  stind.ref maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.
 *   NOTE(review): the switch header and most case labels are elided in this
 * excerpt; only CEE_STIND_REF is visible, the rest are inferred from the
 * returned opcode names.
 */
13375 stind_to_store_membase (int opcode)
13379 return OP_STOREI1_MEMBASE_REG;
13381 return OP_STOREI2_MEMBASE_REG;
13383 return OP_STOREI4_MEMBASE_REG;
13385 case CEE_STIND_REF:
13386 return OP_STORE_MEMBASE_REG;
13388 return OP_STOREI8_MEMBASE_REG;
13390 return OP_STORER4_MEMBASE_REG;
13392 return OP_STORER8_MEMBASE_REG;
/* Unknown stind variant: abort. */
13394 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base-register+offset load (OP_LOAD*_MEMBASE) to the corresponding
 * absolute-address load (OP_LOAD*_MEM).  Only x86/amd64 provide these
 * absolute-address encodings, hence the target #ifdef; the 8-byte form is
 * additionally gated on 64-bit registers.
 *   NOTE(review): the switch header, fall-through/default path and closing
 * lines are elided in this excerpt.
 */
13401 mono_load_membase_to_load_mem (int opcode)
13403 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13404 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13406 case OP_LOAD_MEMBASE:
13407 return OP_LOAD_MEM;
13408 case OP_LOADU1_MEMBASE:
13409 return OP_LOADU1_MEM;
13410 case OP_LOADU2_MEMBASE:
13411 return OP_LOADU2_MEM;
13412 case OP_LOADI4_MEMBASE:
13413 return OP_LOADI4_MEM;
13414 case OP_LOADU4_MEMBASE:
13415 return OP_LOADU4_MEM;
13416 #if SIZEOF_REGISTER == 8
13417 case OP_LOADI8_MEMBASE:
13418 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode and an ALU opcode, return an x86/amd64
 * read-modify-write opcode that operates directly on memory
 * (e.g. add [mem], reg / add [mem], imm), allowing a load-op-store
 * sequence to be fused into a single instruction.  The store opcode is
 * checked first so the memory operand width matches the operation.
 *   NOTE(review): most case labels, the #ifdef/#endif pairing and the
 * fallback return are elided in this excerpt; the source case of each
 * visible return is inferred from the returned opcode name.
 */
13427 op_to_op_dest_membase (int store_opcode, int opcode)
/* --- 32-bit x86: only pointer/int32-sized destinations qualify. --- */
13429 #if defined(TARGET_X86)
13430 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source RMW forms. */
13435 return OP_X86_ADD_MEMBASE_REG;
13437 return OP_X86_SUB_MEMBASE_REG;
13439 return OP_X86_AND_MEMBASE_REG;
13441 return OP_X86_OR_MEMBASE_REG;
13443 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source RMW forms. */
13446 return OP_X86_ADD_MEMBASE_IMM;
13449 return OP_X86_SUB_MEMBASE_IMM;
13452 return OP_X86_AND_MEMBASE_IMM;
13455 return OP_X86_OR_MEMBASE_IMM;
13458 return OP_X86_XOR_MEMBASE_IMM;
/* --- amd64: 32-bit (X86_*) and 64-bit (AMD64_*) destination widths. --- */
13464 #if defined(TARGET_AMD64)
13465 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit register-source forms. */
13470 return OP_X86_ADD_MEMBASE_REG;
13472 return OP_X86_SUB_MEMBASE_REG;
13474 return OP_X86_AND_MEMBASE_REG;
13476 return OP_X86_OR_MEMBASE_REG;
13478 return OP_X86_XOR_MEMBASE_REG;
/* 32-bit immediate-source forms. */
13480 return OP_X86_ADD_MEMBASE_IMM;
13482 return OP_X86_SUB_MEMBASE_IMM;
13484 return OP_X86_AND_MEMBASE_IMM;
13486 return OP_X86_OR_MEMBASE_IMM;
13488 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit register-source forms. */
13490 return OP_AMD64_ADD_MEMBASE_REG;
13492 return OP_AMD64_SUB_MEMBASE_REG;
13494 return OP_AMD64_AND_MEMBASE_REG;
13496 return OP_AMD64_OR_MEMBASE_REG;
13498 return OP_AMD64_XOR_MEMBASE_REG;
/* 64-bit immediate-source forms. */
13501 return OP_AMD64_ADD_MEMBASE_IMM;
13504 return OP_AMD64_SUB_MEMBASE_IMM;
13507 return OP_AMD64_AND_MEMBASE_IMM;
13510 return OP_AMD64_OR_MEMBASE_IMM;
13513 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1-byte store into a
 * single x86/amd64 SETcc-to-memory instruction.  Only applies when the
 * store is byte-sized (OP_STOREI1_MEMBASE_REG), matching SETcc's 8-bit
 * destination.
 *   NOTE(review): the switch/case lines selecting on `opcode`
 * (presumably the CEQ/CNE-style condition opcodes) and the fallback
 * return are elided in this excerpt.
 */
13523 op_to_op_store_membase (int store_opcode, int opcode)
13525 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13528 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13529 return OP_X86_SETEQ_MEMBASE;
13531 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13532 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load feeding an instruction's FIRST source operand into a
 * combined x86/amd64 opcode that reads that operand straight from memory
 * (e.g. cmp [mem], reg / push [mem]).  The load opcode determines the
 * operand width; on amd64, cfg->backend->ilp32 distinguishes 32-bit
 * pointers (x32 ABI) from full 64-bit loads.
 *   NOTE(review): several case labels, early-return guards and #ifdef
 * boundaries are elided in this excerpt; the 64-bit compare-immediate
 * cases at 13576-13584 are visibly commented out in the original
 * ("only works for 32 bit immediates").
 */
13540 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* --- 32-bit x86 --- */
13543 /* FIXME: This has sign extension issues */
13545 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13546 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer/int32-sized loads can be fused below. */
13549 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13554 return OP_X86_PUSH_MEMBASE;
13555 case OP_COMPARE_IMM:
13556 case OP_ICOMPARE_IMM:
13557 return OP_X86_COMPARE_MEMBASE_IMM;
13560 return OP_X86_COMPARE_MEMBASE_REG;
/* --- amd64 --- */
13564 #ifdef TARGET_AMD64
13565 /* FIXME: This has sign extension issues */
13567 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13568 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push [mem] needs a full pointer-width load. */
13573 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13574 return OP_X86_PUSH_MEMBASE;
13576 /* FIXME: This only works for 32 bit immediates
13577 case OP_COMPARE_IMM:
13578 case OP_LCOMPARE_IMM:
13579 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13580 return OP_AMD64_COMPARE_MEMBASE_IMM;
13582 case OP_ICOMPARE_IMM:
13583 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13584 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32- vs 64-bit form from the load width/ABI. */
13588 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13589 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13590 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13591 return OP_AMD64_COMPARE_MEMBASE_REG;
13594 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13595 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load feeding an instruction's SECOND source operand into a
 * combined x86/amd64 reg,[mem] opcode (e.g. add reg, [mem]).  On amd64
 * the load width plus cfg->backend->ilp32 (x32 ABI) selects between the
 * 32-bit (X86_*/AMD64_ICOMPARE) and 64-bit (AMD64_*) instruction forms.
 *   NOTE(review): the switch headers, most case labels and the fallback
 * return are elided in this excerpt; the source case of each visible
 * return is inferred from the returned opcode name.
 */
13604 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* --- 32-bit x86: only pointer/int32-sized loads can be fused. --- */
13607 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13613 return OP_X86_COMPARE_REG_MEMBASE;
13615 return OP_X86_ADD_REG_MEMBASE;
13617 return OP_X86_SUB_REG_MEMBASE;
13619 return OP_X86_AND_REG_MEMBASE;
13621 return OP_X86_OR_REG_MEMBASE;
13623 return OP_X86_XOR_REG_MEMBASE;
/* --- amd64 --- */
13627 #ifdef TARGET_AMD64
/* 32-bit operand width (int32 loads, or pointer loads under ILP32). */
13628 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13631 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13633 return OP_X86_ADD_REG_MEMBASE;
13635 return OP_X86_SUB_REG_MEMBASE;
13637 return OP_X86_AND_REG_MEMBASE;
13639 return OP_X86_OR_REG_MEMBASE;
13641 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit operand width (int64 loads, or pointer loads under LP64). */
13643 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13647 return OP_AMD64_COMPARE_REG_MEMBASE;
13649 return OP_AMD64_ADD_REG_MEMBASE;
13651 return OP_AMD64_SUB_REG_MEMBASE;
13653 return OP_AMD64_AND_REG_MEMBASE;
13655 return OP_AMD64_OR_REG_MEMBASE;
13657 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes whose
 * immediate form would be software-emulated on this architecture
 * (long shifts on 32-bit registers, and mul/div/rem when the
 * MONO_ARCH_EMULATE_* macros are set).  Everything else is delegated to
 * mono_op_to_op_imm ().
 *   NOTE(review): the case labels and refusal returns inside the three
 * #if sections are elided in this excerpt; only the #if conditions and
 * the final delegation are visible.
 */
13666 mono_op_to_op_imm_noemul (int opcode)
13669 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13675 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13682 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13687 return mono_op_to_op_imm (opcode);
13692 * mono_handle_global_vregs:
13694 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13698 mono_handle_global_vregs (MonoCompile *cfg)
13700 gint32 *vreg_to_bb;
13701 MonoBasicBlock *bb;
13704 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13706 #ifdef MONO_ARCH_SIMD_INTRINSICS
13707 if (cfg->uses_simd_intrinsics)
13708 mono_simd_simplify_indirection (cfg);
13711 /* Find local vregs used in more than one bb */
13712 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13713 MonoInst *ins = bb->code;
13714 int block_num = bb->block_num;
13716 if (cfg->verbose_level > 2)
13717 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13720 for (; ins; ins = ins->next) {
13721 const char *spec = INS_INFO (ins->opcode);
13722 int regtype = 0, regindex;
13725 if (G_UNLIKELY (cfg->verbose_level > 2))
13726 mono_print_ins (ins);
13728 g_assert (ins->opcode >= MONO_CEE_LAST);
13730 for (regindex = 0; regindex < 4; regindex ++) {
13733 if (regindex == 0) {
13734 regtype = spec [MONO_INST_DEST];
13735 if (regtype == ' ')
13738 } else if (regindex == 1) {
13739 regtype = spec [MONO_INST_SRC1];
13740 if (regtype == ' ')
13743 } else if (regindex == 2) {
13744 regtype = spec [MONO_INST_SRC2];
13745 if (regtype == ' ')
13748 } else if (regindex == 3) {
13749 regtype = spec [MONO_INST_SRC3];
13750 if (regtype == ' ')
13755 #if SIZEOF_REGISTER == 4
13756 /* In the LLVM case, the long opcodes are not decomposed */
13757 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13759 * Since some instructions reference the original long vreg,
13760 * and some reference the two component vregs, it is quite hard
13761 * to determine when it needs to be global. So be conservative.
13763 if (!get_vreg_to_inst (cfg, vreg)) {
13764 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13766 if (cfg->verbose_level > 2)
13767 printf ("LONG VREG R%d made global.\n", vreg);
13771 * Make the component vregs volatile since the optimizations can
13772 * get confused otherwise.
13774 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13775 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13779 g_assert (vreg != -1);
13781 prev_bb = vreg_to_bb [vreg];
13782 if (prev_bb == 0) {
13783 /* 0 is a valid block num */
13784 vreg_to_bb [vreg] = block_num + 1;
13785 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13786 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13789 if (!get_vreg_to_inst (cfg, vreg)) {
13790 if (G_UNLIKELY (cfg->verbose_level > 2))
13791 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13795 if (vreg_is_ref (cfg, vreg))
13796 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13798 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13801 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13804 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13807 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13810 g_assert_not_reached ();
13814 /* Flag as having been used in more than one bb */
13815 vreg_to_bb [vreg] = -1;
13821 /* If a variable is used in only one bblock, convert it into a local vreg */
13822 for (i = 0; i < cfg->num_varinfo; i++) {
13823 MonoInst *var = cfg->varinfo [i];
13824 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13826 switch (var->type) {
13832 #if SIZEOF_REGISTER == 8
13835 #if !defined(TARGET_X86)
13836 /* Enabling this screws up the fp stack on x86 */
13839 if (mono_arch_is_soft_float ())
13842 /* Arguments are implicitly global */
13843 /* Putting R4 vars into registers doesn't work currently */
13844 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13845 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13847 * Make sure that the variable's liveness interval doesn't contain a call, since
13848 * that would cause the lvreg to be spilled, making the whole optimization
13851 /* This is too slow for JIT compilation */
13853 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13855 int def_index, call_index, ins_index;
13856 gboolean spilled = FALSE;
13861 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13862 const char *spec = INS_INFO (ins->opcode);
13864 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13865 def_index = ins_index;
13867 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13868 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13869 if (call_index > def_index) {
13875 if (MONO_IS_CALL (ins))
13876 call_index = ins_index;
13886 if (G_UNLIKELY (cfg->verbose_level > 2))
13887 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13888 var->flags |= MONO_INST_IS_DEAD;
13889 cfg->vreg_to_inst [var->dreg] = NULL;
13896 * Compress the varinfo and vars tables so the liveness computation is faster and
13897 * takes up less space.
13900 for (i = 0; i < cfg->num_varinfo; ++i) {
13901 MonoInst *var = cfg->varinfo [i];
13902 if (pos < i && cfg->locals_start == i)
13903 cfg->locals_start = pos;
13904 if (!(var->flags & MONO_INST_IS_DEAD)) {
13906 cfg->varinfo [pos] = cfg->varinfo [i];
13907 cfg->varinfo [pos]->inst_c0 = pos;
13908 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13909 cfg->vars [pos].idx = pos;
13910 #if SIZEOF_REGISTER == 4
13911 if (cfg->varinfo [pos]->type == STACK_I8) {
13912 /* Modify the two component vars too */
13915 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13916 var1->inst_c0 = pos;
13917 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13918 var1->inst_c0 = pos;
13925 cfg->num_varinfo = pos;
13926 if (cfg->locals_start > cfg->num_varinfo)
13927 cfg->locals_start = cfg->num_varinfo;
13931 * mono_spill_global_vars:
13933 * Generate spill code for variables which are not allocated to registers,
13934 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13935 * code is generated which could be optimized by the local optimization passes.
13938 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13940 MonoBasicBlock *bb;
13942 int orig_next_vreg;
13943 guint32 *vreg_to_lvreg;
13945 guint32 i, lvregs_len;
13946 gboolean dest_has_lvreg = FALSE;
13947 guint32 stacktypes [128];
13948 MonoInst **live_range_start, **live_range_end;
13949 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13950 int *gsharedvt_vreg_to_idx = NULL;
13952 *need_local_opts = FALSE;
/* NOTE(review): spec2 and lvregs are not declared in this excerpt — presumably file-scope buffers; confirm against the full file. */
13954 memset (spec2, 0, sizeof (spec2));
13956 /* FIXME: Move this function to mini.c */
/* Map the regtype characters used in opcode specs ('i'/'l'/'f'/'x') to the stack type used when allocating a matching lvreg. */
13957 stacktypes ['i'] = STACK_PTR;
13958 stacktypes ['l'] = STACK_I8;
13959 stacktypes ['f'] = STACK_R8;
13960 #ifdef MONO_ARCH_SIMD_INTRINSICS
13961 stacktypes ['x'] = STACK_VTYPE;
/* On 32 bit targets a 64 bit variable is represented by two component vregs (dreg + 1 / dreg + 2); give each half its own OP_REGOFFSET slot. */
13964 #if SIZEOF_REGISTER == 4
13965 /* Create MonoInsts for longs */
13966 for (i = 0; i < cfg->num_varinfo; i++) {
13967 MonoInst *ins = cfg->varinfo [i];
13969 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13970 switch (ins->type) {
13975 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13978 g_assert (ins->opcode == OP_REGOFFSET);
13980 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13982 tree->opcode = OP_REGOFFSET;
13983 tree->inst_basereg = ins->inst_basereg;
13984 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13986 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13988 tree->opcode = OP_REGOFFSET;
13989 tree->inst_basereg = ins->inst_basereg;
13990 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14000 if (cfg->compute_gc_maps) {
14001 /* Register-allocated variables (OP_REGVAR) need liveness info for the GC maps even when they do not hold GC refs */
14002 for (i = 0; i < cfg->num_varinfo; i++) {
14003 MonoInst *ins = cfg->varinfo [i];
14005 if (ins->opcode == OP_REGVAR)
14006 ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, record which vregs are gsharedvt variables: entry > 0 means a local (slot index + 1), -1 means an argument passed by ref. */
14010 if (cfg->gsharedvt) {
14011 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14013 for (i = 0; i < cfg->num_varinfo; ++i) {
14014 MonoInst *ins = cfg->varinfo [i];
14017 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14018 if (i >= cfg->locals_start) {
14020 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14021 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14022 ins->opcode = OP_GSHAREDVT_LOCAL;
14023 ins->inst_imm = idx;
14026 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14027 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14033 /* FIXME: widening and truncation */
14036 * As an optimization, when a variable allocated to the stack is first loaded into
14037 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14038 * the variable again.
14040 orig_next_vreg = cfg->next_vreg;
14041 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14042 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14046 * These arrays contain the first and last instructions accessing a given
14048 * Since we emit bblocks in the same order we process them here, and we
14049 * don't split live ranges, these will precisely describe the live range of
14050 * the variable, i.e. the instruction range where a valid value can be found
14051 * in the variables location.
14052 * The live range is computed using the liveness info computed by the liveness pass.
14053 * We can't use vmv->range, since that is an abstract live range, and we need
14054 * one which is instruction precise.
14055 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14057 /* FIXME: Only do this if debugging info is requested */
14058 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14059 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14060 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14061 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14063 /* Add spill loads/stores */
14064 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14067 if (cfg->verbose_level > 2)
14068 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within one bblock, so reset it at every bblock boundary. */
14070 /* Clear vreg_to_lvreg array */
14071 for (i = 0; i < lvregs_len; i++)
14072 vreg_to_lvreg [lvregs [i]] = 0;
14076 MONO_BB_FOR_EACH_INS (bb, ins) {
14077 const char *spec = INS_INFO (ins->opcode);
14078 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14079 gboolean store, no_lvreg;
14080 int sregs [MONO_MAX_SRC_REGS];
14082 if (G_UNLIKELY (cfg->verbose_level > 2))
14083 mono_print_ins (ins);
14085 if (ins->opcode == OP_NOP)
14089 * We handle LDADDR here as well, since it can only be decomposed
14090 * when variable addresses are known.
14092 if (ins->opcode == OP_LDADDR) {
14093 MonoInst *var = ins->inst_p0;
14095 if (var->opcode == OP_VTARG_ADDR) {
14096 /* Happens on SPARC/S390 where vtypes are passed by reference */
14097 MonoInst *vtaddr = var->inst_left;
14098 if (vtaddr->opcode == OP_REGVAR) {
14099 ins->opcode = OP_MOVE;
14100 ins->sreg1 = vtaddr->dreg;
14102 else if (var->inst_left->opcode == OP_REGOFFSET) {
14103 ins->opcode = OP_LOAD_MEMBASE;
14104 ins->inst_basereg = vtaddr->inst_basereg;
14105 ins->inst_offset = vtaddr->inst_offset;
14108 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14109 /* gsharedvt arg passed by ref */
14110 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14112 ins->opcode = OP_LOAD_MEMBASE;
14113 ins->inst_basereg = var->inst_basereg;
14114 ins->inst_offset = var->inst_offset;
14115 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14116 MonoInst *load, *load2, *load3;
14117 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14118 int reg1, reg2, reg3;
14119 MonoInst *info_var = cfg->gsharedvt_info_var;
14120 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14124 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14127 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14129 g_assert (info_var);
14130 g_assert (locals_var);
14132 /* Mark the instruction used to compute the locals var as used */
14133 cfg->gsharedvt_locals_var_ins = NULL;
14135 /* Load the offset */
14136 if (info_var->opcode == OP_REGOFFSET) {
14137 reg1 = alloc_ireg (cfg);
14138 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14139 } else if (info_var->opcode == OP_REGVAR) {
14141 reg1 = info_var->dreg;
14143 g_assert_not_reached ();
14145 reg2 = alloc_ireg (cfg);
14146 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14147 /* Load the locals area address */
14148 reg3 = alloc_ireg (cfg);
14149 if (locals_var->opcode == OP_REGOFFSET) {
14150 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14151 } else if (locals_var->opcode == OP_REGVAR) {
14152 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14154 g_assert_not_reached ();
14156 /* Compute the address */
14157 ins->opcode = OP_PADD;
14161 mono_bblock_insert_before_ins (bb, ins, load3);
14162 mono_bblock_insert_before_ins (bb, load3, load2);
14164 mono_bblock_insert_before_ins (bb, load2, load);
14166 g_assert (var->opcode == OP_REGOFFSET);
14168 ins->opcode = OP_ADD_IMM;
14169 ins->sreg1 = var->inst_basereg;
14170 ins->inst_imm = var->inst_offset;
14173 *need_local_opts = TRUE;
14174 spec = INS_INFO (ins->opcode);
/* By this point only low-level IR opcodes should remain; a CIL-level opcode here indicates a lowering bug. */
14177 if (ins->opcode < MONO_CEE_LAST) {
14178 mono_print_ins (ins);
14179 g_assert_not_reached ();
14183 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14187 if (MONO_IS_STORE_MEMBASE (ins)) {
14188 tmp_reg = ins->dreg;
14189 ins->dreg = ins->sreg2;
14190 ins->sreg2 = tmp_reg;
14193 spec2 [MONO_INST_DEST] = ' ';
14194 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14195 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14196 spec2 [MONO_INST_SRC3] = ' ';
14198 } else if (MONO_IS_STORE_MEMINDEX (ins))
14199 g_assert_not_reached ();
14204 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14205 printf ("\t %.3s %d", spec, ins->dreg);
14206 num_sregs = mono_inst_get_src_registers (ins, sregs);
14207 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14208 printf (" %d", sregs [srcindex]);
/* DREG: if the destination is a stack-allocated variable, either fuse the store into the instruction itself or emit an explicit spill store after it. */
14215 regtype = spec [MONO_INST_DEST];
14216 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14219 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14220 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14221 MonoInst *store_ins;
14223 MonoInst *def_ins = ins;
14224 int dreg = ins->dreg; /* The original vreg */
14226 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14228 if (var->opcode == OP_REGVAR) {
14229 ins->dreg = var->dreg;
14230 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14232 * Instead of emitting a load+store, use a _membase opcode.
14234 g_assert (var->opcode == OP_REGOFFSET);
14235 if (ins->opcode == OP_MOVE) {
14239 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14240 ins->inst_basereg = var->inst_basereg;
14241 ins->inst_offset = var->inst_offset;
14244 spec = INS_INFO (ins->opcode);
14248 g_assert (var->opcode == OP_REGOFFSET);
14250 prev_dreg = ins->dreg;
14252 /* Invalidate any previous lvreg for this vreg */
14253 vreg_to_lvreg [ins->dreg] = 0;
/* Under soft float an R8 variable lives in an integer pair, so store it with the I8 store opcode. */
14257 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14259 store_opcode = OP_STOREI8_MEMBASE_REG;
14262 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14264 #if SIZEOF_REGISTER != 8
14265 if (regtype == 'l') {
14266 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14267 mono_bblock_insert_after_ins (bb, ins, store_ins);
14268 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14269 mono_bblock_insert_after_ins (bb, ins, store_ins);
14270 def_ins = store_ins;
14275 g_assert (store_opcode != OP_STOREV_MEMBASE);
14277 /* Try to fuse the store into the instruction itself */
14278 /* FIXME: Add more instructions */
14279 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14280 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14281 ins->inst_imm = ins->inst_c0;
14282 ins->inst_destbasereg = var->inst_basereg;
14283 ins->inst_offset = var->inst_offset;
14284 spec = INS_INFO (ins->opcode);
14285 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14286 ins->opcode = store_opcode;
14287 ins->inst_destbasereg = var->inst_basereg;
14288 ins->inst_offset = var->inst_offset;
14292 tmp_reg = ins->dreg;
14293 ins->dreg = ins->sreg2;
14294 ins->sreg2 = tmp_reg;
14297 spec2 [MONO_INST_DEST] = ' ';
14298 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14299 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14300 spec2 [MONO_INST_SRC3] = ' ';
14302 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14303 // FIXME: The backends expect the base reg to be in inst_basereg
14304 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14306 ins->inst_basereg = var->inst_basereg;
14307 ins->inst_offset = var->inst_offset;
14308 spec = INS_INFO (ins->opcode);
14310 /* printf ("INS: "); mono_print_ins (ins); */
14311 /* Create a store instruction */
14312 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14314 /* Insert it after the instruction */
14315 mono_bblock_insert_after_ins (bb, ins, store_ins);
14317 def_ins = store_ins;
14320 * We can't assign ins->dreg to var->dreg here, since the
14321 * sregs could use it. So set a flag, and do it after
14324 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14325 dest_has_lvreg = TRUE;
/* Record the start of this vreg's precise live range at its defining instruction (first definition wins). */
14330 if (def_ins && !live_range_start [dreg]) {
14331 live_range_start [dreg] = def_ins;
14332 live_range_start_bb [dreg] = bb;
14335 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14338 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14339 tmp->inst_c1 = dreg;
14340 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* SREGS: replace variable source vregs with hregs/lvregs, fusing loads into the instruction or emitting explicit spill loads before it. */
14347 num_sregs = mono_inst_get_src_registers (ins, sregs);
14348 for (srcindex = 0; srcindex < 3; ++srcindex) {
14349 regtype = spec [MONO_INST_SRC1 + srcindex];
14350 sreg = sregs [srcindex];
14352 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14353 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14354 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14355 MonoInst *use_ins = ins;
14356 MonoInst *load_ins;
14357 guint32 load_opcode;
14359 if (var->opcode == OP_REGVAR) {
14360 sregs [srcindex] = var->dreg;
14361 //mono_inst_set_src_registers (ins, sregs);
14362 live_range_end [sreg] = use_ins;
14363 live_range_end_bb [sreg] = bb;
14365 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14368 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14369 /* var->dreg is a hreg */
14370 tmp->inst_c1 = sreg;
14371 mono_bblock_insert_after_ins (bb, ins, tmp);
14377 g_assert (var->opcode == OP_REGOFFSET);
14379 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14381 g_assert (load_opcode != OP_LOADV_MEMBASE);
14383 if (vreg_to_lvreg [sreg]) {
14384 g_assert (vreg_to_lvreg [sreg] != -1);
14386 /* The variable is already loaded to an lvreg */
14387 if (G_UNLIKELY (cfg->verbose_level > 2))
14388 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14389 sregs [srcindex] = vreg_to_lvreg [sreg];
14390 //mono_inst_set_src_registers (ins, sregs);
14394 /* Try to fuse the load into the instruction */
14395 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14396 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14397 sregs [0] = var->inst_basereg;
14398 //mono_inst_set_src_registers (ins, sregs);
14399 ins->inst_offset = var->inst_offset;
14400 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14401 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14402 sregs [1] = var->inst_basereg;
14403 //mono_inst_set_src_registers (ins, sregs);
14404 ins->inst_offset = var->inst_offset;
14406 if (MONO_IS_REAL_MOVE (ins)) {
14407 ins->opcode = OP_NOP;
14410 //printf ("%d ", srcindex); mono_print_ins (ins);
14412 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded lvreg, unless the variable is volatile/indirect or lives on the x87 fp stack. */
14414 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14415 if (var->dreg == prev_dreg) {
14417 * sreg refers to the value loaded by the load
14418 * emitted below, but we need to use ins->dreg
14419 * since it refers to the store emitted earlier.
14423 g_assert (sreg != -1);
14424 vreg_to_lvreg [var->dreg] = sreg;
14425 g_assert (lvregs_len < 1024);
14426 lvregs [lvregs_len ++] = var->dreg;
14430 sregs [srcindex] = sreg;
14431 //mono_inst_set_src_registers (ins, sregs);
14433 #if SIZEOF_REGISTER != 8
14434 if (regtype == 'l') {
14435 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14436 mono_bblock_insert_before_ins (bb, ins, load_ins);
14437 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14438 mono_bblock_insert_before_ins (bb, ins, load_ins);
14439 use_ins = load_ins;
14444 #if SIZEOF_REGISTER == 4
14445 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14447 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14448 mono_bblock_insert_before_ins (bb, ins, load_ins);
14449 use_ins = load_ins;
14453 if (var->dreg < orig_next_vreg) {
14454 live_range_end [var->dreg] = use_ins;
14455 live_range_end_bb [var->dreg] = bb;
14458 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14461 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14462 tmp->inst_c1 = var->dreg;
14463 mono_bblock_insert_after_ins (bb, ins, tmp);
14467 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass: now that the sregs have been processed it is safe to cache the dest lvreg (see the dest_has_lvreg comment above). */
14469 if (dest_has_lvreg) {
14470 g_assert (ins->dreg != -1);
14471 vreg_to_lvreg [prev_dreg] = ins->dreg;
14472 g_assert (lvregs_len < 1024);
14473 lvregs [lvregs_len ++] = prev_dreg;
14474 dest_has_lvreg = FALSE;
/* Undo the earlier dreg <-> sreg2 swap done for store opcodes. */
14478 tmp_reg = ins->dreg;
14479 ins->dreg = ins->sreg2;
14480 ins->sreg2 = tmp_reg;
/* Calls can clobber the lvregs, so the cache is invalidated across them. */
14483 if (MONO_IS_CALL (ins)) {
14484 /* Clear vreg_to_lvreg array */
14485 for (i = 0; i < lvregs_len; i++)
14486 vreg_to_lvreg [lvregs [i]] = 0;
14488 } else if (ins->opcode == OP_NOP) {
14490 MONO_INST_NULLIFY_SREGS (ins);
14493 if (cfg->verbose_level > 2)
14494 mono_print_ins_index (1, ins);
14497 /* Extend the live range based on the liveness info */
14498 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14499 for (i = 0; i < cfg->num_varinfo; i ++) {
14500 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14502 if (vreg_is_volatile (cfg, vi->vreg))
14503 /* The liveness info is incomplete */
14506 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14507 /* Live from at least the first ins of this bb */
14508 live_range_start [vi->vreg] = bb->code;
14509 live_range_start_bb [vi->vreg] = bb;
14512 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14513 /* Live at least until the last ins of this bb */
14514 live_range_end [vi->vreg] = bb->last_ins;
14515 live_range_end_bb [vi->vreg] = bb;
14522 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14523 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14525 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14526 for (i = 0; i < cfg->num_varinfo; ++i) {
14527 int vreg = MONO_VARINFO (cfg, i)->vreg;
14530 if (live_range_start [vreg]) {
14531 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14533 ins->inst_c1 = vreg;
14534 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14536 if (live_range_end [vreg]) {
14537 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14539 ins->inst_c1 = vreg;
14540 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14541 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14543 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14548 if (cfg->gsharedvt_locals_var_ins) {
14549 /* Nullify if unused */
14550 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14551 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* Only the live-range arrays were heap-allocated; everything else came from cfg->mempool. */
14554 g_free (live_range_start);
14555 g_free (live_range_end);
14556 g_free (live_range_start_bb);
14557 g_free (live_range_end_bb);
14562 * - use 'iadd' instead of 'int_add'
14563 * - handling ovf opcodes: decompose in method_to_ir.
14564 * - unify iregs/fregs
14565 * -> partly done, the missing parts are:
14566 * - a more complete unification would involve unifying the hregs as well, so
14567 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14568 * would no longer map to the machine hregs, so the code generators would need to
14569 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14570 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14571 * fp/non-fp branches speeds it up by about 15%.
14572 * - use sext/zext opcodes instead of shifts
14574 * - get rid of TEMPLOADs if possible and use vregs instead
14575 * - clean up usage of OP_P/OP_ opcodes
14576 * - cleanup usage of DUMMY_USE
14577 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14579 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14580 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14581 * - make sure handle_stack_args () is called before the branch is emitted
14582 * - when the new IR is done, get rid of all unused stuff
14583 * - COMPARE/BEQ as separate instructions or unify them ?
14584 * - keeping them separate allows specialized compare instructions like
14585 * compare_imm, compare_membase
14586 * - most back ends unify fp compare+branch, fp compare+ceq
14587 * - integrate mono_save_args into inline_method
14588 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14589 * - handle long shift opts on 32 bit platforms somehow: they require
14590 * 3 sregs (2 for arg1 and 1 for arg2)
14591 * - make byref a 'normal' type.
14592 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14593 * variable if needed.
14594 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14595 * like inline_method.
14596 * - remove inlining restrictions
14597 * - fix LNEG and enable cfold of INEG
14598 * - generalize x86 optimizations like ldelema as a peephole optimization
14599 * - add store_mem_imm for amd64
14600 * - optimize the loading of the interruption flag in the managed->native wrappers
14601 * - avoid special handling of OP_NOP in passes
14602 * - move code inserting instructions into one function/macro.
14603 * - try a coalescing phase after liveness analysis
14604 * - add float -> vreg conversion + local optimizations on !x86
14605 * - figure out how to handle decomposed branches during optimizations, ie.
14606 * compare+branch, op_jump_table+op_br etc.
14607 * - promote RuntimeXHandles to vregs
14608 * - vtype cleanups:
14609 * - add a NEW_VARLOADA_VREG macro
14610 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14611 * accessing vtype fields.
14612 * - get rid of I8CONST on 64 bit platforms
14613 * - dealing with the increase in code size due to branches created during opcode
14615 * - use extended basic blocks
14616 * - all parts of the JIT
14617 * - handle_global_vregs () && local regalloc
14618 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14619 * - sources of increase in code size:
14622 * - isinst and castclass
14623 * - lvregs not allocated to global registers even if used multiple times
14624 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14626 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14627 * - add all micro optimizations from the old JIT
14628 * - put tree optimizations into the deadce pass
14629 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14630 * specific function.
14631 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14632 * fcompare + branchCC.
14633 * - create a helper function for allocating a stack slot, taking into account
14634 * MONO_CFG_HAS_SPILLUP.
14636 * - merge the ia64 switch changes.
14637 * - optimize mono_regstate2_alloc_int/float.
14638 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14639 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14640 * parts of the tree could be separated by other instructions, killing the tree
14641 * arguments, or stores killing loads etc. Also, should we fold loads into other
14642 * instructions if the result of the load is used multiple times ?
14643 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14644 * - LAST MERGE: 108395.
14645 * - when returning vtypes in registers, generate IR and append it to the end of the
14646 * last bb instead of doing it in the epilog.
14647 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14655 - When to decompose opcodes:
14656 - earlier: this makes some optimizations hard to implement, since the low level IR
14657 no longer contains the necessary information. But it is easier to do.
14658 - later: harder to implement, enables more optimizations.
14659 - Branches inside bblocks:
14660 - created when decomposing complex opcodes.
14661 - branches to another bblock: harmless, but not tracked by the branch
14662 optimizations, so need to branch to a label at the start of the bblock.
14663 - branches to inside the same bblock: very problematic, trips up the local
14664 reg allocator. Can be fixed by splitting the current bblock, but that is a
14665 complex operation, since some local vregs can become global vregs etc.
14666 - Local/global vregs:
14667 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14668 local register allocator.
14669 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14670 structure, created by mono_create_var (). Assigned to hregs or the stack by
14671 the global register allocator.
14672 - When to do optimizations like alu->alu_imm:
14673 - earlier -> saves work later on since the IR will be smaller/simpler
14674 - later -> can work on more instructions
14675 - Handling of valuetypes:
14676 - When a vtype is pushed on the stack, a new temporary is created, an
14677 instruction computing its address (LDADDR) is emitted and pushed on
14678 the stack. Need to optimize cases when the vtype is used immediately as in
14679 argument passing, stloc etc.
14680 - Instead of the to_end stuff in the old JIT, simply call the function handling
14681 the values on the stack before emitting the last instruction of the bb.
14684 #endif /* DISABLE_JIT */