2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/* Tuning knobs for the inliner: branch cost and IL-length limit. */
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
/* Record an inline failure (only when actually inlining a non-wrapper
 * method) and jump to the common exit path. */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
/* Bail out if a compilation exception was already recorded on cfg. */
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
/* METHOD may not access CMETHOD: record the failure and abort. */
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
/* METHOD may not access FIELD: record the failure and abort. */
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
/* Generic sharing is not possible for OPCODE: record and abort. */
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
/* Same, but only fails when compiling under gsharedvt. */
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
/* Record an out-of-memory condition on cfg and abort compilation. */
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
/* Disable AOT compilation for this method; log the site when verbose. */
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
/* Flag a type-load error (trapping first when debugging) and abort. */
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
/* Like LOAD_ERROR, but also records the offending class on cfg. */
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
/* Convert a MonoError recorded in cfg->error into a compile exception. */
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later. */
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_domain_get;
150 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 * Instruction metadata
/* Expand each opcode in mini-ops.h into dreg/sreg1/sreg2(/sreg3) spec
 * characters; a space pads the 2-source form to 4 entries. */
161 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
162 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
168 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
173 /* keep in sync with the enum in mini.h */
176 #include "mini-ops.h"
/* Re-expand mini-ops.h to count the highest used source register. */
181 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
182 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
184 * This should contain the index of the last sreg + 1. This is not the same
185 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
187 const gint8 ins_sreg_counts[] = {
188 #include "mini-ops.h"
/* Initialize a MonoMethodVar; bid 0xffff marks "no use seen yet". */
193 #define MONO_INIT_VARINFO(vi,id) do { \
194 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Virtual-register allocation wrappers.  These forward to the inline
 * alloc_* helpers so that code outside this file can allocate vregs of
 * each storage class (int/long/float/pointer/stack-typed).
 */
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_lreg (MonoCompile *cfg)
208 return alloc_lreg (cfg);
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed ptr / plain) to
 * the newly allocated register. */
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the register-move opcode used to copy a value of that
 * type (OP_MOVE / OP_FMOVE / OP_RMOVE / ...).
 * NOTE(review): this extract is missing interior lines; the comments
 * below describe only the code that is visible here.
 */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_get_underlying_type (type);
275 switch (type->type) {
288 case MONO_TYPE_FNPTR:
290 case MONO_TYPE_CLASS:
291 case MONO_TYPE_STRING:
292 case MONO_TYPE_OBJECT:
293 case MONO_TYPE_SZARRAY:
294 case MONO_TYPE_ARRAY:
298 #if SIZEOF_REGISTER == 8
/* r4fp: R4 values live in their own register class, so use RMOVE. */
304 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
307 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
308 if (type->data.klass->enumtype) {
309 type = mono_class_enum_basetype (type->data.klass);
312 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
315 case MONO_TYPE_TYPEDBYREF:
317 case MONO_TYPE_GENERICINST:
318 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur when compiling shared code. */
322 g_assert (cfg->gshared);
323 if (mini_type_var_is_vt (type))
326 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
328 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print BB's in/out edge lists and its instructions.
 */
334 mono_print_bb (MonoBasicBlock *bb, const char *msg)
339 printf ("\n%s %d: [IN: ", msg, bb->block_num);
340 for (i = 0; i < bb->in_count; ++i)
341 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
343 for (i = 0; i < bb->out_count; ++i)
344 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
346 for (tree = bb->code; tree; tree = tree->next)
347 mono_print_ins_index (-1, tree);
/* Create the cached signatures for icall helpers used by the JIT. */
351 mono_create_helper_signatures (void)
353 helper_sig_domain_get = mono_create_icall_signature ("ptr");
354 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
/* Debugger hook: trap here when --break-on-unverified is enabled. */
357 static MONO_NEVER_INLINE void
358 break_on_unverified (void)
360 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *
 *   Record a method-access violation on CFG with a message naming both
 * methods; the temporary full-name strings are freed after formatting.
 */
364 static MONO_NEVER_INLINE void
365 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
367 char *method_fname = mono_method_full_name (method, TRUE);
368 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
370 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
371 g_free (method_fname);
372 g_free (cil_method_fname);
/* Like method_access_failure, but for an inaccessible field. */
375 static MONO_NEVER_INLINE void
376 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
378 char *method_fname = mono_method_full_name (method, TRUE);
379 char *field_fname = mono_field_full_name (field);
380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
381 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
382 g_free (method_fname);
383 g_free (field_fname);
/* Record that inlining failed; MSG is only logged, not stored on cfg. */
386 static MONO_NEVER_INLINE void
387 inline_failure (MonoCompile *cfg, const char *msg)
389 if (cfg->verbose_level >= 2)
390 printf ("inline failed: %s\n", msg);
391 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
394 static MONO_NEVER_INLINE void
395 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
397 if (cfg->verbose_level > 2) \
398 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Like gshared_failure, but for gsharedvt compilations; the diagnostic
 * is also stored in cfg->exception_message.
 */
402 static MONO_NEVER_INLINE void
403 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
406 if (cfg->verbose_level >= 2)
407 printf ("%s\n", cfg->exception_message);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
412 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
413 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the method unverified; under gsharedvt fall back to compiling the
 * concrete instantiation instead of failing outright. */
415 #define UNVERIFIED do { \
416 if (cfg->gsharedvt) { \
417 if (cfg->verbose_level > 2) \
418 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
420 goto exception_exit; \
422 break_on_unverified (); \
/* Look up (creating lazily) the basic block starting at IL offset IP. */
426 #define GET_BBLOCK(cfg,tblock,ip) do { \
427 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
429 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
430 NEW_BBLOCK (cfg, (tblock)); \
431 (tblock)->cil_code = (ip); \
432 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an LEA computing sr1 + (sr2 << shift) + imm
 * into a fresh managed-pointer register. */
436 #if defined(TARGET_X86) || defined(TARGET_AMD64)
437 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
438 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
439 (dest)->dreg = alloc_ireg_mp ((cfg)); \
440 (dest)->sreg1 = (sr1); \
441 (dest)->sreg2 = (sr2); \
442 (dest)->inst_imm = (imm); \
443 (dest)->backend.shift_amount = (shift); \
444 MONO_ADD_INS ((cfg)->cbb, (dest)); \
448 /* Emit conversions so both operands of a binary opcode are of the same type */
450 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
452 MonoInst *arg1 = *arg1_ref;
453 MonoInst *arg2 = *arg2_ref;
/* R4 mixed with R8: widen the R4 side to R8. */
456 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
457 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
460 /* Mixing r4/r8 is allowed by the spec */
461 if (arg1->type == STACK_R4) {
462 int dreg = alloc_freg (cfg);
464 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
465 conv->type = STACK_R8;
469 if (arg2->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
473 conv->type = STACK_R8;
/* On 64-bit, sign-extend an I4 operand mixed with a native pointer. */
479 #if SIZEOF_REGISTER == 8
480 /* FIXME: Need to add many more cases */
481 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
484 int dr = alloc_preg (cfg);
485 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
486 (ins)->sreg2 = widen->dreg;
/* Pop two stack slots, emit the typed binary op, push the result. */
491 #define ADD_BINOP(op) do { \
492 MONO_INST_NEW (cfg, ins, (op)); \
494 ins->sreg1 = sp [0]->dreg; \
495 ins->sreg2 = sp [1]->dreg; \
496 type_from_op (cfg, ins, sp [0], sp [1]); \
498 /* Have to insert a widening op */ \
499 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
500 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
501 MONO_ADD_INS ((cfg)->cbb, (ins)); \
502 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one stack slot, emit the typed unary op, push the result. */
505 #define ADD_UNOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 type_from_op (cfg, ins, sp [0], NULL); \
511 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
512 MONO_ADD_INS ((cfg)->cbb, (ins)); \
513 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair and wire the true/false
 * successor blocks into the CFG. */
516 #define ADD_BINCOND(next_block) do { \
519 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
520 cmp->sreg1 = sp [0]->dreg; \
521 cmp->sreg2 = sp [1]->dreg; \
522 type_from_op (cfg, cmp, sp [0], sp [1]); \
524 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
525 type_from_op (cfg, ins, sp [0], sp [1]); \
526 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
527 GET_BBLOCK (cfg, tblock, target); \
528 link_bblock (cfg, cfg->cbb, tblock); \
529 ins->inst_true_bb = tblock; \
530 if ((next_block)) { \
531 link_bblock (cfg, cfg->cbb, (next_block)); \
532 ins->inst_false_bb = (next_block); \
533 start_new_bblock = 1; \
535 GET_BBLOCK (cfg, tblock, ip); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_false_bb = tblock; \
538 start_new_bblock = 2; \
/* Values still on the eval stack must be spilled before the branch. */ \
540 if (sp != stack_start) { \
541 handle_stack_args (cfg, stack_start, sp - stack_start); \
542 CHECK_UNVERIFIABLE (cfg); \
544 MONO_ADD_INS (cfg->cbb, cmp); \
545 MONO_ADD_INS (cfg->cbb, ins); \
549 * link_bblock: Links two basic blocks
551 * links two basic blocks in the control flow graph: the 'from'
552 * argument is the starting block and the 'to' argument is the block
553 * control flow transfers to after 'from'.
556 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
558 MonoBasicBlock **newa;
/* Debug tracing of the edge being added.
 * NOTE(review): "IL%04x" here vs "IL_%04x" elsewhere looks like a typo
 * in the debug message — confirm before changing output format. */
562 if (from->cil_code) {
564 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
566 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
569 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
571 printf ("edge from entry to exit\n");
/* Add 'to' to from->out_bb unless the edge already exists; the array is
 * grown by reallocating from the compile mempool. */
576 for (i = 0; i < from->out_count; ++i) {
577 if (to == from->out_bb [i]) {
583 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
584 for (i = 0; i < from->out_count; ++i) {
585 newa [i] = from->out_bb [i];
/* Symmetrically add 'from' to to->in_bb. */
593 for (i = 0; i < to->in_count; ++i) {
594 if (from == to->in_bb [i]) {
600 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
601 for (i = 0; i < to->in_count; ++i) {
602 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
611 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
613 link_bblock (cfg, from, to);
617 * mono_find_block_region:
619 * We mark each basic block with a region ID. We use that to avoid BB
620 * optimizations when blocks are in different regions.
623 * A region token that encodes where this region is, and information
624 * about the clause owner for this block.
626 * The region encodes the try/catch/filter clause that owns this block
627 * as well as the type. -1 is a special value that represents a block
628 * that is in none of try/catch/filter.
631 mono_find_block_region (MonoCompile *cfg, int offset)
633 MonoMethodHeader *header = cfg->header;
634 MonoExceptionClause *clause;
/* First pass: handler regions (filter/finally/fault/catch) take
 * precedence over plain try regions. */
637 for (i = 0; i < header->num_clauses; ++i) {
638 clause = &header->clauses [i];
639 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
640 (offset < (clause->handler_offset)))
641 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
643 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
644 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
645 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
646 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
647 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
649 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) regions. */
652 for (i = 0; i < header->num_clauses; ++i) {
653 clause = &header->clauses [i];
655 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
656 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the clauses of kind TYPE whose protected range contains IP
 * but not TARGET (i.e. clauses a branch from IP to TARGET would leave).
 */
663 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
665 MonoMethodHeader *header = cfg->header;
666 MonoExceptionClause *clause;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
672 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
673 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
674 if (clause->flags == type)
675 res = g_list_append (res, clause);
/* Get or create the stack-pointer save variable for an EH region. */
682 mono_create_spvar_for_region (MonoCompile *cfg, int region)
686 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
690 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
691 /* prevent it from being register allocated */
692 var->flags |= MONO_INST_VOLATILE;
694 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler offset, if any. */
698 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
700 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for a handler offset. */
704 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
708 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
712 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
713 /* prevent it from being register allocated */
714 var->flags |= MONO_INST_VOLATILE;
716 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
722 * Returns the type used in the eval stack when @type is loaded.
723 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
726 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
730 type = mini_get_underlying_type (type);
731 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
733 inst->type = STACK_MP;
738 switch (type->type) {
740 inst->type = STACK_INV;
748 inst->type = STACK_I4;
753 case MONO_TYPE_FNPTR:
754 inst->type = STACK_PTR;
756 case MONO_TYPE_CLASS:
757 case MONO_TYPE_STRING:
758 case MONO_TYPE_OBJECT:
759 case MONO_TYPE_SZARRAY:
760 case MONO_TYPE_ARRAY:
761 inst->type = STACK_OBJ;
765 inst->type = STACK_I8;
/* R4 stack type depends on whether the backend keeps r4 distinct. */
768 inst->type = cfg->r4_stack_type;
771 inst->type = STACK_R8;
773 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying integral type. */
774 if (type->data.klass->enumtype) {
775 type = mono_class_enum_basetype (type->data.klass);
779 inst->type = STACK_VTYPE;
782 case MONO_TYPE_TYPEDBYREF:
783 inst->klass = mono_defaults.typed_reference_class;
784 inst->type = STACK_VTYPE;
786 case MONO_TYPE_GENERICINST:
787 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only legal in shared code; gsharedvt value types
 * stay as VTYPE, otherwise recurse on the shared underlying type. */
791 g_assert (cfg->gshared);
792 if (mini_is_gsharedvt_type (type)) {
793 g_assert (cfg->gsharedvt);
794 inst->type = STACK_VTYPE;
796 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
800 g_error ("unknown type 0x%02x in eval stack type", type->type);
805 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of numeric binops, indexed [lhs][rhs] by eval-stack type;
 * STACK_INV marks illegal combinations. */
808 bin_num_table [STACK_MAX] [STACK_MAX] = {
809 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
814 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of 'neg', indexed by operand stack type. */
822 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
825 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
827 bin_int_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: 0 = invalid, nonzero encodes allowed/warn cases. */
839 bin_comp_table [STACK_MAX] [STACK_MAX] = {
840 /* Inv i L p F & O vt r4 */
842 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
843 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
844 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
845 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
846 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
847 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
848 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
849 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
852 /* reduce the size of this table */
/* Result type of shift ops; the shift count must be I4 or native int. */
854 shift_table [STACK_MAX] [STACK_MAX] = {
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
866 * Tables to map from the non-specific opcode to the matching
867 * type-specific opcode.
869 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
871 binops_op_map [STACK_MAX] = {
872 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
875 /* handles from CEE_NEG to CEE_CONV_U8 */
877 unops_op_map [STACK_MAX] = {
878 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
881 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
883 ovfops_op_map [STACK_MAX] = {
884 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
887 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
889 ovf2ops_op_map [STACK_MAX] = {
890 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
893 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
895 ovf3ops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
899 /* handles from CEE_BEQ to CEE_BLT_UN */
901 beqops_op_map [STACK_MAX] = {
902 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
905 /* handles from CEE_CEQ to CEE_CLT_UN */
907 ceqops_op_map [STACK_MAX] = {
908 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
912 * Sets ins->type (the type on the eval stack) according to the
913 * type of the opcode and the arguments to it.
914 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
916 * FIXME: this function sets ins->type unconditionally in some cases, but
917 * it should set it to invalid for some types (a conv.x on an object)
920 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
922 switch (ins->opcode) {
/* Arithmetic binops: validate operand types, then specialize the
 * generic CEE_* opcode via the per-stack-type op maps. */
929 /* FIXME: check unverifiable args for STACK_MP */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
938 ins->type = bin_int_table [src1->type] [src2->type];
939 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the value operand. */
944 ins->type = shift_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/R/F/I variant from the operand's stack type. */
950 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
951 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
952 ins->opcode = OP_LCOMPARE;
953 else if (src1->type == STACK_R4)
954 ins->opcode = OP_RCOMPARE;
955 else if (src1->type == STACK_R8)
956 ins->opcode = OP_FCOMPARE;
958 ins->opcode = OP_ICOMPARE;
960 case OP_ICOMPARE_IMM:
/* Immediate compare: only one stack operand, so index the table twice
 * with src1's type. */
961 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
962 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
963 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
975 ins->opcode += beqops_op_map [src1->type];
978 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
979 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt: only combinations whose table entry has bit 0 set verify. */
985 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
986 ins->opcode += ceqops_op_map [src1->type];
/* neg */
990 ins->type = neg_table [src1->type];
991 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are legal. */
994 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
995 ins->type = src1->type;
997 ins->type = STACK_INV;
998 ins->opcode += unops_op_map [ins->type];
/* conv to small int */
1004 ins->type = STACK_I4;
1005 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned source to R8. */
1008 ins->type = STACK_R8;
1009 switch (src1->type) {
1012 ins->opcode = OP_ICONV_TO_R_UN;
1015 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit. */
1019 case CEE_CONV_OVF_I1:
1020 case CEE_CONV_OVF_U1:
1021 case CEE_CONV_OVF_I2:
1022 case CEE_CONV_OVF_U2:
1023 case CEE_CONV_OVF_I4:
1024 case CEE_CONV_OVF_U4:
1025 ins->type = STACK_I4;
1026 ins->opcode += ovf3ops_op_map [src1->type];
1028 case CEE_CONV_OVF_I_UN:
1029 case CEE_CONV_OVF_U_UN:
1030 ins->type = STACK_PTR;
1031 ins->opcode += ovf2ops_op_map [src1->type];
1033 case CEE_CONV_OVF_I1_UN:
1034 case CEE_CONV_OVF_I2_UN:
1035 case CEE_CONV_OVF_I4_UN:
1036 case CEE_CONV_OVF_U1_UN:
1037 case CEE_CONV_OVF_U2_UN:
1038 case CEE_CONV_OVF_U4_UN:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; opcode depends on source width. */
1043 ins->type = STACK_PTR;
1044 switch (src1->type) {
1046 ins->opcode = OP_ICONV_TO_U;
1050 #if SIZEOF_VOID_P == 8
1051 ins->opcode = OP_LCONV_TO_U;
1053 ins->opcode = OP_MOVE;
1057 ins->opcode = OP_LCONV_TO_U;
1060 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit */
1066 ins->type = STACK_I8;
1067 ins->opcode += unops_op_map [src1->type];
1069 case CEE_CONV_OVF_I8:
1070 case CEE_CONV_OVF_U8:
1071 ins->type = STACK_I8;
1072 ins->opcode += ovf3ops_op_map [src1->type];
1074 case CEE_CONV_OVF_U8_UN:
1075 case CEE_CONV_OVF_I8_UN:
1076 ins->type = STACK_I8;
1077 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4 / conv.r8 */
1080 ins->type = cfg->r4_stack_type;
1081 ins->opcode += unops_op_map [src1->type];
1084 ins->type = STACK_R8;
1085 ins->opcode += unops_op_map [src1->type];
1088 ins->type = STACK_R8;
/* conv to 32-bit with the checked-ops map. */
1092 ins->type = STACK_I4;
1093 ins->opcode += ovfops_op_map [src1->type];
1096 case CEE_CONV_OVF_I:
1097 case CEE_CONV_OVF_U:
1098 ins->type = STACK_PTR;
1099 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1102 case CEE_ADD_OVF_UN:
1104 case CEE_MUL_OVF_UN:
1106 case CEE_SUB_OVF_UN:
1107 ins->type = bin_num_table [src1->type] [src2->type];
1108 ins->opcode += ovfops_op_map [src1->type];
1109 if (ins->type == STACK_R8)
1110 ins->type = STACK_INV;
/* Loads: stack type determined by the load width. */
1112 case OP_LOAD_MEMBASE:
1113 ins->type = STACK_PTR;
1115 case OP_LOADI1_MEMBASE:
1116 case OP_LOADU1_MEMBASE:
1117 case OP_LOADI2_MEMBASE:
1118 case OP_LOADU2_MEMBASE:
1119 case OP_LOADI4_MEMBASE:
1120 case OP_LOADU4_MEMBASE:
1121 ins->type = STACK_PTR;
1123 case OP_LOADI8_MEMBASE:
1124 ins->type = STACK_I8;
1126 case OP_LOADR4_MEMBASE:
1127 ins->type = cfg->r4_stack_type;
1129 case OP_LOADR8_MEMBASE:
1130 ins->type = STACK_R8;
1133 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1137 if (ins->type == STACK_MP)
1138 ins->klass = mono_defaults.object_class;
/* Map ldind.* index to the eval-stack type produced by the load. */
1143 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* NOTE(review): param_table appears unused in the visible code (see the
 * commented-out check below) — confirm before removing. */
1149 param_table [STACK_MAX] [STACK_MAX] = {
/* Cheap sanity check that ARGS match SIG (used for devirt/intrinsics);
 * returns 0 on the first incompatible argument. */
1154 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1159 switch (args->type) {
1169 for (i = 0; i < sig->param_count; ++i) {
1170 switch (args [i].type) {
1174 if (!sig->params [i]->byref)
1178 if (sig->params [i]->byref)
1180 switch (sig->params [i]->type) {
1181 case MONO_TYPE_CLASS:
1182 case MONO_TYPE_STRING:
1183 case MONO_TYPE_OBJECT:
1184 case MONO_TYPE_SZARRAY:
1185 case MONO_TYPE_ARRAY:
/* Float args must be R4 or R8 by-value parameters. */
1192 if (sig->params [i]->byref)
1194 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1203 /*if (!param_table [args [i].type] [sig->params [i]->type])
1211 * When we need a pointer to the current domain many times in a method, we
1212 * call mono_domain_get() once and we store the result in a local variable.
1213 * This function returns the variable that represents the MonoDomain*.
1215 inline static MonoInst *
1216 mono_get_domainvar (MonoCompile *cfg)
1218 if (!cfg->domainvar)
1219 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 return cfg->domainvar;
1224 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only needed on arches that define
 * MONO_ARCH_NEED_GOT_VAR and only when compiling AOT. */
1228 mono_get_got_var (MonoCompile *cfg)
1230 #ifdef MONO_ARCH_NEED_GOT_VAR
1231 if (!cfg->compile_aot)
1233 if (!cfg->got_var) {
1234 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1236 return cfg->got_var;
/* Lazily create the rgctx variable used by generic-shared code. */
1243 mono_get_vtable_var (MonoCompile *cfg)
1245 g_assert (cfg->gshared);
1247 if (!cfg->rgctx_var) {
1248 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1249 /* force the var to be stack allocated */
1250 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1253 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a MonoType. */
1257 type_from_stack_type (MonoInst *ins) {
1258 switch (ins->type) {
1259 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1260 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1261 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1262 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1263 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the pointee class's this_arg type. */
1265 return &ins->klass->this_arg;
1266 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1267 case STACK_VTYPE: return &ins->klass->byval_arg;
1269 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack type (inverse of the above). */
1274 static G_GNUC_UNUSED int
1275 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1277 t = mono_type_get_underlying_type (t);
1289 case MONO_TYPE_FNPTR:
1291 case MONO_TYPE_CLASS:
1292 case MONO_TYPE_STRING:
1293 case MONO_TYPE_OBJECT:
1294 case MONO_TYPE_SZARRAY:
1295 case MONO_TYPE_ARRAY:
1301 return cfg->r4_stack_type;
1304 case MONO_TYPE_VALUETYPE:
1305 case MONO_TYPE_TYPEDBYREF:
1307 case MONO_TYPE_GENERICINST:
1308 if (mono_type_generic_inst_is_valuetype (t))
1314 g_assert_not_reached ();
/* Element class accessed by a ldelem/stelem/ldind-style opcode. */
1321 array_access_to_klass (int opcode)
1325 return mono_defaults.byte_class;
1327 return mono_defaults.uint16_class;
1330 return mono_defaults.int_class;
1333 return mono_defaults.sbyte_class;
1336 return mono_defaults.int16_class;
1339 return mono_defaults.int32_class;
1341 return mono_defaults.uint32_class;
1344 return mono_defaults.int64_class;
1347 return mono_defaults.single_class;
1350 return mono_defaults.double_class;
1351 case CEE_LDELEM_REF:
1352 case CEE_STELEM_REF:
1353 return mono_defaults.object_class;
1355 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Get (or create) a local variable used to carry a stack value across basic
 * block boundaries. Variables are shared via cfg->intvars, keyed by
 * (stack type, slot); slots beyond header->max_stack (possible when inlining)
 * always get a fresh variable.
 */
1361 * We try to share variables when possible
1364 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1369 /* inlining can result in deeper stacks */
1370 if (slot >= cfg->header->max_stack)
1371 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1373 pos = ins->type - 1 + slot * STACK_MAX;
1375 switch (ins->type) {
1382 if ((vnum = cfg->intvars [pos]))
1383 return cfg->varinfo [vnum];
1384 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1385 cfg->intvars [pos] = res->inst_c0;
1388 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, remember the (image, token) pair that produced KEY so
 * the AOT compiler can later re-resolve it. Skipped when a generic_context
 * is active (token alone would be ambiguous) or for wrapper references
 * (table == 0).
 */
1394 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1397 * Don't use this if a generic_context is set, since that means AOT can't
1398 * look up the method using just the image+token.
1399 * table == 0 means this is a reference made from a wrapper.
1401 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1402 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1403 jump_info_token->image = image;
1404 jump_info_token->token = token;
1405 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 * Spill the COUNT values in SP (the evaluation stack) into shared local
 * variables at the end of cfg->cbb, so successor blocks can reload them.
 * See the detailed description below (original comment). Sets
 * cfg->unverifiable on a stack-depth mismatch at a join point.
 */
1410 * This function is called to handle items that are left on the evaluation stack
1411 * at basic block boundaries. What happens is that we save the values to local variables
1412 * and we reload them later when first entering the target basic block (with the
1413 * handle_loaded_temps () function).
1414 * A single joint point will use the same variables (stored in the array bb->out_stack or
1415 * bb->in_stack, if the basic block is before or after the joint point).
1417 * This function needs to be called _before_ emitting the last instruction of
1418 * the bb (i.e. before emitting a branch).
1419 * If the stack merge fails at a join point, cfg->unverifiable is set.
1422 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1425 MonoBasicBlock *bb = cfg->cbb;
1426 MonoBasicBlock *outb;
1427 MonoInst *inst, **locals;
1432 if (cfg->verbose_level > 3)
1433 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1434 if (!bb->out_scount) {
1435 bb->out_scount = count;
1436 //printf ("bblock %d has out:", bb->block_num);
/* First pass over successors: reuse an existing in_stack if one is found. */
1438 for (i = 0; i < bb->out_count; ++i) {
1439 outb = bb->out_bb [i];
1440 /* exception handlers are linked, but they should not be considered for stack args */
1441 if (outb->flags & BB_EXCEPTION_HANDLER)
1443 //printf (" %d", outb->block_num);
1444 if (outb->in_stack) {
1446 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh spill variables. */
1452 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1453 for (i = 0; i < count; ++i) {
1455 * try to reuse temps already allocated for this purpouse, if they occupy the same
1456 * stack slot and if they are of the same type.
1457 * This won't cause conflicts since if 'local' is used to
1458 * store one of the values in the in_stack of a bblock, then
1459 * the same variable will be used for the same outgoing stack
1461 * This doesn't work when inlining methods, since the bblocks
1462 * in the inlined methods do not inherit their in_stack from
1463 * the bblock they are inlined to. See bug #58863 for an
1466 if (cfg->inlined_method)
1467 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1469 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this block's out_stack as the in_stack of each successor. */
1474 for (i = 0; i < bb->out_count; ++i) {
1475 outb = bb->out_bb [i];
1476 /* exception handlers are linked, but they should not be considered for stack args */
1477 if (outb->flags & BB_EXCEPTION_HANDLER)
1479 if (outb->in_scount) {
1480 if (outb->in_scount != bb->out_scount) {
1481 cfg->unverifiable = TRUE;
1484 continue; /* check they are the same locals */
1486 outb->in_scount = count;
1487 outb->in_stack = bb->out_stack;
/* Emit the stores spilling each stack value into its shared temp. */
1490 locals = bb->out_stack;
1492 for (i = 0; i < count; ++i) {
1493 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1494 inst->cil_code = sp [i]->cil_code;
1495 sp [i] = locals [i];
1496 if (cfg->verbose_level > 3)
1497 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1501 * It is possible that the out bblocks already have in_stack assigned, and
1502 * the in_stacks differ. In this case, we will store to all the different
1509 /* Find a bblock which has a different in_stack */
1511 while (bindex < bb->out_count) {
1512 outb = bb->out_bb [bindex];
1513 /* exception handlers are linked, but they should not be considered for stack args */
1514 if (outb->flags & BB_EXCEPTION_HANDLER) {
1518 if (outb->in_stack != locals) {
1519 for (i = 0; i < count; ++i) {
1520 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1521 inst->cil_code = sp [i]->cil_code;
1522 sp [i] = locals [i];
1523 if (cfg->verbose_level > 3)
1524 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1526 locals = outb->in_stack;
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap found at BASE_REG+OFFSET has the bit for KLASS's interface_id set.
 * Three code paths: compressed bitmap (icall), AOT (interface id loaded as a
 * patchable constant, bit computed at runtime), and JIT (byte/bit offsets
 * folded into immediates at compile time).
 */
1536 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1538 int ibitmap_reg = alloc_preg (cfg);
1539 #ifdef COMPRESSED_INTERFACE_BITMAP
1541 MonoInst *res, *ins;
1542 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1543 MONO_ADD_INS (cfg->cbb, ins);
1545 if (cfg->compile_aot)
1546 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1548 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1549 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1550 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1552 int ibitmap_byte_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1556 if (cfg->compile_aot) {
1557 int iid_reg = alloc_preg (cfg);
1558 int shifted_iid_reg = alloc_preg (cfg);
1559 int ibitmap_byte_address_reg = alloc_preg (cfg);
1560 int masked_iid_reg = alloc_preg (cfg);
1561 int iid_one_bit_reg = alloc_preg (cfg);
1562 int iid_bit_reg = alloc_preg (cfg);
1563 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) */
1564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1566 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1568 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: interface_id known at compile time, fold into immediates. */
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_load_intf_bit_reg_class:
 * Interface-bitmap check taking a MonoClass* in KLASS_REG (uses the
 * MonoClass interface_bitmap field offset).
 */
1579 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1580 * stored in "klass_reg" implements the interface "klass".
1583 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1585 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/*
 * mini_emit_load_intf_bit_reg_vtable:
 * Same as above, but takes a MonoVTable* in VTABLE_REG (uses the MonoVTable
 * interface_bitmap field offset).
 */
1589 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1590 * stored in "vtable_reg" implements the interface "klass".
1593 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1595 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 * Emit a range check on KLASS's interface id against MAX_IID_REG.
 * On failure: branch to FALSE_TARGET when given, otherwise raise
 * InvalidCastException (the branch/exception choice hinges on a condition
 * elided in this extract — presumably whether false_target is non-NULL).
 */
1599 * Emit code which checks whenever the interface id of @klass is smaller than
1600 * than the value given by max_iid_reg.
1603 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1604 MonoBasicBlock *false_target)
1606 if (cfg->compile_aot) {
1607 int iid_reg = alloc_preg (cfg);
1608 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1609 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1616 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1619 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id, then defers to mini_emit_max_iid_check. */
1621 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1622 MonoBasicBlock *false_target)
1624 int max_iid_reg = alloc_preg (cfg);
1626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1627 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1630 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id, then defers to mini_emit_max_iid_check. */
1632 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1633 MonoBasicBlock *false_target)
1635 int max_iid_reg = alloc_preg (cfg);
1637 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1638 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "isinst"-style subtype test using the supertypes table: load
 * supertypes[idepth-1] from the candidate class in KLASS_REG and compare it
 * against KLASS (as an inst dreg, an AOT class constant, or an immediate).
 * Branch to TRUE_TARGET on match, FALSE_TARGET when the idepth check fails.
 * The idepth guard is only emitted for deep hierarchies
 * (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE).
 */
1642 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1644 int idepth_reg = alloc_preg (cfg);
1645 int stypes_reg = alloc_preg (cfg);
1646 int stype = alloc_preg (cfg);
1648 mono_class_setup_supertypes (klass);
1650 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against klass_ins->dreg / AOT class const / immediate pointer. */
1658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1659 } else if (cfg->compile_aot) {
1660 int const_reg = alloc_preg (cfg);
1661 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1662 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst-style test without an explicit klass inst. */
1670 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1672 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against a vtable: max-iid range check, then
 * interface-bitmap bit test. Nonzero bit => branch to TRUE_TARGET; zero =>
 * InvalidCastException (or FALSE_TARGET path — choice elided in extract).
 */
1676 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1678 int intf_reg = alloc_preg (cfg);
1680 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1681 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1686 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_iface_class_cast:
 * Same interface cast check as mini_emit_iface_cast, but KLASS_REG holds a
 * MonoClass* rather than a MonoVTable*.
 */
1690 * Variant of the above that takes a register to the class, not the vtable.
1693 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1695 int intf_bit_reg = alloc_preg (cfg);
1697 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1698 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1703 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check of KLASS_REG against KLASS (via
 * KLASS_INST's dreg, an AOT class constant, or an immediate), raising
 * InvalidCastException on mismatch.
 */
1707 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1711 } else if (cfg->compile_aot) {
1712 int const_reg = alloc_preg (cfg);
1713 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1714 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check without an explicit klass inst. */
1722 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1724 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (no exception raised here, unlike mini_emit_class_check).
 */
1728 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1730 if (cfg->compile_aot) {
1731 int const_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1733 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1735 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1737 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst recurses through it for
 * arrays of arrays. */
1741 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a full castclass check of the class in KLASS_REG against KLASS,
 * raising InvalidCastException on failure. Arrays get a rank check plus a
 * recursive element-class check (with special handling for enum element
 * types and for SZARRAY "is a vector" verification); non-arrays fall back
 * to the supertypes-table check. OBJ_REG may be -1 to skip the vector check.
 */
1744 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1747 int rank_reg = alloc_preg (cfg);
1748 int eclass_reg = alloc_preg (cfg);
1750 g_assert (!klass_inst);
1751 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1753 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1754 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Enum-related element-class cases: enums and their base types are
 * cast-compatible in ways a plain class compare would reject. */
1756 if (klass->cast_class == mono_defaults.object_class) {
1757 int parent_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1759 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1760 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1761 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1762 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1763 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1764 } else if (klass->cast_class == mono_defaults.enum_class) {
1765 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1766 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1767 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1769 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1770 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1773 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1774 /* Check that the object is a vector too */
1775 int bounds_reg = alloc_preg (cfg);
1776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1778 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check (cf.
 * mini_emit_isninst_cast_inst, but failures raise instead of branching). */
1781 int idepth_reg = alloc_preg (cfg);
1782 int stypes_reg = alloc_preg (cfg);
1783 int stype = alloc_preg (cfg);
1785 mono_class_setup_supertypes (klass);
1787 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1790 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1793 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1794 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit klass inst. */
1799 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1801 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that sets SIZE bytes at DESTREG+OFFSET to VAL (only
 * val == 0 is supported, per the assert). Small aligned sizes become a
 * single store-immediate; otherwise a value register is materialized and
 * stores of decreasing width are emitted, using wide (4/8-byte) stores
 * only when unaligned access is allowed.
 */
1805 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1809 g_assert (val == 0);
1814 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1817 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1820 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1825 #if SIZEOF_REGISTER == 8
1827 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1833 val_reg = alloc_preg (cfg);
1835 if (SIZEOF_REGISTER == 8)
1836 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1838 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1841 /* This could be optimized further if neccesary */
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1850 #if !NO_UNALIGNED_ACCESS
1851 if (SIZEOF_REGISTER == 8) {
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET.
 * Asserts size < 10000 to bound code expansion. Unaligned copies use byte
 * moves; aligned ones use the widest load/store pairs allowed (8-byte only
 * on 64-bit and when unaligned access is permitted), then step down to
 * 4/2/1-byte tails.
 */
1883 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1890 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1891 g_assert (size < 10000);
/* Unaligned source/dest: byte-by-byte copy. */
1894 /* This could be optimized further if neccesary */
1896 cur_reg = alloc_preg (cfg);
1897 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1905 #if !NO_UNALIGNED_ACCESS
1906 if (SIZEOF_REGISTER == 8) {
1908 cur_reg = alloc_preg (cfg);
1909 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.
 * AOT uses OP_TLS_SET_REG with a patchable TLS-offset constant; JIT uses
 * OP_TLS_SET with the offset resolved now via mini_get_tls_offset ().
 */
1945 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1949 if (cfg->compile_aot) {
1950 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1951 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1953 ins->sreg2 = c->dreg;
1954 MONO_ADD_INS (cfg->cbb, ins);
1956 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1958 ins->inst_offset = mini_get_tls_offset (tls_key);
1959 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 * Emit IR that links cfg->lmf_var onto the thread's LMF (Last Managed
 * Frame) stack. Fast path stores straight into the TLS LMF slot; otherwise
 * lmf_addr is obtained (jit-tls intrinsic, lmf-addr intrinsic, inlined
 * pthread_getspecific, or the mono_get_lmf_addr icall), cached in
 * cfg->lmf_addr_var, and the classic prev_lmf linking is emitted.
 */
1966 * Emit IR to push the current LMF onto the LMF stack.
1969 emit_push_lmf (MonoCompile *cfg)
1972 * Emit IR to push the LMF:
1973 * lmf_addr = <lmf_addr from tls>
1974 * lmf->lmf_addr = lmf_addr
1975 * lmf->prev_lmf = *lmf_addr
1978 int lmf_reg, prev_lmf_reg;
1979 MonoInst *ins, *lmf_ins;
/* Fast path: LMF lives directly in a TLS slot. */
1984 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1985 /* Load current lmf */
1986 lmf_ins = mono_get_lmf_intrinsic (cfg);
1988 MONO_ADD_INS (cfg->cbb, lmf_ins);
1989 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1990 lmf_reg = ins->dreg;
1991 /* Save previous_lmf */
1992 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1994 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1997 * Store lmf_addr in a variable, so it can be allocated to a global register.
1999 if (!cfg->lmf_addr_var)
2000 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr = &jit_tls->lmf via the jit-tls intrinsic when available. */
2003 ins = mono_get_jit_tls_intrinsic (cfg);
2005 int jit_tls_dreg = ins->dreg;
2007 MONO_ADD_INS (cfg->cbb, ins);
2008 lmf_reg = alloc_preg (cfg);
2009 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2011 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2014 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2016 MONO_ADD_INS (cfg->cbb, lmf_ins);
2019 MonoInst *args [16], *jit_tls_ins, *ins;
2021 /* Inline mono_get_lmf_addr () */
2022 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2024 /* Load mono_jit_tls_id */
2025 if (cfg->compile_aot)
2026 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2028 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2029 /* call pthread_getspecific () */
2030 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2031 /* lmf_addr = &jit_tls->lmf */
2032 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2035 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2039 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2041 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2042 lmf_reg = ins->dreg;
2044 prev_lmf_reg = alloc_preg (cfg);
2045 /* Save previous_lmf */
2046 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2047 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = lmf */
2049 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 * Emit IR that unlinks cfg->lmf_var from the thread's LMF stack, mirroring
 * emit_push_lmf (): fast path stores previous_lmf back into the TLS slot;
 * slow path writes it through cfg->lmf_addr_var.
 */
2056 * Emit IR to pop the current LMF from the LMF stack.
2059 emit_pop_lmf (MonoCompile *cfg)
2061 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2067 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2068 lmf_reg = ins->dreg;
2070 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2071 /* Load previous_lmf */
2072 prev_lmf_reg = alloc_preg (cfg);
2073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2075 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2078 * Emit IR to pop the LMF:
2079 * *(lmf->lmf_addr) = lmf->prev_lmf
2081 /* This could be called before emit_push_lmf () */
2082 if (!cfg->lmf_addr_var)
2083 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2084 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2086 prev_lmf_reg = alloc_preg (cfg);
2087 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2088 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with cfg->method as argument,
 * only when MONO_PROFILE_ENTER_LEAVE profiling is on. Skipped for inlined
 * methods to avoid distorting profiling results.
 */
2093 emit_instrumentation_call (MonoCompile *cfg, void *func)
2095 MonoInst *iargs [1];
2098 * Avoid instrumenting inlined methods since it can
2099 * distort profiling results.
2101 if (cfg->method != cfg->current_method)
2104 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2105 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2106 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call IR opcode for a given return TYPE, distinguishing
 * indirect (calli -> *_REG), virtual (*_MEMBASE) and direct calls.
 * Enum and generic-inst types are unwrapped and the switch retried;
 * unknown types abort via g_error ().
 */
2111 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2114 type = mini_get_underlying_type (type);
2115 switch (type->type) {
2116 case MONO_TYPE_VOID:
2117 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2124 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2128 case MONO_TYPE_FNPTR:
2129 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2130 case MONO_TYPE_CLASS:
2131 case MONO_TYPE_STRING:
2132 case MONO_TYPE_OBJECT:
2133 case MONO_TYPE_SZARRAY:
2134 case MONO_TYPE_ARRAY:
2135 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2138 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2141 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2143 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2145 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2146 case MONO_TYPE_VALUETYPE:
2147 if (type->data.klass->enumtype) {
2148 type = mono_class_enum_basetype (type->data.klass);
2151 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2152 case MONO_TYPE_TYPEDBYREF:
2153 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2154 case MONO_TYPE_GENERICINST:
2155 type = &type->data.generic_class->container_class->byval_arg;
2158 case MONO_TYPE_MVAR:
2160 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2162 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 * Verification helper — see the original comment below. Compares the
 * evaluation-stack type/klass of ARG against the destination TARGET type;
 * returns nonzero when the store would be invalid.
 */
2168 * target_type_is_incompatible:
2169 * @cfg: MonoCompile context
2171 * Check that the item @arg on the evaluation stack can be stored
2172 * in the target type (can be a local, or field, etc).
2173 * The cfg arg can be used to check if we need verification or just
2176 * Returns: non-0 value if arg can't be stored on a target.
2179 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2181 MonoType *simple_type;
2184 if (target->byref) {
2185 /* FIXME: check that the pointed to types match */
2186 if (arg->type == STACK_MP)
2187 return arg->klass != mono_class_from_mono_type (target);
2188 if (arg->type == STACK_PTR)
2193 simple_type = mini_get_underlying_type (target);
2194 switch (simple_type->type) {
2195 case MONO_TYPE_VOID:
2203 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2207 /* STACK_MP is needed when setting pinned locals */
2208 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_FNPTR:
2215 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2216 * in native int. (#688008).
2218 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2221 case MONO_TYPE_CLASS:
2222 case MONO_TYPE_STRING:
2223 case MONO_TYPE_OBJECT:
2224 case MONO_TYPE_SZARRAY:
2225 case MONO_TYPE_ARRAY:
2226 if (arg->type != STACK_OBJ)
2228 /* FIXME: check type compatibility */
2232 if (arg->type != STACK_I8)
2236 if (arg->type != cfg->r4_stack_type)
2240 if (arg->type != STACK_R8)
2243 case MONO_TYPE_VALUETYPE:
2244 if (arg->type != STACK_VTYPE)
2246 klass = mono_class_from_mono_type (simple_type);
2247 if (klass != arg->klass)
2250 case MONO_TYPE_TYPEDBYREF:
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 if (klass != arg->klass)
2257 case MONO_TYPE_GENERICINST:
2258 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2259 if (arg->type != STACK_VTYPE)
2261 klass = mono_class_from_mono_type (simple_type);
2262 /* The second cases is needed when doing partial sharing */
2263 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2267 if (arg->type != STACK_OBJ)
2269 /* FIXME: check type compatibility */
/* Generic type vars: only valid under generic sharing. */
2273 case MONO_TYPE_MVAR:
2274 g_assert (cfg->gshared);
2275 if (mini_type_var_is_vt (simple_type)) {
2276 if (arg->type != STACK_VTYPE)
2279 if (arg->type != STACK_OBJ)
2284 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 * Verify that the stack types of ARGS match SIG, including the implicit
 * 'this' argument. Returns nonzero on mismatch; see the original comment.
 */
2290 * Prepare arguments for passing to a function call.
2291 * Return a non-zero value if the arguments can't be passed to the given
2293 * The type checks are not yet complete and some conversions may need
2294 * casts on 32 or 64 bit architectures.
2296 * FIXME: implement this using target_type_is_incompatible ()
2299 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2301 MonoType *simple_type;
/* Implicit 'this' must be an object, managed pointer or native pointer. */
2305 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2309 for (i = 0; i < sig->param_count; ++i) {
2310 if (sig->params [i]->byref) {
2311 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2315 simple_type = mini_get_underlying_type (sig->params [i]);
2317 switch (simple_type->type) {
2318 case MONO_TYPE_VOID:
2327 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2333 case MONO_TYPE_FNPTR:
2334 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2337 case MONO_TYPE_CLASS:
2338 case MONO_TYPE_STRING:
2339 case MONO_TYPE_OBJECT:
2340 case MONO_TYPE_SZARRAY:
2341 case MONO_TYPE_ARRAY:
2342 if (args [i]->type != STACK_OBJ)
2347 if (args [i]->type != STACK_I8)
2351 if (args [i]->type != cfg->r4_stack_type)
2355 if (args [i]->type != STACK_R8)
2358 case MONO_TYPE_VALUETYPE:
2359 if (simple_type->data.klass->enumtype) {
2360 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2363 if (args [i]->type != STACK_VTYPE)
2366 case MONO_TYPE_TYPEDBYREF:
2367 if (args [i]->type != STACK_VTYPE)
2370 case MONO_TYPE_GENERICINST:
2371 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2374 case MONO_TYPE_MVAR:
2376 if (args [i]->type != STACK_VTYPE)
2380 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * The paired return opcodes sit on lines elided from this extract;
 * unhandled opcodes hit g_assert_not_reached ().
 */
2388 callvirt_to_call (int opcode)
2391 case OP_CALL_MEMBASE:
2393 case OP_VOIDCALL_MEMBASE:
2395 case OP_FCALL_MEMBASE:
2397 case OP_RCALL_MEMBASE:
2399 case OP_VCALL_MEMBASE:
2401 case OP_LCALL_MEMBASE:
2404 g_assert_not_reached ();
/*
 * emit_imt_argument:
 * Materialize the IMT argument for an interface call — either IMT_ARG's
 * value, an AOT method constant, or a direct method pointer — and attach it
 * to CALL. LLVM keeps it in call->imt_arg_reg (unless it must go through
 * MONO_ARCH_IMT_REG); the JIT path always uses MONO_ARCH_IMT_REG.
 */
2410 /* Either METHOD or IMT_ARG needs to be set */
2412 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2416 if (COMPILE_LLVM (cfg)) {
2417 method_reg = alloc_preg (cfg);
2420 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2421 } else if (cfg->compile_aot) {
2422 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2425 MONO_INST_NEW (cfg, ins, OP_PCONST);
2426 ins->inst_p0 = method;
2427 ins->dreg = method_reg;
2428 MONO_ADD_INS (cfg->cbb, ins);
2432 call->imt_arg_reg = method_reg;
2434 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same constant materialization, always via the IMT reg. */
2438 method_reg = alloc_preg (cfg);
2441 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2442 } else if (cfg->compile_aot) {
2443 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2446 MONO_INST_NEW (cfg, ins, OP_PCONST);
2447 ins->inst_p0 = method;
2448 ins->dreg = method_reg;
2449 MONO_ADD_INS (cfg->cbb, ins);
2452 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its target; ip/type
 * assignments are on lines elided from this extract.
 */
2455 static MonoJumpInfo *
2456 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2458 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2462 ji->data.target = target;
/* Thin wrapper over mono_class_check_context_used (); guarded by a
 * condition (presumably cfg->gshared) elided from this extract. */
2468 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2471 return mono_class_check_context_used (klass);
/* Thin wrapper over mono_method_check_context_used (); guarded by a
 * condition (presumably cfg->gshared) elided from this extract. */
2477 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2480 return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 * Decide whether calling CMETHOD requires passing a vtable and/or a
 * method runtime generic context (mrgctx). Results are written through the
 * optional OUT_PASS_VTABLE / OUT_PASS_MRGCTX pointers.
 */
2486 * check_method_sharing:
2488 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2491 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2493 gboolean pass_vtable = FALSE;
2494 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2496 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2497 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2498 gboolean sharable = FALSE;
2500 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2504 * Pass vtable iff target method might
2505 * be shared, which means that sharing
2506 * is enabled for its class and its
2507 * context is sharable (and it's not a
2510 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) need an mrgctx instead. */
2514 if (mini_method_get_context (cmethod) &&
2515 mini_method_get_context (cmethod)->method_inst) {
2516 g_assert (!pass_vtable);
2518 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2521 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2526 if (out_pass_vtable)
2527 *out_pass_vtable = pass_vtable;
2528 if (out_pass_mrgctx)
2529 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 *   CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE select the call flavour; the
 *   opcode is chosen by ret_type_to_call_opcode () (or OP_TAILCALL for tail
 *   calls).  Also sets up valuetype-return handling and, under the soft-float
 *   fallback, converts R4 arguments before the call sequence.  NOTE(review):
 *   this view is elided — return statement, some declarations and #else/#endif
 *   arms fall in the missing lines.
 */
2532 inline static MonoCallInst *
2533 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2534 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2538 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler leave event first. */
2543 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2545 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2547 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2550 call->signature = sig;
2551 call->rgctx_reg = rgctx;
2552 sig_ret = mini_get_underlying_type (sig->ret);
2554 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or create a temp whose
 * address is passed via OP_OUTARG_VTRETADDR (see the comment below). */
2557 if (mini_type_is_vtype (sig_ret)) {
2558 call->vret_var = cfg->vret_addr;
2559 //g_assert_not_reached ();
2561 } else if (mini_type_is_vtype (sig_ret)) {
2562 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2565 temp->backend.is_pinvoke = sig->pinvoke;
2568 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2569 * address of return value to increase optimization opportunities.
2570 * Before vtype decomposition, the dreg of the call ins itself represents the
2571 * fact the call modifies the return value. After decomposition, the call will
2572 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2573 * will be transformed into an LDADDR.
2575 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2576 loada->dreg = alloc_preg (cfg);
2577 loada->inst_p0 = temp;
2578 /* We reference the call too since call->dreg could change during optimization */
2579 loada->inst_p1 = call;
2580 MONO_ADD_INS (cfg->cbb, loada);
2582 call->inst.dreg = temp->dreg;
2584 call->vret_var = loada;
2585 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2586 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2588 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2589 if (COMPILE_SOFT_FLOAT (cfg)) {
2591 * If the call has a float argument, we would need to do an r8->r4 conversion using
2592 * an icall, but that cannot be done during the call sequence since it would clobber
2593 * the call registers + the stack. So we do it before emitting the call.
2595 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2597 MonoInst *in = call->args [i];
/* The implicit 'this' argument is pointer-sized, not from sig->params. */
2599 if (i >= sig->hasthis)
2600 t = sig->params [i - sig->hasthis];
2602 t = &mono_defaults.int_class->byval_arg;
2603 t = mono_type_get_underlying_type (t);
2605 if (!t->byref && t->type == MONO_TYPE_R4) {
2606 MonoInst *iargs [1];
2610 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2612 /* The result will be in an int vreg */
2613 call->args [i] = conv;
2619 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (LLVM or native) lower the outgoing arguments. */
2622 if (COMPILE_LLVM (cfg))
2623 mono_llvm_emit_call (cfg, call);
2625 mono_arch_emit_call (cfg, call);
2627 mono_arch_emit_call (cfg, call);
2630 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2631 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument in RGCTX_REG to CALL.  When the architecture
 *   has a dedicated MONO_ARCH_RGCTX_REG it is passed in that register;
 *   otherwise only call->rgctx_arg_reg is recorded (the #else arm is in the
 *   elided lines of this view).
 */
2637 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2639 #ifdef MONO_ARCH_RGCTX_REG
2640 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2641 cfg->uses_rgctx_reg = TRUE;
2642 call->rgctx_reg = TRUE;
2644 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS,
 *   optionally passing an IMT argument and an rgctx argument.  For pinvoke
 *   wrappers with calling-convention checking enabled, brackets the call with
 *   OP_GET_SP/OP_SET_SP to detect stack imbalance and raise
 *   ExecutionEngineException.  Returns the call instruction.
 */
2651 inline static MonoInst*
2652 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2657 gboolean check_sp = FALSE;
/* Only check the SP for managed-to-native pinvoke wrappers. */
2659 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2660 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2662 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh preg so it survives until the call. */
2667 rgctx_reg = mono_alloc_preg (cfg);
2668 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2672 if (!cfg->stack_inbalance_var)
2673 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record the SP before the call for the post-call comparison. */
2675 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2676 ins->dreg = cfg->stack_inbalance_var->dreg;
2677 MONO_ADD_INS (cfg->cbb, ins);
2680 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2682 call->inst.sreg1 = addr->dreg;
2685 emit_imt_argument (cfg, call, NULL, imt_arg);
2687 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2692 sp_reg = mono_alloc_preg (cfg);
2694 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2696 MONO_ADD_INS (cfg->cbb, ins);
2698 /* Restore the stack so we don't crash when throwing the exception */
2699 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2700 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2701 MONO_ADD_INS (cfg->cbb, ins);
/* SP mismatch after the call means the native callee used a different
 * calling convention. */
2703 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2704 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2708 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2710 return (MonoInst*)call;
2714 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2717 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2719 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  Handles
 *   remoting (transparent proxies), delegate Invoke fast paths, devirtualized
 *   calls (non-virtual or final methods), interface calls through the IMT,
 *   generic-virtual calls, and an optional rgctx argument.  THIS_INS non-NULL
 *   selects a virtual call.  Returns the call instruction.  NOTE(review):
 *   this view is elided — declarations, some braces and #endif lines are
 *   missing.
 */
2722 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2723 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2725 #ifndef DISABLE_REMOTING
2726 gboolean might_be_remote = FALSE;
2728 gboolean virtual = this_ins != NULL;
2729 gboolean enable_for_aot = TRUE;
2733 gboolean need_unbox_trampoline;
2736 sig = mono_method_signature (method);
/* Keep the rgctx value alive in its own preg across arg setup. */
2739 rgctx_reg = mono_alloc_preg (cfg);
2740 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2743 if (method->string_ctor) {
2744 /* Create the real signature */
2745 /* FIXME: Cache these */
2746 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2747 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2752 context_used = mini_method_check_context_used (cfg, method);
2754 #ifndef DISABLE_REMOTING
/* The receiver may be a transparent proxy: MarshalByRef (or object) class,
 * non-virtual call, and 'this' cannot be proven local. */
2755 might_be_remote = this_ins && sig->hasthis &&
2756 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2757 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2759 if (might_be_remote && context_used) {
2762 g_assert (cfg->gshared);
/* Under gsharing, fetch the remoting-check wrapper address from the rgctx
 * and call indirectly. */
2764 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2766 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2770 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2772 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2774 #ifndef DISABLE_REMOTING
2775 if (might_be_remote)
2776 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2779 call->method = method;
2780 call->inst.flags |= MONO_INST_HAS_METHOD;
2781 call->inst.inst_left = this_ins;
2782 call->tail_call = tail;
2785 int vtable_reg, slot_reg, this_reg;
2788 this_reg = this_ins->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl. */
2790 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2791 MonoInst *dummy_use;
2793 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2795 /* Make a call to delegate->invoke_impl */
2796 call->inst.inst_basereg = this_reg;
2797 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2798 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2800 /* We must emit a dummy use here because the delegate trampoline will
2801 replace the 'this' argument with the delegate target making this activation
2802 no longer a root for the delegate.
2803 This is an issue for delegates that target collectible code such as dynamic
2804 methods of GC'able assemblies.
2806 For a test case look into #667921.
2808 FIXME: a dummy use is not the best way to do it as the local register allocator
2809 will put it on a caller save register and spil it around the call.
2810 Ideally, we would either put it on a callee save register or only do the store part.
2812 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2814 return (MonoInst*)call;
/* Devirtualization: non-virtual (or final non-remoting-wrapper) methods can
 * be called directly after a null check on 'this'. */
2817 if ((!cfg->compile_aot || enable_for_aot) &&
2818 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2819 (MONO_METHOD_IS_FINAL (method) &&
2820 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2821 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2823 * the method is not virtual, we just need to ensure this is not null
2824 * and then we can call the method directly.
2826 #ifndef DISABLE_REMOTING
2827 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2829 * The check above ensures method is not gshared, this is needed since
2830 * gshared methods can't have wrappers.
2832 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2836 if (!method->string_ctor)
2837 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2839 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2840 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2842 * the method is virtual, but we can statically dispatch since either
2843 * it's class or the method itself are sealed.
2844 * But first we need to ensure it's not a null reference.
2846 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2848 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (with a fault check) and compute
 * the slot — IMT slot (negative offset) for interfaces, vtable index
 * otherwise. */
2850 vtable_reg = alloc_preg (cfg);
2851 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2852 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2853 guint32 imt_slot = mono_method_get_imt_slot (method);
2854 emit_imt_argument (cfg, call, call->method, imt_arg);
2855 slot_reg = vtable_reg;
2856 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2858 slot_reg = vtable_reg;
2859 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2860 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* Generic virtual methods also go through the IMT machinery. */
2862 g_assert (mono_method_signature (method)->generic_param_count);
2863 emit_imt_argument (cfg, call, call->method, imt_arg);
2867 call->inst.sreg1 = slot_reg;
2868 call->inst.inst_offset = offset;
2869 call->virtual = TRUE;
2873 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2876 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2878 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper around mono_emit_method_call_full () for a plain
 *   (non-tail, no imt/rgctx argument) call using the method's own signature.
 */
2882 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2884 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): the line storing FUNC into the call (around the gap
 *   between 2895 and 2898) is missing from this elided view.
 */
2888 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2895 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2898 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2900 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to a registered JIT icall identified by its C function
 *   address FUNC, going through the icall's wrapper.
 */
2904 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2906 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2910 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *   Emit a call to a runtime function described by PATCH_TYPE/DATA.  A
 *   MonoJumpInfo is passed as the "address"; the PATCH_INFO_ABS resolving
 *   code looks it up in cfg->abs_patches to produce the real target.
 */
2914 * mono_emit_abs_call:
2916 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2918 inline static MonoInst*
2919 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2920 MonoMethodSignature *sig, MonoInst **args)
2922 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2926 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the hash used to recognize patch-info "addresses". */
2929 if (cfg->abs_patches == NULL)
2930 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2931 g_hash_table_insert (cfg->abs_patches, ji, ji);
2932 ins = mono_emit_native_call (cfg, ji, sig, args);
2933 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly (without their managed wrapper).
 *   Disabled for LLVM on some targets, when sequence points are generated,
 *   or when direct icalls are explicitly disabled.  (Return statements are
 *   in the elided lines of this view.)
 */
2938 direct_icalls_enabled (MonoCompile *cfg)
2940 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2942 if (cfg->compile_llvm)
2945 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO.  When the icall cannot raise
 *   and direct icalls are enabled, the (lazily created) wrapper method is
 *   inlined instead of called, avoiding the wrapper's overhead; otherwise
 *   fall back to calling through the wrapper.
 */
2951 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
2954 * Call the jit icall without a wrapper if possible.
2955 * The wrapper is needed for the following reasons:
2956 * - to handle exceptions thrown using mono_raise_exceptions () from the
2957 * icall function. The EH code needs the lmf frame pushed by the
2958 * wrapper to be able to unwind back to managed code.
2959 * - to be able to do stack walks for asynchronously suspended
2960 * threads when debugging.
2962 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create the wrapper once; the barrier publishes it to other
 * threads before the field is considered initialized. */
2966 if (!info->wrapper_method) {
2967 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2968 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2970 mono_memory_barrier ();
2974 * Inline the wrapper method, which is basically a call to the C icall, and
2975 * an exception check.
2977 costs = inline_method (cfg, info->wrapper_method, NULL,
2978 args, NULL, cfg->real_offset, TRUE);
2979 g_assert (costs > 0);
2980 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2984 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend a small-integer call result INS when needed.  Native
 *   (pinvoke) or LLVM calls may return sub-register integers with
 *   uninitialized upper bits, so an explicit conversion is emitted.
 *   (Return statements and 'break's are in the elided lines of this view.)
 */
2989 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2991 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2992 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2996 * Native code might return non register sized integers
2997 * without initializing the upper bits.
/* Pick the widening conversion from the load opcode of the return type. */
2999 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3000 case OP_LOADI1_MEMBASE:
3001 widen_op = OP_ICONV_TO_I1;
3003 case OP_LOADU1_MEMBASE:
3004 widen_op = OP_ICONV_TO_U1;
3006 case OP_LOADI2_MEMBASE:
3007 widen_op = OP_ICONV_TO_I2;
3009 case OP_LOADU2_MEMBASE:
3010 widen_op = OP_ICONV_TO_U2;
3016 if (widen_op != -1) {
3017 int dreg = alloc_preg (cfg);
3020 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3021 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Lazily look up and cache the managed String.memcpy helper (3 args) used
 *   for valuetype copies; aborts if corlib is too old to provide it.
 */
3031 get_memcpy_method (void)
3033 static MonoMethod *memcpy_method = NULL;
3034 if (!memcpy_method) {
3035 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3037 g_error ("Old corlib found. Install a new one");
3039 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively build a bitmap in *WB_BITMAP with one bit per pointer-sized
 *   slot (starting at OFFSET) that holds a GC reference inside KLASS.
 *   Static fields are skipped; nested valuetypes with references recurse.
 */
3043 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3045 MonoClassField *field;
3046 gpointer iter = NULL;
3048 while ((field = mono_class_get_fields (klass, &iter))) {
3051 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field offsets include the object header; strip it. */
3053 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3054 if (mini_type_is_reference (mono_field_get_type (field))) {
3055 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3056 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3058 MonoClass *field_class = mono_class_from_mono_type (field->type);
3059 if (field_class->has_references)
3060 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Prefers, in
 *   order: the architecture's inline card-table barrier, an inline card-mark
 *   sequence, and finally a call to the GC's managed write-barrier method.
 *   No-op when write barriers are disabled for this compile.
 */
3066 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3068 int card_table_shift_bits;
3069 gpointer card_table_mask;
3071 MonoInst *dummy_use;
3072 int nursery_shift_bits;
3073 size_t nursery_size;
3074 gboolean has_card_table_wb = FALSE;
3076 if (!cfg->gen_write_barriers)
3079 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3081 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3083 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3084 has_card_table_wb = TRUE;
/* Fastest path: a single arch-specific card-table barrier opcode. */
3087 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3090 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3091 wbarrier->sreg1 = ptr->dreg;
3092 wbarrier->sreg2 = value->dreg;
3093 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card-marking: card = table[ptr >> shift]; *card = 1. */
3094 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3095 int offset_reg = alloc_preg (cfg);
3096 int card_reg = alloc_preg (cfg);
3099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3100 if (card_table_mask)
3101 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3103 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3104 * IMM's larger than 32bits.
3106 if (cfg->compile_aot) {
3107 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3109 MONO_INST_NEW (cfg, ins, OP_PCONST);
3110 ins->inst_p0 = card_table;
3111 ins->dreg = card_reg;
3112 MONO_ADD_INS (cfg->cbb, ins);
3115 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3116 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write barrier. */
3118 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3119 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator/GC. */
3122 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Emit a write-barrier-aware copy of a valuetype KLASS of SIZE bytes from
 *   iargs[1] to iargs[0].  Small copies are unrolled word by word with a
 *   barrier after each reference store; larger copies (or unsuitable
 *   alignment) fall back to the mono_gc_wbarrier_value_copy_bitmap icall.
 *   (Return statements are in the elided lines of this view.)
 */
3126 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3128 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3129 unsigned need_wb = 0;
3134 /*types with references can't have alignment smaller than sizeof(void*) */
3135 if (align < SIZEOF_VOID_P)
3138 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3139 if (size > 32 * SIZEOF_VOID_P)
3142 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3144 /* We don't unroll more than 5 stores to avoid code bloat. */
3145 if (size > 5 * SIZEOF_VOID_P) {
3146 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3147 size += (SIZEOF_VOID_P - 1);
3148 size &= ~(SIZEOF_VOID_P - 1);
3150 EMIT_NEW_ICONST (cfg, iargs [2], size);
3151 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3152 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3156 destreg = iargs [0]->dreg;
3157 srcreg = iargs [1]->dreg;
3160 dest_ptr_reg = alloc_preg (cfg);
3161 tmp_reg = alloc_preg (cfg);
/* Keep a moving destination pointer so barriers see the slot address. */
3164 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3166 while (size >= SIZEOF_VOID_P) {
3167 MonoInst *load_inst;
3168 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3169 load_inst->dreg = tmp_reg;
3170 load_inst->inst_basereg = srcreg;
3171 load_inst->inst_offset = offset;
3172 MONO_ADD_INS (cfg->cbb, load_inst);
3174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Presumably guarded by the need_wb bitmap bit for this slot — the guard
 * line is in the elided view; confirm against the full file. */
3177 emit_write_barrier (cfg, iargs [0], load_inst);
3179 offset += SIZEOF_VOID_P;
3180 size -= SIZEOF_VOID_P;
3183 /*tmp += sizeof (void*)*/
3184 if (size >= SIZEOF_VOID_P) {
3185 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3186 MONO_ADD_INS (cfg->cbb, iargs [0]);
3190 /* Those cannot be references since size < sizeof (void*) */
3192 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3193 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3199 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3200 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3206 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3207 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 *   Emit a copy of valuetype KLASS from *SRC->dreg to *DEST->dreg.  NATIVE
 *   selects the native (marshalled) size.  Chooses between an inline memcpy,
 *   the write-barrier-aware copy, a value-copy icall, or (under gsharedvt) a
 *   runtime-provided size/memcpy pair.
 */
3216 * Emit code to copy a valuetype of type @klass whose address is stored in
3217 * @src->dreg to memory whose address is stored at @dest->dreg.
3220 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3222 MonoInst *iargs [4];
3225 MonoMethod *memcpy_method;
3226 MonoInst *size_ins = NULL;
3227 MonoInst *memcpy_ins = NULL;
3231 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3234 * This check breaks with spilled vars... need to handle it during verification anyway.
3235 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine come from the runtime generic ctx. */
3238 if (mini_is_gsharedvt_klass (klass)) {
3240 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3241 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3245 n = mono_class_native_size (klass, &align);
3247 n = mono_class_value_size (klass, &align);
3249 /* if native is true there should be no references in the struct */
3250 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3251 /* Avoid barriers when storing to the stack */
3252 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3253 (dest->opcode == OP_LDADDR))) {
3259 context_used = mini_class_check_context_used (cfg, klass);
3261 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3262 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3264 } else if (context_used) {
3265 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3267 if (cfg->compile_aot) {
3268 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3270 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3271 mono_class_compute_gc_descriptor (klass);
3276 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3278 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: inline a plain memcpy for small, known sizes. */
3283 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3284 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3285 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3290 iargs [2] = size_ins;
3292 EMIT_NEW_ICONST (cfg, iargs [2], n);
3294 memcpy_method = get_memcpy_method ();
3296 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3298 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily look up and cache the managed String.memset helper (3 args) used
 *   for valuetype initialization; aborts if corlib is too old to provide it.
 */
3303 get_memset_method (void)
3305 static MonoMethod *memset_method = NULL;
3306 if (!memset_method) {
3307 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3309 g_error ("Old corlib found. Install a new one");
3311 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype KLASS at *DEST->dreg.  Under
 *   gsharedvt the size and a bzero routine come from the rgctx; otherwise
 *   small sizes are inlined with mini_emit_memset () and larger ones call
 *   the managed memset helper.
 */
3315 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3317 MonoInst *iargs [3];
3320 MonoMethod *memset_method;
3321 MonoInst *size_ins = NULL;
3322 MonoInst *bzero_ins = NULL;
3323 static MonoMethod *bzero_method;
3325 /* FIXME: Optimize this for the case when dest is an LDADDR */
3326 mono_class_init (klass);
3327 if (mini_is_gsharedvt_klass (klass)) {
3328 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3329 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* bzero_method is cached in a function-local static across compiles. */
3331 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3332 g_assert (bzero_method);
3334 iargs [1] = size_ins;
3335 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3339 n = mono_class_value_size (klass, &align);
3341 if (n <= sizeof (gpointer) * 8) {
3342 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3345 memset_method = get_memset_method ();
3347 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3348 EMIT_NEW_ICONST (cfg, iargs [2], n);
3349 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR producing the runtime generic context for METHOD: the 'this'
 *   pointer's vtable for instance methods, the mrgctx variable for methods
 *   with a method-level context, or the vtable variable for static/valuetype
 *   methods.  Only valid under generic sharing (cfg->gshared).
 */
3356 * Emit IR to return either the this pointer for instance method,
3357 * or the mrgctx for static methods.
3360 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3362 MonoInst *this_ins = NULL;
3364 g_assert (cfg->gshared);
/* Instance methods of reference types can derive the rgctx from 'this'. */
3366 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3367 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3368 !method->klass->valuetype)
3369 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3371 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3372 MonoInst *mrgctx_loc, *mrgctx_var;
3374 g_assert (!this_ins);
3375 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3377 mrgctx_loc = mono_get_vtable_var (cfg);
3378 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3381 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3382 MonoInst *vtable_loc, *vtable_var;
3384 g_assert (!this_ins);
3386 vtable_loc = mono_get_vtable_var (cfg);
3387 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the variable actually holds an mrgctx, load the class vtable out
 * of it. */
3389 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3390 MonoInst *mrgctx_var = vtable_var;
3393 vtable_reg = alloc_preg (cfg);
3394 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3395 vtable_var->type = STACK_PTR;
/* Fallback path: load the vtable from the 'this' object. */
3403 vtable_reg = alloc_preg (cfg);
3404 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) an rgctx-entry descriptor for METHOD with the
 *   given patch PATCH_TYPE/PATCH_DATA and INFO_TYPE.  IN_MRGCTX selects the
 *   method rgctx over the class vtable.  (The return is in the elided lines.)
 */
3409 static MonoJumpInfoRgctxEntry *
3410 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3412 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3413 res->method = method;
3414 res->in_mrgctx = in_mrgctx;
3415 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3416 res->data->type = patch_type;
3417 res->data->data.target = patch_data;
3418 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit IR loading the value of rgctx slot ENTRY from the rgctx in RGCTX.
 *   The first (inline) version walks the rgctx slot arrays, falling back to
 *   the mono_fill_{method,class}_rgctx icall when a level or slot is still
 *   NULL; the currently-used path at the end simply calls the lazy-fetch
 *   trampoline through mono_emit_abs_call ().
 */
3426 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3429 static inline MonoInst*
3430 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3432 /* Inline version, not currently used */
3433 // FIXME: This can be called from mono_decompose_vtype_opts (), which can't create new bblocks
3435 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3437 MonoBasicBlock *is_null_bb, *end_bb;
3438 MonoInst *res, *ins, *call;
3441 slot = mini_get_rgctx_entry_slot (entry);
3443 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3444 index = MONO_RGCTX_SLOT_INDEX (slot);
3446 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Compute how many array levels must be traversed to reach INDEX. */
3447 for (depth = 0; ; ++depth) {
3448 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3450 if (index < size - 1)
3455 NEW_BBLOCK (cfg, end_bb);
3456 NEW_BBLOCK (cfg, is_null_bb);
3459 rgctx_reg = rgctx->dreg;
3461 rgctx_reg = alloc_preg (cfg);
3463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3464 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3465 NEW_BBLOCK (cfg, is_null_bb);
3467 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk down the linked slot arrays, bailing to the slow path whenever a
 * level is not yet allocated. */
3471 for (i = 0; i < depth; ++i) {
3472 int array_reg = alloc_preg (cfg);
3474 /* load ptr to next array */
3475 if (mrgctx && i == 0)
3476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3479 rgctx_reg = array_reg;
3480 /* is the ptr null? */
3481 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3482 /* if yes, jump to actual trampoline */
3483 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3487 val_reg = alloc_preg (cfg);
3488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3489 /* is the slot null? */
3490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3491 /* if yes, jump to actual trampoline */
3492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path found the value: copy it into the result register. */
3495 res_reg = alloc_preg (cfg);
3496 MONO_INST_NEW (cfg, ins, OP_MOVE);
3497 ins->dreg = res_reg;
3498 ins->sreg1 = val_reg;
3499 MONO_ADD_INS (cfg->cbb, ins);
3501 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: let the runtime fill the slot and return its value. */
3504 MONO_START_BB (cfg, is_null_bb);
3506 EMIT_NEW_ICONST (cfg, args [1], index);
3508 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3510 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3511 MONO_INST_NEW (cfg, ins, OP_MOVE);
3512 ins->dreg = res_reg;
3513 ins->sreg1 = call->dreg;
3514 MONO_ADD_INS (cfg->cbb, ins);
3515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3517 MONO_START_BB (cfg, end_bb);
/* Non-inline (current) version: call the lazy-fetch trampoline. */
3521 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR loading the RGCTX_TYPE info for KLASS from the runtime generic
 *   context of the current method.
 */
3526 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3527 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3529 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3530 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3532 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR loading the RGCTX_TYPE info for signature SIG from the runtime
 *   generic context of the current method.
 */
3536 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3537 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3539 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3540 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3542 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR loading the RGCTX_TYPE info for a gsharedvt call described by
 *   (SIG, CMETHOD) from the runtime generic context of the current method.
 */
3546 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3547 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3549 MonoJumpInfoGSharedVtCall *call_info;
3550 MonoJumpInfoRgctxEntry *entry;
3553 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3554 call_info->sig = sig;
3555 call_info->method = cmethod;
3557 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3558 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3560 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_virt_method:
 *   Emit IR loading the RGCTX_TYPE info for the virtual method VIRT_METHOD
 *   resolved against receiver type KLASS, from the runtime generic context
 *   of the current method.
 */
3564 * emit_get_rgctx_virt_method:
3566 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3569 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3570 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3572 MonoJumpInfoVirtMethod *info;
3573 MonoJumpInfoRgctxEntry *entry;
3576 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3577 info->klass = klass;
3578 info->method = virt_method;
3580 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3581 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3583 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR loading the gsharedvt info descriptor INFO for CMETHOD from the
 *   runtime generic context of the current method.
 */
3587 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3588 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3590 MonoJumpInfoRgctxEntry *entry;
3593 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3594 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3596 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *   Emit IR loading property RGCTX_TYPE of CMETHOD.  With no generic context
 *   in use this is a compile-time constant; otherwise it is fetched from the
 *   runtime generic context.
 */
3600 * emit_get_rgctx_method:
3602 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3603 * normal constants, else emit a load from the rgctx.
3606 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3607 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3609 if (!context_used) {
3612 switch (rgctx_type) {
3613 case MONO_RGCTX_INFO_METHOD:
3614 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3616 case MONO_RGCTX_INFO_METHOD_RGCTX:
3617 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Any other info type cannot be emitted as a constant. */
3620 g_assert_not_reached ();
3623 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3624 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3626 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR which fetches the RGCTX_TYPE data of FIELD from the rgctx,
 * via a MONO_PATCH_INFO_FIELD entry.
 */
3631 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3632 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3634 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3635 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3637 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot in cfg->gsharedvt_info->entries which holds
 * (rgctx_type, data), adding a new slot if none exists yet.  Existing slots
 * are reused except for MONO_RGCTX_INFO_LOCAL_OFFSET entries, which always
 * get a fresh slot.
 */
3641 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3643 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3644 MonoRuntimeGenericContextInfoTemplate *template;
/* Linear search for an existing matching entry. */
3649 for (i = 0; i < info->num_entries; ++i) {
3650 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3652 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array: start at 16, then double.  The old array is not
 * freed — it is mempool memory owned by the compile. */
3656 if (info->num_entries == info->count_entries) {
3657 MonoRuntimeGenericContextInfoTemplate *new_entries;
3658 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3660 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3662 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3663 info->entries = new_entries;
3664 info->count_entries = new_count_entries;
/* Append the new template at the end and return its index. */
3667 idx = info->num_entries;
3668 template = &info->entries [idx];
3669 template->info_type = rgctx_type;
3670 template->data = data;
3672 info->num_entries ++;
3678 * emit_get_gsharedvt_info:
3680 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3683 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot in the method's gsharedvt info table... */
3688 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3689 /* Load info->entries [idx] */
3690 dreg = alloc_preg (cfg);
/* ...then emit a pointer-sized load of entries [idx] relative to the
 * gsharedvt info variable of the current method. */
3691 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class (its byval type). */
3697 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3699 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR which runs the .cctor / class initialization for KLASS if it has
 * not run yet.  The vtable is obtained either through the rgctx (shared code)
 * or as a compile-time constant.  Two code shapes are emitted:
 *   - OP_GENERIC_CLASS_INIT (when the arch supports it and we are not
 *     compiling with LLVM): a single opcode hiding the call;
 *   - otherwise an inline "initialized" bit test on the vtable followed by a
 *     conditional call to the mono_generic_class_init icall.
 */
3703 * On return the caller must check @klass for load errors.
3706 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3708 MonoInst *vtable_arg;
3710 gboolean use_op_generic_class_init = FALSE;
3712 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx. */
3715 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3716 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable can be embedded as a constant. */
3718 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3722 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3725 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3726 if (!COMPILE_LLVM (cfg))
3727 use_op_generic_class_init = TRUE;
3730 if (use_op_generic_class_init) {
3734 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3735 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3737 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3738 ins->sreg1 = vtable_arg->dreg;
3739 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline test of MonoVTable.initialized.  The bitfield offset and
 * mask are computed once and cached in function-local statics. */
3741 static int byte_offset = -1;
3742 static guint8 bitmask;
3743 int bits_reg, inited_reg;
3744 MonoBasicBlock *inited_bb;
3745 MonoInst *args [16];
3747 if (byte_offset < 0)
3748 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3750 bits_reg = alloc_ireg (cfg);
3751 inited_reg = alloc_ireg (cfg);
/* inited_reg = vtable->initialized-bit; skip the icall when already set. */
3753 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3756 NEW_BBLOCK (cfg, inited_bb);
3758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
/* Slow path: run the class initializer. */
3761 args [0] = vtable_arg;
3762 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3764 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset (ip - header->code) when sequence
 * point generation is enabled and we are emitting the outermost method
 * (not an inlined callee).  INTR_LOC marks the point as an interruption
 * location; NONEMPTY_STACK tags points reached with values on the IL stack.
 */
3769 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3773 if (cfg->gen_seq_points && cfg->method == method) {
3774 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3776 ins->flags |= MONO_INST_NONEMPTY_STACK;
3777 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source class
 * (from the object's vtable) and the target KLASS into the JIT TLS data
 * (class_cast_from / class_cast_to), so a failing cast can produce a
 * detailed message.  NULL_CHECK guards the stores behind an obj != NULL test.
 * No-op when better_cast_details is off.
 */
3782 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3784 if (mini_get_debug_options ()->better_cast_details) {
3785 int vtable_reg = alloc_preg (cfg);
3786 int klass_reg = alloc_preg (cfg);
3787 MonoBasicBlock *is_null_bb = NULL;
3789 int to_klass_reg, context_used;
3792 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping entirely for null objects. */
3794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3795 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3798 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message below ends with a stray "." after the newline;
 * looks like a typo, but it is a runtime string so it is left untouched here. */
3800 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3804 MONO_ADD_INS (cfg->cbb, tls_get);
/* klass_reg = obj->vtable->klass (the dynamic type being cast from). */
3805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3810 context_used = mini_class_check_context_used (cfg, klass);
3812 MonoInst *class_ins;
/* Shared code needs the target class from the rgctx; otherwise a constant. */
3814 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3815 to_klass_reg = class_ins->dreg;
3817 to_klass_reg = alloc_preg (cfg);
3818 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3823 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast details in
 * the JIT TLS data once the cast has been handled.  Only class_cast_from is
 * zeroed, which is sufficient to mark the record invalid.
 */
3828 reset_cast_details (MonoCompile *cfg)
3830 /* Reset the variables holding the cast details */
3831 if (mini_get_debug_options ()->better_cast_details) {
3832 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3834 MONO_ADD_INS (cfg->cbb, tls_get);
3835 /* It is enough to reset the from field */
3836 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which verifies that OBJ's dynamic type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks).  The comparison strategy depends on compilation mode:
 *   - MONO_OPT_SHARED: compare obj->vtable->klass against the class
 *     (constant or AOT class const);
 *   - shared generic code: compare vtables via an rgctx-loaded vtable;
 *   - normal/AOT: compare obj->vtable against the vtable constant/immediate.
 */
3841 * On return the caller must check @array_class for load errors
3844 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3846 int vtable_reg = alloc_preg (cfg);
3849 context_used = mini_class_check_context_used (cfg, array_class);
3851 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj. */
3853 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3855 if (cfg->opt & MONO_OPT_SHARED) {
3856 int class_reg = alloc_preg (cfg);
3857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3858 if (cfg->compile_aot) {
3859 int klass_reg = alloc_preg (cfg);
3860 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3861 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3863 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3865 } else if (context_used) {
3866 MonoInst *vtable_ins;
3868 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3869 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3871 if (cfg->compile_aot) {
/* mono_class_vtable () failing indicates a load error; the caller checks
 * @array_class as documented above. */
3875 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3877 vt_reg = alloc_preg (cfg);
3878 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3879 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3882 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3884 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch in the comparisons above throws. */
3888 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3890 reset_cast_details (cfg);
3894 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3895 * generic code is generated.
3898 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable<T> unboxing is implemented by calling the managed
 * Nullable<T>.Unbox (object) method. */
3900 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3903 MonoInst *rgctx, *addr;
3905 /* FIXME: What if the class is shared? We might not
3906 have to get the address of the method from the
/* Shared path: fetch the method's code address from the rgctx and do an
 * indirect call. */
3908 addr = emit_get_rgctx_method (cfg, context_used, method,
3909 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3911 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3913 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable as the extra rgctx arg
 * when the target method requires it. */
3915 gboolean pass_vtable, pass_mrgctx;
3916 MonoInst *rgctx_arg = NULL;
3918 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3919 g_assert (!pass_mrgctx);
3922 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3925 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3928 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes the object on top of the stack (SP [0]) to a value
 * of type KLASS: a dynamic type check (InvalidCastException on mismatch)
 * followed by computing the address of the payload right after the
 * MonoObject header.  Returns the address instruction (stack type STACK_MP).
 */
3933 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3937 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3938 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3939 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3940 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3942 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3943 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3946 /* FIXME: generics */
3947 g_assert (klass->rank == 0);
/* Arrays can never unbox to a non-array klass. */
3950 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3951 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3954 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare element classes via an rgctx-loaded class. */
3957 MonoInst *element_class;
3959 /* This assertion is from the unboxcast insn */
3960 g_assert (klass->rank == 0);
3962 element_class = emit_get_rgctx_klass (cfg, context_used,
3963 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3965 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3966 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: direct class check with cast-details bookkeeping. */
3968 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3969 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3970 reset_cast_details (cfg);
/* The unboxed value lives immediately after the object header. */
3973 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3974 MONO_ADD_INS (cfg->cbb, add);
3975 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt (variable-size) type whose concrete
 * layout is only known at run time.  The box kind is read from the gsharedvt
 * info (MONO_RGCTX_INFO_CLASS_BOX_TYPE) and dispatched over three cases:
 * vtype (address = obj + header), reference (spill the ref to a temporary
 * and return its address), and Nullable (call the class's unbox routine via
 * an rgctx-provided code address).  Returns a load of the resulting address.
 * NOTE(review): the box-type constants compared against (1 = ref, 2 =
 * nullable) are defined elsewhere — confirm against the runtime's
 * MONO_GSHAREDVT_BOX_TYPE_* definitions.
 */
3982 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3984 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3985 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3989 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Run the castclass-unbox check before touching the payload. */
3995 args [1] = klass_inst;
3998 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4000 NEW_BBLOCK (cfg, is_ref_bb);
4001 NEW_BBLOCK (cfg, is_nullable_bb);
4002 NEW_BBLOCK (cfg, end_bb);
4003 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4010 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4011 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough case: plain vtype — payload follows the object header. */
4015 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4016 MONO_ADD_INS (cfg->cbb, addr);
4018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4021 MONO_START_BB (cfg, is_ref_bb);
4023 /* Save the ref to a temporary */
4024 dreg = alloc_ireg (cfg);
4025 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4026 addr->dreg = addr_reg;
4027 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4028 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4031 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>'s unbox code through a hand-built
 * one-argument signature (object -> T). */
4034 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4035 MonoInst *unbox_call;
4036 MonoMethodSignature *unbox_sig;
4038 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4039 unbox_sig->ret = &klass->byval_arg;
4040 unbox_sig->param_count = 1;
4041 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4042 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4044 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4045 addr->dreg = addr_reg;
4048 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4051 MONO_START_BB (cfg, end_bb);
/* All three paths join here with addr_reg holding the payload address. */
4054 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an object of type KLASS (FOR_BOX distinguishes
 * box allocations for the GC's managed allocators).  Path selection:
 *   - context_used: shared generic code — the klass/vtable comes from the
 *     rgctx; may still use a managed allocator when the instance size is
 *     known (non-gsharedvt);
 *   - MONO_OPT_SHARED: call mono_object_new (domain, klass);
 *   - AOT + out-of-line corlib class: compact mono_helper_newobj_mscorlib
 *     call keyed by the metadata token index;
 *   - otherwise: managed allocator if available, else the allocation
 *     function chosen by mono_class_get_allocation_ftn () (which may want
 *     the size in gpointer-words via pass_lw).
 */
4060 * Returns NULL and set the cfg exception on error.
4063 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4065 MonoInst *iargs [2];
4071 MonoInst *iargs [2];
/* gsharedvt types have a runtime-determined size. */
4072 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4074 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4076 if (cfg->opt & MONO_OPT_SHARED)
4077 rgctx_info = MONO_RGCTX_INFO_KLASS;
4079 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4080 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4082 if (cfg->opt & MONO_OPT_SHARED) {
4083 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4085 alloc_ftn = mono_object_new;
4088 alloc_ftn = mono_object_new_specific;
4091 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4092 if (known_instance_size) {
4093 int size = mono_class_instance_size (klass);
/* A valid object can never be smaller than its header. */
4094 if (size < sizeof (MonoObject))
4095 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4097 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4099 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4102 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4105 if (cfg->opt & MONO_OPT_SHARED) {
4106 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4107 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4109 alloc_ftn = mono_object_new;
4110 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4111 /* This happens often in argument checking code, eg. throw new FooException... */
4112 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4113 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4114 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4116 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4117 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type-load error through the cfg. */
4121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4122 cfg->exception_ptr = klass;
4126 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4128 if (managed_alloc) {
4129 int size = mono_class_instance_size (klass);
4130 if (size < sizeof (MonoObject))
4131 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4133 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4134 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4135 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4137 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the size rounded up to pointer alignment
 * and expressed in gpointer-sized words as the first argument. */
4139 guint32 lw = vtable->klass->instance_size;
4140 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4141 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4142 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4145 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4149 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL (of type KLASS) into a heap object and returns
 * the object reference.  Special cases:
 *   - Nullable<T>: delegates to the managed Nullable<T>.Box () method
 *     (indirect call via rgctx in shared code, direct call otherwise);
 *   - gsharedvt KLASS: runtime dispatch on the class's box type — vtype
 *     (allocate + copy payload), reference (the value already is the
 *     reference), or Nullable (call the class's box routine through a
 *     hand-built signature);
 *   - plain vtype: allocate and store the value after the object header.
 */
4153 * Returns NULL and set the cfg exception on error.
4156 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4158 MonoInst *alloc, *ins;
4160 if (mono_class_is_nullable (klass)) {
4161 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4164 /* FIXME: What if the class is shared? We might not
4165 have to get the method address from the RGCTX. */
4166 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4167 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4168 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4170 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4172 gboolean pass_vtable, pass_mrgctx;
4173 MonoInst *rgctx_arg = NULL;
4175 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4176 g_assert (!pass_mrgctx);
4179 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4182 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4185 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4189 if (mini_is_gsharedvt_klass (klass)) {
4190 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4191 MonoInst *res, *is_ref, *src_var, *addr;
4194 dreg = alloc_ireg (cfg);
4196 NEW_BBLOCK (cfg, is_ref_bb);
4197 NEW_BBLOCK (cfg, is_nullable_bb);
4198 NEW_BBLOCK (cfg, end_bb);
/* Same box-type dispatch as handle_unbox_gsharedvt (1 = ref, 2 = nullable). */
4199 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4201 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4203 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4204 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: vtype — allocate, then copy the value after the header. */
4207 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4210 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4211 ins->opcode = OP_STOREV_MEMBASE;
4213 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4214 res->type = STACK_OBJ;
4216 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4219 MONO_START_BB (cfg, is_ref_bb);
4221 /* val is a vtype, so has to load the value manually */
4222 src_var = get_vreg_to_inst (cfg, val->dreg);
4224 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4225 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4227 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4230 MONO_START_BB (cfg, is_nullable_bb);
4233 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4234 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4236 MonoMethodSignature *box_sig;
4239 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4240 * construct that method at JIT time, so have to do things by hand.
4242 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4243 box_sig->ret = &mono_defaults.object_class->byval_arg;
4244 box_sig->param_count = 1;
4245 box_sig->params [0] = &klass->byval_arg;
4246 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4247 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4248 res->type = STACK_OBJ;
4252 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4254 MONO_START_BB (cfg, end_bb);
/* Non-gsharedvt plain box: allocate and store the payload. */
4258 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic type) with at least one variant/covariant type parameter that is
 * instantiated with a reference type.  Such casts cannot be decided from
 * class identity alone and need the cached-cast machinery.
 */
4268 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4271 MonoGenericContainer *container;
4272 MonoGenericInst *ginst;
4274 if (klass->generic_class) {
4275 container = klass->generic_class->container_class->generic_container;
4276 ginst = klass->generic_class->context.class_inst;
4277 } else if (klass->generic_container && context_used) {
4278 container = klass->generic_container;
4279 ginst = container->context.class_inst;
/* Check each variant parameter's actual argument for reference-ness. */
4284 for (i = 0; i < container->type_argc; ++i) {
4286 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4288 type = ginst->type_argv [i];
4289 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls are safe to call
 * directly (see icall_is_direct_callable below).  Written once, then
 * read-only; publication is ordered by the memory barrier below. */
4295 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD may be invoked directly (bypassing the
 * wrapper).  Requires direct icalls to be enabled for this cfg, and the
 * method to belong either to System.Math or to a whitelisted corlib class.
 */
4298 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4300 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4301 if (!direct_icalls_enabled (cfg))
4305 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4306 * Whitelist a few icalls for now.
4308 if (!direct_icall_type_hash) {
4309 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4311 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4312 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4313 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4314 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-populated table before the global pointer becomes
 * visible to concurrent readers. */
4315 mono_memory_barrier ();
4316 direct_icall_type_hash = h;
4319 if (cmethod->klass == mono_defaults.math_class)
4321 /* No locking needed */
4322 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be decided by a simple
 * vtable/class comparison: interfaces, arrays, Nullable<T>, MarshalByRef
 * types, sealed types, and open generic parameters (VAR/MVAR) all need the
 * slower cached-cast path. */
4327 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * bracketed by save/reset of the cast-details debug bookkeeping.
 * ARGS are the wrapper's arguments (obj, klass, cache slot).
 */
4330 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4332 MonoMethod *mono_castclass;
4335 mono_castclass = mono_marshal_get_castclass_with_cache ();
4337 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4338 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4339 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Return a fresh index identifying one castclass call site, encoded as
 * (method_index << 16) | per-method counter, for CASTCLASS_CACHE patches.
 */
4345 get_castclass_cache_idx (MonoCompile *cfg)
4347 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4348 cfg->castclass_cache_index ++;
4349 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant of the cached castclass: the target class is a
 * constant, and the cache slot is either an AOT CASTCLASS_CACHE patch or a
 * freshly allocated domain word at JIT time.
 */
4353 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4362 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4365 if (cfg->compile_aot) {
4366 idx = get_castclass_cache_idx (cfg);
4367 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4369 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4372 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4373 return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL castclass opcode: cast SRC to KLASS, throwing
 * InvalidCastException on mismatch.  Strategy, in order:
 *   - variant generic arguments (non-shared): cached castclass wrapper;
 *   - MarshalByRef or interface target (non-shared): inline the castclass
 *     marshal wrapper;
 *   - complex targets or variant arguments (shared): cached castclass
 *     wrapper fed from the rgctx cast cache;
 *   - otherwise: inline null check + vtable/klass comparison, with the
 *     general mini_emit_castclass_inst () fallback.
 * INLINE_COSTS is bumped to account for the emitted code.
 */
4377 * Returns NULL and set the cfg exception on error.
4380 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4382 MonoBasicBlock *is_null_bb;
4383 int obj_reg = src->dreg;
4384 int vtable_reg = alloc_preg (cfg);
4386 MonoInst *klass_inst = NULL, *res;
4388 context_used = mini_class_check_context_used (cfg, klass);
4390 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4391 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4392 (*inline_costs) += 2;
4394 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4395 MonoMethod *mono_castclass;
4396 MonoInst *iargs [1];
4399 mono_castclass = mono_marshal_get_castclass (klass);
4402 save_cast_details (cfg, klass, src->dreg, TRUE);
/* Inline the wrapper body directly at this call site. */
4403 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4404 iargs, ip, cfg->real_offset, TRUE);
4405 reset_cast_details (cfg);
4406 CHECK_CFG_EXCEPTION;
4407 g_assert (costs > 0);
4409 cfg->real_offset += 5;
4411 (*inline_costs) += costs;
4419 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4420 MonoInst *cache_ins;
4422 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4427 /* klass - it's the second element of the cache entry*/
4428 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4431 args [2] = cache_ins;
4433 return emit_castclass_with_cache (cfg, klass, args);
4436 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* null always passes castclass. */
4439 NEW_BBLOCK (cfg, is_null_bb);
4441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4442 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4444 save_cast_details (cfg, klass, obj_reg, FALSE);
4446 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4448 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4450 int klass_reg = alloc_preg (cfg);
4452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: an exact identity comparison suffices. */
4454 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4455 /* the remoting code is broken, access the class for now */
4456 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4457 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4459 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4460 cfg->exception_ptr = klass;
4463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4468 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
4470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4471 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4475 MONO_START_BB (cfg, is_null_bb);
4477 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL isinst opcode: return SRC if it is an instance of
 * KLASS, NULL otherwise.  Complex targets (interfaces, arrays, Nullable,
 * variant generics, sealed, VAR/MVAR) go through the cached isinst marshal
 * wrapper; simpler cases get inline vtable/klass checks.  The result
 * register is pre-loaded with the object so the null/success targets only
 * need to fall through; the failure block stores NULL.
 */
4486 * Returns NULL and set the cfg exception on error.
4489 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4492 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4493 int obj_reg = src->dreg;
4494 int vtable_reg = alloc_preg (cfg);
4495 int res_reg = alloc_ireg_ref (cfg);
4496 MonoInst *klass_inst = NULL;
4501 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4502 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4503 MonoInst *cache_ins;
4505 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4510 /* klass - it's the second element of the cache entry*/
4511 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4514 args [2] = cache_ins;
4516 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4519 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4522 NEW_BBLOCK (cfg, is_null_bb);
4523 NEW_BBLOCK (cfg, false_bb);
4524 NEW_BBLOCK (cfg, end_bb);
4526 /* Do the assignment at the beginning, so the other assignment can be if converted */
4527 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4528 ins->type = STACK_OBJ;
/* isinst on null yields null (goes straight to the success join). */
4531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4532 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4536 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4537 g_assert (!context_used);
4538 /* the is_null_bb target simply copies the input register to the output */
4539 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4541 int klass_reg = alloc_preg (cfg);
/* Array case: rank and element class must both match. */
4544 int rank_reg = alloc_preg (cfg);
4545 int eclass_reg = alloc_preg (cfg);
4547 g_assert (!context_used);
4548 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element classes need extra enum-compatibility handling, since
 * enums and their underlying types are array-cast-compatible. */
4553 if (klass->cast_class == mono_defaults.object_class) {
4554 int parent_reg = alloc_preg (cfg);
4555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4556 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4557 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4558 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4559 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4560 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4561 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4563 } else if (klass->cast_class == mono_defaults.enum_class) {
4564 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4569 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4570 /* Check that the object is a vector too */
4571 int bounds_reg = alloc_preg (cfg);
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4577 /* the is_null_bb target simply copies the input register to the output */
4578 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4580 } else if (mono_class_is_nullable (klass)) {
4581 g_assert (!context_used);
4582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4583 /* the is_null_bb target simply copies the input register to the output */
4584 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: exact identity comparison suffices. */
4586 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4587 g_assert (!context_used);
4588 /* the remoting code is broken, access the class for now */
4589 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4590 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4592 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4593 cfg->exception_ptr = klass;
4596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk via mini_emit_isninst_cast_inst (). */
4604 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4605 /* the is_null_bb target simply copies the input register to the output */
4606 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result is NULL. */
4611 MONO_START_BB (cfg, false_bb);
4613 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4616 MONO_START_BB (cfg, is_null_bb);
4618 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CISINST opcode (isinst with remoting/proxy
 * awareness). Sets dreg to 0/1/2 as described in the comment below and
 * materializes the result as an OP_ICONST of type STACK_I4.
 * NOTE(review): this view of the file is line-sampled; some original
 * statements between the lines below are not visible here.
 */
4624 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4626 	/* This opcode takes as input an object reference and a class, and returns:
4627 	0) if the object is an instance of the class,
4628 	1) if the object is not instance of the class,
4629 	2) if the object is a proxy whose type cannot be determined */
4632 #ifndef DISABLE_REMOTING
4633 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4635 	MonoBasicBlock *true_bb, *false_bb, *end_bb;
4637 	int obj_reg = src->dreg;
4638 	int dreg = alloc_ireg (cfg);
4640 #ifndef DISABLE_REMOTING
4641 	int klass_reg = alloc_preg (cfg);
4644 	NEW_BBLOCK (cfg, true_bb);
4645 	NEW_BBLOCK (cfg, false_bb);
4646 	NEW_BBLOCK (cfg, end_bb);
4647 #ifndef DISABLE_REMOTING
	/* Extra targets only needed for the transparent-proxy paths */
4648 	NEW_BBLOCK (cfg, false2_bb);
4649 	NEW_BBLOCK (cfg, no_proxy_bb);
	/* A null object is "not an instance" (result 1 via false_bb) */
4652 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4653 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4655 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4656 #ifndef DISABLE_REMOTING
4657 		NEW_BBLOCK (cfg, interface_fail_bb);
4660 		tmp_reg = alloc_preg (cfg);
4661 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4662 #ifndef DISABLE_REMOTING
		/* Fast interface check; on failure fall through to the proxy tests */
4663 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4664 		MONO_START_BB (cfg, interface_fail_bb);
4665 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		/* Not a transparent proxy -> plain failure (result 1) */
4667 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4669 		tmp_reg = alloc_preg (cfg);
4670 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		/* Proxy with custom type info -> type undeterminable (result 2) */
4671 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4672 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4674 		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4677 #ifndef DISABLE_REMOTING
	/* Non-interface case: compare runtime class, with proxy handling */
4678 		tmp_reg = alloc_preg (cfg);
4679 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4680 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4682 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4683 		tmp_reg = alloc_preg (cfg);
4684 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4685 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4687 		tmp_reg = alloc_preg (cfg);
4688 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4689 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4690 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
		/* Proxy with custom info: check against the proxy's class; miss -> result 2 */
4692 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4693 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4695 		MONO_START_BB (cfg, no_proxy_bb);
4697 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4699 		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
	/* Join points: load the three possible constant results */
4703 	MONO_START_BB (cfg, false_bb);
4705 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4706 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4708 #ifndef DISABLE_REMOTING
4709 	MONO_START_BB (cfg, false2_bb);
4711 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4712 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4715 	MONO_START_BB (cfg, true_bb);
4717 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4719 	MONO_START_BB (cfg, end_bb);
	/* Result instruction handed back to the caller */
4722 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4724 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CCASTCLASS opcode (castclass with
 * remoting/proxy awareness). Sets dreg to 0/1 as described below, or
 * emits an InvalidCastException check on failure.
 * NOTE(review): this view of the file is line-sampled; some original
 * statements between the lines below are not visible here.
 */
4730 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4732 	/* This opcode takes as input an object reference and a class, and returns:
4733 	0) if the object is an instance of the class,
4734 	1) if the object is a proxy whose type cannot be determined
4735 	an InvalidCastException exception is thrown otherwise*/
4738 #ifndef DISABLE_REMOTING
4739 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4741 	MonoBasicBlock *ok_result_bb;
4743 	int obj_reg = src->dreg;
4744 	int dreg = alloc_ireg (cfg);
4745 	int tmp_reg = alloc_preg (cfg);
4747 #ifndef DISABLE_REMOTING
4748 	int klass_reg = alloc_preg (cfg);
4749 	NEW_BBLOCK (cfg, end_bb);
4752 	NEW_BBLOCK (cfg, ok_result_bb);
	/* A null reference always casts successfully (result 0) */
4754 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4755 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
	/* Record cast details so a failed cast can produce a useful message */
4757 	save_cast_details (cfg, klass, obj_reg, FALSE);
4759 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4760 #ifndef DISABLE_REMOTING
4761 		NEW_BBLOCK (cfg, interface_fail_bb);
4763 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		/* Fast interface check; failures fall through to the proxy tests */
4764 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4765 		MONO_START_BB (cfg, interface_fail_bb);
4766 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		/* Non-proxy classes that reach here fail the cast */
4768 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4770 		tmp_reg = alloc_preg (cfg);
4771 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		/* Proxy without custom type info -> throw InvalidCastException */
4772 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4773 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
		/* Proxy whose type cannot be determined -> result 1 */
4775 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4776 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4778 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4779 		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4780 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4783 #ifndef DISABLE_REMOTING
	/* Non-interface case: class check with transparent-proxy handling */
4784 		NEW_BBLOCK (cfg, no_proxy_bb);
4786 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4787 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4788 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4790 		tmp_reg = alloc_preg (cfg);
4791 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4792 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4794 		tmp_reg = alloc_preg (cfg);
4795 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4796 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4797 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4799 		NEW_BBLOCK (cfg, fail_1_bb);
		/* Proxy with custom info: class miss yields result 1, not a throw */
4801 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4803 		MONO_START_BB (cfg, fail_1_bb);
4805 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4806 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4808 		MONO_START_BB (cfg, no_proxy_bb);
		/* Ordinary object: a failed cast here throws */
4810 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4812 		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4816 	MONO_START_BB (cfg, ok_result_bb);
4818 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4820 #ifndef DISABLE_REMOTING
4821 	MONO_START_BB (cfg, end_bb);
	/* Result instruction handed back to the caller */
4825 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4827 	ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inlined IR for Enum.HasFlag: load the enum value from ENUM_THIS,
 * AND it with ENUM_FLAG, and compare the result for equality with
 * ENUM_FLAG ((v & flag) == flag). Uses 32-bit or 64-bit opcodes depending
 * on the underlying enum type (is_i4 — set on lines not visible in this
 * sampled view).
 */
4832 static G_GNUC_UNUSED MonoInst*
4833 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4835 	MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4836 	guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4839 	switch (enum_type->type) {
4842 #if SIZEOF_REGISTER == 8
4854 		MonoInst *load, *and, *cmp, *ceq;
4855 		int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4856 		int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4857 		int dest_reg = alloc_ireg (cfg);
		/* (*enum_this & enum_flag) == enum_flag, result as a 0/1 int */
4859 		EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4860 		EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4861 		EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4862 		EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4864 		ceq->type = STACK_I4;
		/* Decompose eagerly so later passes see only low-level opcodes */
4867 		load = mono_decompose_opcode (cfg, load);
4868 		and = mono_decompose_opcode (cfg, and);
4869 		cmp = mono_decompose_opcode (cfg, cmp);
4870 		ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR for a delegate constructor: allocate the delegate
 * object, store its target/method/method_code/invoke_impl fields, and wire
 * up the delegate trampoline (AOT patch or eagerly created trampoline).
 * Mirrors the checks normally done by mono_delegate_ctor ().
 *
 * 4878 * Returns NULL and set the cfg exception on error.
 */
4880 static G_GNUC_UNUSED MonoInst*
4881 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4885 	gpointer trampoline;
4886 	MonoInst *obj, *method_ins, *tramp_ins;
4891 		MonoMethod *invoke = mono_get_delegate_invoke (klass);
		/* Bail out if no virtual invoke impl exists for this signature */
4894 		if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4898 	obj = handle_alloc (cfg, klass, FALSE, 0);
4902 	/* Inline the contents of mono_delegate_ctor */
4904 	/* Set target field */
4905 	/* Optimize away setting of NULL target */
4906 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4907 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		/* target is a GC reference stored into a heap object: needs a barrier */
4908 		if (cfg->gen_write_barriers) {
4909 			dreg = alloc_preg (cfg);
4910 			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4911 			emit_write_barrier (cfg, ptr, target);
4915 	/* Set method field */
4916 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4917 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4920 	 * To avoid looking up the compiled code belonging to the target method
4921 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4922 	 * store it, and we fill it after the method has been compiled.
4924 	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4925 		MonoInst *code_slot_ins;
4928 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4930 			domain = mono_domain_get ();
			/* method_code_hash is lazily created under the domain lock */
4931 			mono_domain_lock (domain);
4932 			if (!domain_jit_info (domain)->method_code_hash)
4933 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4934 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4936 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4937 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4939 			mono_domain_unlock (domain);
4941 			if (cfg->compile_aot)
4942 				EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4944 				EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4946 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	/* Trampoline: AOT uses a patch-info constant, JIT creates it eagerly */
4949 	if (cfg->compile_aot) {
4950 		MonoDelegateClassMethodPair *del_tramp;
4952 		del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4953 		del_tramp->klass = klass;
4954 		del_tramp->method = context_used ? NULL : method;
4955 		del_tramp->virtual = virtual;
4956 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4959 			trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4961 			trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4962 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4965 	/* Set invoke_impl field */
4967 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
		/* tramp_ins points to a MonoDelegateTrampInfo: copy its fields */
4969 		dreg = alloc_preg (cfg);
4970 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4971 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4973 		dreg = alloc_preg (cfg);
4974 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4975 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
	/* Record whether this is a virtual delegate (byte-sized field) */
4978 	dreg = alloc_preg (cfg);
4979 	MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
4980 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4982 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall for a multi-dimensional
 * newarr/newobj with RANK dimension arguments on the stack (SP). Marks the
 * method as using varargs and disables LLVM, since the icall uses a vararg
 * calling convention LLVM cannot emit here.
 */
4988 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4990 	MonoJitICallInfo *info;
4992 	/* Need to register the icall so it gets an icall wrapper */
4993 	info = mono_get_array_new_va_icall (rank);
4995 	cfg->flags |= MONO_CFG_HAS_VARARGS;
4997 	/* mono_array_new_va () needs a vararg calling convention */
4998 	cfg->disable_llvm = TRUE;
5000 	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5001 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * 5005 * handle_constrained_gsharedvt_call:
 *
 * 5007 * Handle constrained calls where the receiver is a gsharedvt type.
 * 5008 * Return the instruction representing the call. Set the cfg exception on failure.
 */
5011 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5012 										gboolean *ref_emit_widen)
5014 	MonoInst *ins = NULL;
5015 	gboolean emit_widen = *ref_emit_widen;
	/*
5018 	 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5019 	 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5020 	 * pack the arguments into an array, and do the rest of the work in an icall.
	 */
5022 	if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5023 		(MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5024 		(fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5025 		MonoInst *args [16];
		/*
5028 		 * This case handles calls to
5029 		 * - object:ToString()/Equals()/GetHashCode(),
5030 		 * - System.IComparable<T>:CompareTo()
5031 		 * - System.IEquatable<T>:Equals ()
5032 		 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
		 */
		/* args [1] = the target method, possibly looked up through the rgctx */
5036 		if (mono_method_check_context_used (cmethod))
5037 			args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5039 			EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5040 		args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5042 		/* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5043 		if (fsig->hasthis && fsig->param_count) {
5044 			/* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5045 			MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5046 			ins->dreg = alloc_preg (cfg);
5047 			ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5048 			MONO_ADD_INS (cfg->cbb, ins);
5051 			if (mini_is_gsharedvt_type (fsig->params [0])) {
				/* args [3] = box-type info so the icall can box if needed */
5054 				args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5056 				EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5057 				addr_reg = ins->dreg;
5058 				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5060 				EMIT_NEW_ICONST (cfg, args [3], 0);
5061 				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
			/* No extra arguments: pass zeros for box-info and arg array */
5064 			EMIT_NEW_ICONST (cfg, args [3], 0);
5065 			EMIT_NEW_ICONST (cfg, args [4], 0);
5067 		ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
		/* The icall returns a boxed result: unbox/copy it back as needed */
5070 		if (mini_is_gsharedvt_type (fsig->ret)) {
5071 			ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5072 		} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
			/* Skip the MonoObject header to reach the boxed payload */
5076 			NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5077 			MONO_ADD_INS (cfg->cbb, add);
5079 			NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5080 			MONO_ADD_INS (cfg->cbb, ins);
5081 			/* ins represents the call result */
5084 		GSHAREDVT_FAILURE (CEE_CALLVIRT);
5087 	*ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the method which loads
 * the GOT address into cfg->got_var, and add a dummy use in the exit
 * bblock so liveness analysis keeps the variable alive for the whole
 * method. No-op if there is no got_var or it was already allocated.
 */
5096 mono_emit_load_got_addr (MonoCompile *cfg)
5098 	MonoInst *getaddr, *dummy_use;
5100 	if (!cfg->got_var || cfg->got_var_allocated)
5103 	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5104 	getaddr->cil_code = cfg->header->code;
5105 	getaddr->dreg = cfg->got_var->dreg;
5107 	/* Add it to the start of the first bblock */
5108 	if (cfg->bb_entry->code) {
		/* Prepend manually: MONO_ADD_INS would append */
5109 		getaddr->next = cfg->bb_entry->code;
5110 		cfg->bb_entry->code = getaddr;
5113 		MONO_ADD_INS (cfg->bb_entry, getaddr);
5115 	cfg->got_var_allocated = TRUE;
	/*
5118 	 * Add a dummy use to keep the got_var alive, since real uses might
5119 	 * only be generated by the back ends.
5120 	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
5122 	 * It would be better to make the usage of the got var explicit in all
5123 	 * cases when the backend needs it (i.e. calls, throw etc.), so this
5124 	 * wouldn't be needed.
	 */
5126 	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5127 	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * Cached IL-size limit for inlining. Initialized lazily in
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, falling back to INLINE_LENGTH_LIMIT.
 */
5130 static int inline_limit;
5131 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects methods that are explicitly non-inlinable (NOINLINING,
 * synchronized, marshal-by-ref), too large (unless AGGRESSIVE_INLINING),
 * too deeply nested, or whose class would require a cctor call inside the
 * inlined code. Also rejects R4 signatures under soft-float.
 */
5134 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5136 	MonoMethodHeaderSummary header;
5138 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5139 	MonoMethodSignature *sig = mono_method_signature (method);
5143 	if (cfg->disable_inline)
	/* Guard against runaway recursive inlining */
5148 	if (cfg->inline_depth > 10)
5151 	if (!mono_method_get_header_summary (method, &header))
5154 	/*runtime, icall and pinvoke are checked by summary call*/
5155 	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5156 	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5157 	    (mono_class_is_marshalbyref (method->klass)) ||
5161 	/* also consider num_locals? */
5162 	/* Do the size check early to avoid creating vtables */
5163 	if (!inline_limit_inited) {
5164 		if (g_getenv ("MONO_INLINELIMIT"))
5165 			inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5167 			inline_limit = INLINE_LENGTH_LIMIT;
5168 		inline_limit_inited = TRUE;
5170 	if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
	/*
5174 	 * if we can initialize the class of the method right away, we do,
5175 	 * otherwise we don't allow inlining if the class needs initialization,
5176 	 * since it would mean inserting a call to mono_runtime_class_init()
5177 	 * inside the inlined code
	 */
5179 	if (!(cfg->opt & MONO_OPT_SHARED)) {
5180 		/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5181 		if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5182 			vtable = mono_class_vtable (cfg->domain, method->klass);
5185 				if (!cfg->compile_aot)
5186 					mono_runtime_class_init (vtable);
5187 		} else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5188 			if (cfg->run_cctors && method->klass->has_cctor) {
5189 				/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5190 				if (!method->klass->runtime_info)
5191 					/* No vtable created yet */
5193 				vtable = mono_class_vtable (cfg->domain, method->klass);
5196 				/* This makes so that inline cannot trigger */
5197 				/* .cctors: too many apps depend on them */
5198 				/* running with a specific order... */
5199 				if (! vtable->initialized)
5201 				mono_runtime_class_init (vtable);
5203 		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5204 			if (!method->klass->runtime_info)
5205 				/* No vtable created yet */
5207 			vtable = mono_class_vtable (cfg->domain, method->klass);
5210 			if (!vtable->initialized)
		/*
5215 		 * If we're compiling for shared code
5216 		 * the cctor will need to be run at aot method load time, for example,
5217 		 * or at the end of the compilation of the inlining method.
		 */
5219 		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5223 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	/* Soft-float: R4 args/returns would need decomposition, so refuse */
5224 	if (mono_arch_is_soft_float ()) {
5226 		if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5228 		for (i = 0; i < sig->param_count; ++i)
5229 			if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
	/* Methods the current compilation explicitly refuses to inline */
5234 	if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires emitting a
 * class-initialization check for KLASS. JIT compilation can skip the
 * check when the vtable is already initialized; AOT cannot rely on that.
 */
5241 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5243 	if (!cfg->compile_aot) {
5245 		if (vtable->initialized)
	/* BeforeFieldInit cctors only matter when accessed from outside the class */
5249 	if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5250 		if (cfg->method == method)
5254 	if (!mono_class_needs_cctor_run (klass, method))
5257 	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5258 		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS. If BCHECK is set a bounds check is
 * emitted. Uses an x86/amd64 LEA fast path for power-of-two element
 * sizes, and an rgctx lookup of the element size for gsharedvt classes.
 * Returns the address instruction (type STACK_MP).
 */
5265 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5269 	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5272 	if (mini_is_gsharedvt_variable_klass (klass)) {
5275 		mono_class_init (klass);
5276 		size = mono_class_array_element_size (klass);
5279 	mult_reg = alloc_preg (cfg);
5280 	array_reg = arr->dreg;
5281 	index_reg = index->dreg;
5283 #if SIZEOF_REGISTER == 8
5284 	/* The array reg is 64 bits but the index reg is only 32 */
5285 	if (COMPILE_LLVM (cfg)) {
5287 		index2_reg = index_reg;
5289 		index2_reg = alloc_preg (cfg);
5290 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5293 	if (index->type == STACK_I8) {
		/* 32-bit target: truncate a 64-bit index */
5294 		index2_reg = alloc_preg (cfg);
5295 		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5297 		index2_reg = index_reg;
5302 		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5304 #if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Fast path: fold the scale into a single LEA for 1/2/4/8-byte elements */
5305 	if (size == 1 || size == 2 || size == 4 || size == 8) {
5306 		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5308 		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5309 		ins->klass = mono_class_get_element_class (klass);
5310 		ins->type = STACK_MP;
5316 	add_reg = alloc_ireg_mp (cfg);
5319 		MonoInst *rgctx_ins;
		/* gsharedvt: element size is only known at runtime via the rgctx */
5322 		g_assert (cfg->gshared);
5323 		context_used = mini_class_check_context_used (cfg, klass);
5324 		g_assert (context_used);
5325 		rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5326 		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5328 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	/* addr = array + index * size + offsetof (MonoArray, vector) */
5330 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5331 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5332 	ins->klass = mono_class_get_element_class (klass);
5333 	ins->type = STACK_MP;
5334 	MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 * array: per-dimension lower-bound adjustment and range check against the
 * MonoArrayBounds pairs, then addr = arr + ((i' * len2) + j') * size +
 * offsetof (MonoArray, vector). Only compiled when the target has native
 * mul/div (depends on OP_LMUL — see the caller's comment).
 */
5341 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5343 	int bounds_reg = alloc_preg (cfg);
5344 	int add_reg = alloc_ireg_mp (cfg);
5345 	int mult_reg = alloc_preg (cfg);
5346 	int mult2_reg = alloc_preg (cfg);
5347 	int low1_reg = alloc_preg (cfg);
5348 	int low2_reg = alloc_preg (cfg);
5349 	int high1_reg = alloc_preg (cfg);
5350 	int high2_reg = alloc_preg (cfg);
5351 	int realidx1_reg = alloc_preg (cfg);
5352 	int realidx2_reg = alloc_preg (cfg);
5353 	int sum_reg = alloc_preg (cfg);
5354 	int index1, index2, tmpreg;
5358 	mono_class_init (klass);
5359 	size = mono_class_array_element_size (klass);
5361 	index1 = index_ins1->dreg;
5362 	index2 = index_ins2->dreg;
5364 #if SIZEOF_REGISTER == 8
5365 	/* The array reg is 64 bits but the index reg is only 32 */
5366 	if (COMPILE_LLVM (cfg)) {
5369 		tmpreg = alloc_preg (cfg);
5370 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5372 		tmpreg = alloc_preg (cfg);
5373 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5377 	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5381 	/* range checking */
5382 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5383 				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
	/* Dimension 1: realidx1 = index1 - lower_bound; must be < length */
5385 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5386 				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5387 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5388 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5389 				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5390 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5391 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* Dimension 2: same check against the second MonoArrayBounds entry */
5393 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5394 				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5395 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5396 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5397 				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5398 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5399 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* Row-major address: ((realidx1 * len2) + realidx2) * size */
5401 	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5402 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5403 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5404 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5405 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5407 	ins->type = STACK_MP;
5409 	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Get/Set/Address accessor
 * method CMETHOD. Rank-1 and (when intrinsics are enabled and mul/div is
 * native) rank-2 arrays are handled inline; higher ranks and gsharedvt
 * element classes fall back to a call to the marshal-generated
 * array-address helper.
 */
5416 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5420 	MonoMethod *addr_method;
5422 	MonoClass *eclass = cmethod->klass->element_class;
	/* For a setter the last signature parameter is the value, not an index */
5424 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5427 		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5429 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5430 	/* emit_ldelema_2 depends on OP_LMUL */
5431 	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5432 		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5436 	if (mini_is_gsharedvt_variable_klass (eclass))
	/* Generic fallback: call the marshal helper for this rank/element size */
5439 	element_size = mono_class_array_element_size (eclass);
5440 	addr_method = mono_marshal_get_array_address (rank, element_size);
5441 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction/Debugger.Break (). */
5446 static MonoBreakPolicy
5447 always_insert_breakpoint (MonoMethod *method)
5449 	return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5452 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
/*
 * 5455 * mono_set_break_policy:
 * 5456 * policy_callback: the new callback function
 *
 * 5458 * Allow embedders to decide whether to actually obey breakpoint instructions
 * 5459 * (both break IL instructions and Debugger.Break () method calls), for example
 * 5460 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
 * 5461 * untrusted or semi-trusted code.
 *
 * 5463 * @policy_callback will be called every time a break point instruction needs to
 * 5464 * be inserted with the method argument being the method that calls Debugger.Break()
 * 5465 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
 * 5466 * if it wants the breakpoint to not be effective in the given method.
 * 5467 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
5470 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5472 	if (policy_callback)
5473 		break_policy_func = policy_callback;
	/* NULL resets to the default always-break policy */
5475 		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — identifier kept for callers)
 *
 *   Query the installed break policy for METHOD and return whether a
 * breakpoint should actually be emitted. Unknown policy values are
 * reported with a warning.
 */
5479 should_insert_brekpoint (MonoMethod *method) {
5480 	switch (break_policy_func (method)) {
5481 	case MONO_BREAK_POLICY_ALWAYS:
5483 	case MONO_BREAK_POLICY_NEVER:
5485 	case MONO_BREAK_POLICY_ON_DBG:
5486 		g_warning ("mdb no longer supported");
5489 		g_warning ("Incorrect value returned from break policy callback");
5494 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Get/SetGenericValueImpl icalls: compute the element address
 * and copy the value between the array slot and the by-ref argument.
 * A write barrier is emitted when storing a reference into the array.
 */
5496 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5498 	MonoInst *addr, *store, *load;
5499 	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5501 	/* the bounds check is already done by the callers */
5502 	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	/* is_set: copy *args [2] into the array slot */
5504 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5505 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5506 		if (mini_type_is_reference (fsig->params [2]))
5507 			emit_write_barrier (cfg, addr, load);
	/* get: copy the array slot into *args [2] */
5509 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5510 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type (after inflation/sharing rules). */
5517 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5519 	return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: SP[0] = array, SP[1] = index, SP[2] = value.
 * Reference-type stores with safety checks go through the virtual
 * stelemref marshal helper (which performs the array covariance check);
 * other cases store directly, with a constant-folded offset when the
 * index is an OP_ICONST and a write barrier for reference elements.
 */
5523 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
	/* Storing a non-null reference with checks: needs the covariance check */
5525 	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5526 		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5527 		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5528 		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5529 		MonoInst *iargs [3];
5532 			mono_class_setup_vtable (obj_array);
5533 		g_assert (helper->slot);
5535 		if (sp [0]->type != STACK_OBJ)
5537 		if (sp [2]->type != STACK_OBJ)
		/* Virtual call: dispatches on the runtime array type */
5544 		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5548 		if (mini_is_gsharedvt_variable_klass (klass)) {
5551 			// FIXME-VT: OP_ICONST optimization
5552 			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5553 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5554 			ins->opcode = OP_STOREV_MEMBASE;
5555 		} else if (sp [1]->opcode == OP_ICONST) {
			/* Constant index: fold the element offset at compile time */
5556 			int array_reg = sp [0]->dreg;
5557 			int index_reg = sp [1]->dreg;
5558 			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5561 				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5562 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5564 			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5565 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5566 			if (generic_class_is_reference_type (cfg, klass))
5567 				emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeStore/UnsafeLoad intrinsics: a store delegates
 * to emit_array_store without safety checks; a load computes the element
 * address without a bounds check and loads from it.
 */
5574 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
	/* Element class: third parameter for a store, return type for a load */
5579 		eklass = mono_class_from_mono_type (fsig->params [2]);
5581 		eklass = mono_class_from_mono_type (fsig->ret);
5584 		return emit_array_store (cfg, eklass, args, FALSE);
5586 		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5587 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Whether a value of PARAM_KLASS can be reinterpreted as RETURN_KLASS
 * for the UnsafeMov intrinsic: both must be valuetypes, with no GC
 * references, not mixing struct/primitive representations, not floats,
 * and of identical size.
 */
5593 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5597 	param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5599 	//Only allow for valuetypes
5600 	if (!param_klass->valuetype || !return_klass->valuetype)
	//No GC references: reinterpreting them would confuse the collector
5604 	if (param_klass->has_references || return_klass->has_references)
5607 	/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5608 	if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5609 		(!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
	//Floats live in different registers than integers: no raw move
5612 	if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5613 		return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5616 	//And have the same size
5617 	if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Emit IR for Array.UnsafeMov: a bit-level reinterpretation of the
 * argument as the return type. Allowed when the two valuetypes are
 * layout-compatible (is_unsafe_mov_compatible), either directly or as the
 * element types of two rank-1 arrays.
 */
5623 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5625 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5626 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5628 //Valuetypes that are semantically equivalent
5629 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5632 //Arrays of valuetypes that are semantically equivalent
5633 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to emit intrinsic IR for a constructor call: first the SIMD
 * intrinsics (when MONO_OPT_SIMD is enabled and the architecture supports
 * them), then the native-types intrinsics. Returns the emitted instruction
 * or presumably NULL when nothing applies (tail not visible here).
 */
5640 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5642 #ifdef MONO_ARCH_SIMD_INTRINSICS
5643 MonoInst *ins = NULL;
5645 if (cfg->opt & MONO_OPT_SIMD) {
5646 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5652 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (MONO_MEMORY_BARRIER_ACQ/REL/SEQ) to the current basic block.
 */
5656 emit_memory_barrier (MonoCompile *cfg, int kind)
5658 MonoInst *ins = NULL;
5659 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5660 MONO_ADD_INS (cfg->cbb, ins);
/* The barrier strength is carried in the backend-specific field. */
5661 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsics that only the LLVM backend supports: System.Math
 * Sin/Cos/Sqrt/Abs(double) as single-operand FP opcodes, and (with
 * MONO_OPT_CMOV) branchless integer Min/Max for I4/U4/I8/U8.
 */
5667 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5669 MonoInst *ins = NULL;
5672 /* The LLVM backend supports these intrinsics */
5673 if (cmethod->klass == mono_defaults.math_class) {
5674 if (strcmp (cmethod->name, "Sin") == 0) {
5676 } else if (strcmp (cmethod->name, "Cos") == 0) {
5678 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5680 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one source, result in a fresh FP register. */
5684 if (opcode && fsig->param_count == 1) {
5685 MONO_INST_NEW (cfg, ins, opcode);
5686 ins->type = STACK_R8;
5687 ins->dreg = mono_alloc_freg (cfg);
5688 ins->sreg1 = args [0]->dreg;
5689 MONO_ADD_INS (cfg->cbb, ins);
/* Branchless Min/Max needs conditional moves. */
5693 if (cfg->opt & MONO_OPT_CMOV) {
5694 if (strcmp (cmethod->name, "Min") == 0) {
5695 if (fsig->params [0]->type == MONO_TYPE_I4)
5697 if (fsig->params [0]->type == MONO_TYPE_U4)
5698 opcode = OP_IMIN_UN;
5699 else if (fsig->params [0]->type == MONO_TYPE_I8)
5701 else if (fsig->params [0]->type == MONO_TYPE_U8)
5702 opcode = OP_LMIN_UN;
5703 } else if (strcmp (cmethod->name, "Max") == 0) {
5704 if (fsig->params [0]->type == MONO_TYPE_I4)
5706 if (fsig->params [0]->type == MONO_TYPE_U4)
5707 opcode = OP_IMAX_UN;
5708 else if (fsig->params [0]->type == MONO_TYPE_I8)
5710 else if (fsig->params [0]->type == MONO_TYPE_U8)
5711 opcode = OP_LMAX_UN;
/* Binary Min/Max: stack type follows the 32/64-bit parameter width. */
5715 if (opcode && fsig->param_count == 2) {
5716 MONO_INST_NEW (cfg, ins, opcode);
5717 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5718 ins->dreg = mono_alloc_ireg (cfg);
5719 ins->sreg1 = args [0]->dreg;
5720 ins->sreg2 = args [1]->dreg;
5721 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in shared (generic-sharing) code:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov icalls.
 */
5729 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5731 if (cmethod->klass == mono_defaults.array_class) {
5732 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5733 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5734 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5735 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5736 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5737 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   The main per-method intrinsic dispatcher: recognizes well-known corlib
 * methods (String, Object, Array, RuntimeHelpers, Thread, Interlocked,
 * Volatile, Debugger, Environment, Math, ObjCRuntime.Selector) and emits
 * specialized IR for them instead of a call. Falls through to SIMD,
 * native-type, LLVM-specific and finally mono_arch_emit_inst_for_method
 * for architecture-specific intrinsics.
 */
5744 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5746 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5748 static MonoClass *runtime_helpers_class = NULL;
5749 if (! runtime_helpers_class)
5750 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5751 "System.Runtime.CompilerServices", "RuntimeHelpers");
/*
 * String: get_Chars is inlined as a bounds-checked 16-bit load from
 * MonoString::chars; get_Length becomes OP_STRLEN (decomposed later).
 */
5753 if (cmethod->klass == mono_defaults.string_class) {
5754 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5755 int dreg = alloc_ireg (cfg);
5756 int index_reg = alloc_preg (cfg);
5757 int add_reg = alloc_preg (cfg);
5759 #if SIZEOF_REGISTER == 8
5760 /* The array reg is 64 bits but the index reg is only 32 */
5761 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5763 index_reg = args [1]->dreg;
5765 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
/* x86/amd64 can fold base + index*2 + offset into a single LEA. */
5767 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5768 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5769 add_reg = ins->dreg;
5770 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: address = string + index*2, then an unsigned 16-bit load. */
5773 int mult_reg = alloc_preg (cfg);
5774 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5775 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5776 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5777 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5779 type_from_op (cfg, ins, NULL, NULL);
5781 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5782 int dreg = alloc_ireg (cfg);
5783 /* Decompose later to allow more optimizations */
5784 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5785 ins->type = STACK_I4;
/* OP_STRLEN dereferences the string, so it can fault on null. */
5786 ins->flags |= MONO_INST_FAULT;
5787 cfg->cbb->has_array_access = TRUE;
5788 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/*
 * Object: GetType loads vtable->type; InternalGetHashCode (only valid with a
 * non-moving GC) hashes the object address; the empty .ctor becomes a NOP.
 */
5793 } else if (cmethod->klass == mono_defaults.object_class) {
5795 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5796 int dreg = alloc_ireg_ref (cfg);
5797 int vt_reg = alloc_preg (cfg);
5798 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5799 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5800 type_from_op (cfg, ins, NULL, NULL);
5803 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5804 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5805 int dreg = alloc_ireg (cfg);
5806 int t1 = alloc_ireg (cfg);
/* (addr << 3) * 2654435761: multiplicative hashing of the object address. */
5808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5809 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5810 ins->type = STACK_I4;
5814 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5815 MONO_INST_NEW (cfg, ins, OP_NOP);
5816 MONO_ADD_INS (cfg->cbb, ins);
/*
 * Array: generic element accessors, an inlined GetLength/GetLowerBound (0)
 * with a fast path for szarrays (bounds == NULL), get_Rank read from the
 * vtable, and get_Length read from MonoArray::max_length.
 */
5820 } else if (cmethod->klass == mono_defaults.array_class) {
5821 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5822 return emit_array_generic_access (cfg, fsig, args, FALSE);
5823 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5824 return emit_array_generic_access (cfg, fsig, args, TRUE);
5826 #ifndef MONO_BIG_ARRAYS
5828 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5831 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5832 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5833 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5834 int dreg = alloc_ireg (cfg);
5835 int bounds_reg = alloc_ireg_mp (cfg);
5836 MonoBasicBlock *end_bb, *szarray_bb;
5837 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5839 NEW_BBLOCK (cfg, end_bb);
5840 NEW_BBLOCK (cfg, szarray_bb);
/* bounds == NULL means a single-dimension zero-based (sz)array. */
5842 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5843 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5844 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5845 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5846 /* Non-szarray case */
5848 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5849 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5851 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5852 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5853 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5854 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length, lower bound is always 0. */
5857 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5858 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5860 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5861 MONO_START_BB (cfg, end_bb);
5863 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5864 ins->type = STACK_I4;
/* Quick reject: remaining Array intrinsics all start with "get_". */
5870 if (cmethod->name [0] != 'g')
5873 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5874 int dreg = alloc_ireg (cfg);
5875 int vtable_reg = alloc_preg (cfg);
5876 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5877 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5878 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5879 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5880 type_from_op (cfg, ins, NULL, NULL);
5883 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5884 int dreg = alloc_ireg (cfg);
5886 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5887 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5888 type_from_op (cfg, ins, NULL, NULL);
/* RuntimeHelpers: OffsetToStringData is a compile-time constant. */
5893 } else if (cmethod->klass == runtime_helpers_class) {
5895 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5896 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/*
 * Thread: SpinWait_nop, MemoryBarrier and the VolatileRead/VolatileWrite
 * family. The volatile accesses below are emitted as a plain load/store
 * paired with an explicit acquire resp. release barrier.
 */
5900 } else if (cmethod->klass == mono_defaults.thread_class) {
5901 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5902 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5903 MONO_ADD_INS (cfg->cbb, ins);
5905 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5906 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5907 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5909 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
/* Pick the load opcode from the parameter's element type. */
5911 if (fsig->params [0]->type == MONO_TYPE_I1)
5912 opcode = OP_LOADI1_MEMBASE;
5913 else if (fsig->params [0]->type == MONO_TYPE_U1)
5914 opcode = OP_LOADU1_MEMBASE;
5915 else if (fsig->params [0]->type == MONO_TYPE_I2)
5916 opcode = OP_LOADI2_MEMBASE;
5917 else if (fsig->params [0]->type == MONO_TYPE_U2)
5918 opcode = OP_LOADU2_MEMBASE;
5919 else if (fsig->params [0]->type == MONO_TYPE_I4)
5920 opcode = OP_LOADI4_MEMBASE;
5921 else if (fsig->params [0]->type == MONO_TYPE_U4)
5922 opcode = OP_LOADU4_MEMBASE;
5923 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5924 opcode = OP_LOADI8_MEMBASE;
5925 else if (fsig->params [0]->type == MONO_TYPE_R4)
5926 opcode = OP_LOADR4_MEMBASE;
5927 else if (fsig->params [0]->type == MONO_TYPE_R8)
5928 opcode = OP_LOADR8_MEMBASE;
5929 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5930 opcode = OP_LOAD_MEMBASE;
5933 MONO_INST_NEW (cfg, ins, opcode);
5934 ins->inst_basereg = args [0]->dreg;
5935 ins->inst_offset = 0;
5936 MONO_ADD_INS (cfg->cbb, ins);
/* Result register class and stack type follow the loaded type. */
5938 switch (fsig->params [0]->type) {
5945 ins->dreg = mono_alloc_ireg (cfg);
5946 ins->type = STACK_I4;
5950 ins->dreg = mono_alloc_lreg (cfg);
5951 ins->type = STACK_I8;
5955 ins->dreg = mono_alloc_ireg (cfg);
5956 #if SIZEOF_REGISTER == 8
5957 ins->type = STACK_I8;
5959 ins->type = STACK_I4;
5964 ins->dreg = mono_alloc_freg (cfg);
5965 ins->type = STACK_R8;
5968 g_assert (mini_type_is_reference (fsig->params [0]));
5969 ins->dreg = mono_alloc_ireg_ref (cfg);
5970 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition on 32-bit targets. */
5974 if (opcode == OP_LOADI8_MEMBASE)
5975 ins = mono_decompose_opcode (cfg, ins);
/* Acquire semantics: the barrier follows the load. */
5977 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
5981 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5983 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5985 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5986 opcode = OP_STOREI1_MEMBASE_REG;
5987 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5988 opcode = OP_STOREI2_MEMBASE_REG;
5989 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5990 opcode = OP_STOREI4_MEMBASE_REG;
5991 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5992 opcode = OP_STOREI8_MEMBASE_REG;
5993 else if (fsig->params [0]->type == MONO_TYPE_R4)
5994 opcode = OP_STORER4_MEMBASE_REG;
5995 else if (fsig->params [0]->type == MONO_TYPE_R8)
5996 opcode = OP_STORER8_MEMBASE_REG;
5997 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5998 opcode = OP_STORE_MEMBASE_REG;
/* Release semantics: the barrier precedes the store. */
6001 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6003 MONO_INST_NEW (cfg, ins, opcode);
6004 ins->sreg1 = args [1]->dreg;
6005 ins->inst_destbasereg = args [0]->dreg;
6006 ins->inst_offset = 0;
6007 MONO_ADD_INS (cfg->cbb, ins);
6009 if (opcode == OP_STOREI8_MEMBASE_REG)
6010 ins = mono_decompose_opcode (cfg, ins);
/*
 * Interlocked: map Read/Increment/Decrement/Add/Exchange/CompareExchange
 * onto OP_ATOMIC_* opcodes when the architecture supports them; float
 * variants are routed through int<->float move opcodes (f2i/i2f).
 */
6015 } else if (cmethod->klass->image == mono_defaults.corlib &&
6016 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6017 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6020 #if SIZEOF_REGISTER == 8
6021 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6022 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6023 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6024 ins->dreg = mono_alloc_preg (cfg);
6025 ins->sreg1 = args [0]->dreg;
6026 ins->type = STACK_I8;
6027 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6028 MONO_ADD_INS (cfg->cbb, ins);
6032 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6034 /* 64 bit reads are already atomic */
6035 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6036 load_ins->dreg = mono_alloc_preg (cfg);
6037 load_ins->inst_basereg = args [0]->dreg;
6038 load_ins->inst_offset = 0;
6039 load_ins->type = STACK_I8;
6040 MONO_ADD_INS (cfg->cbb, load_ins);
6042 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement are atomic add of +1 / -1. */
6049 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6050 MonoInst *ins_iconst;
6053 if (fsig->params [0]->type == MONO_TYPE_I4) {
6054 opcode = OP_ATOMIC_ADD_I4;
6055 cfg->has_atomic_add_i4 = TRUE;
6057 #if SIZEOF_REGISTER == 8
6058 else if (fsig->params [0]->type == MONO_TYPE_I8)
6059 opcode = OP_ATOMIC_ADD_I8;
6062 if (!mono_arch_opcode_supported (opcode))
6064 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6065 ins_iconst->inst_c0 = 1;
6066 ins_iconst->dreg = mono_alloc_ireg (cfg);
6067 MONO_ADD_INS (cfg->cbb, ins_iconst);
6069 MONO_INST_NEW (cfg, ins, opcode);
6070 ins->dreg = mono_alloc_ireg (cfg);
6071 ins->inst_basereg = args [0]->dreg;
6072 ins->inst_offset = 0;
6073 ins->sreg2 = ins_iconst->dreg;
6074 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6075 MONO_ADD_INS (cfg->cbb, ins);
6077 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6078 MonoInst *ins_iconst;
6081 if (fsig->params [0]->type == MONO_TYPE_I4) {
6082 opcode = OP_ATOMIC_ADD_I4;
6083 cfg->has_atomic_add_i4 = TRUE;
6085 #if SIZEOF_REGISTER == 8
6086 else if (fsig->params [0]->type == MONO_TYPE_I8)
6087 opcode = OP_ATOMIC_ADD_I8;
6090 if (!mono_arch_opcode_supported (opcode))
6092 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6093 ins_iconst->inst_c0 = -1;
6094 ins_iconst->dreg = mono_alloc_ireg (cfg);
6095 MONO_ADD_INS (cfg->cbb, ins_iconst);
6097 MONO_INST_NEW (cfg, ins, opcode);
6098 ins->dreg = mono_alloc_ireg (cfg);
6099 ins->inst_basereg = args [0]->dreg;
6100 ins->inst_offset = 0;
6101 ins->sreg2 = ins_iconst->dreg;
6102 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6103 MONO_ADD_INS (cfg->cbb, ins);
6105 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6108 if (fsig->params [0]->type == MONO_TYPE_I4) {
6109 opcode = OP_ATOMIC_ADD_I4;
6110 cfg->has_atomic_add_i4 = TRUE;
6112 #if SIZEOF_REGISTER == 8
6113 else if (fsig->params [0]->type == MONO_TYPE_I8)
6114 opcode = OP_ATOMIC_ADD_I8;
6117 if (!mono_arch_opcode_supported (opcode))
6119 MONO_INST_NEW (cfg, ins, opcode);
6120 ins->dreg = mono_alloc_ireg (cfg);
6121 ins->inst_basereg = args [0]->dreg;
6122 ins->inst_offset = 0;
6123 ins->sreg2 = args [1]->dreg;
6124 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6125 MONO_ADD_INS (cfg->cbb, ins);
6128 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6129 MonoInst *f2i = NULL, *i2f;
6130 guint32 opcode, f2i_opcode, i2f_opcode;
6131 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6132 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6134 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6135 fsig->params [0]->type == MONO_TYPE_R4) {
6136 opcode = OP_ATOMIC_EXCHANGE_I4;
6137 f2i_opcode = OP_MOVE_F_TO_I4;
6138 i2f_opcode = OP_MOVE_I4_TO_F;
6139 cfg->has_atomic_exchange_i4 = TRUE;
6141 #if SIZEOF_REGISTER == 8
6143 fsig->params [0]->type == MONO_TYPE_I8 ||
6144 fsig->params [0]->type == MONO_TYPE_R8 ||
6145 fsig->params [0]->type == MONO_TYPE_I) {
6146 opcode = OP_ATOMIC_EXCHANGE_I8;
6147 f2i_opcode = OP_MOVE_F_TO_I8;
6148 i2f_opcode = OP_MOVE_I8_TO_F;
6151 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6152 opcode = OP_ATOMIC_EXCHANGE_I4;
6153 cfg->has_atomic_exchange_i4 = TRUE;
6159 if (!mono_arch_opcode_supported (opcode))
6163 /* TODO: Decompose these opcodes instead of bailing here. */
6164 if (COMPILE_SOFT_FLOAT (cfg))
/* Float argument: move its bits into an int register for the exchange. */
6167 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6168 f2i->dreg = mono_alloc_ireg (cfg);
6169 f2i->sreg1 = args [1]->dreg;
6170 if (f2i_opcode == OP_MOVE_F_TO_I4)
6171 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6172 MONO_ADD_INS (cfg->cbb, f2i);
6175 MONO_INST_NEW (cfg, ins, opcode);
6176 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6177 ins->inst_basereg = args [0]->dreg;
6178 ins->inst_offset = 0;
6179 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6180 MONO_ADD_INS (cfg->cbb, ins);
6182 switch (fsig->params [0]->type) {
6184 ins->type = STACK_I4;
6187 ins->type = STACK_I8;
6190 #if SIZEOF_REGISTER == 8
6191 ins->type = STACK_I8;
6193 ins->type = STACK_I4;
6198 ins->type = STACK_R8;
6201 g_assert (mini_type_is_reference (fsig->params [0]));
6202 ins->type = STACK_OBJ;
/* Float result: move the exchanged bits back into an FP register. */
6207 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6208 i2f->dreg = mono_alloc_freg (cfg);
6209 i2f->sreg1 = ins->dreg;
6210 i2f->type = STACK_R8;
6211 if (i2f_opcode == OP_MOVE_I4_TO_F)
6212 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6213 MONO_ADD_INS (cfg->cbb, i2f);
/* A reference was written into the location: notify the GC. */
6218 if (cfg->gen_write_barriers && is_ref)
6219 emit_write_barrier (cfg, args [0], args [1]);
6221 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6222 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6223 guint32 opcode, f2i_opcode, i2f_opcode;
6224 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6225 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6227 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6228 fsig->params [1]->type == MONO_TYPE_R4) {
6229 opcode = OP_ATOMIC_CAS_I4;
6230 f2i_opcode = OP_MOVE_F_TO_I4;
6231 i2f_opcode = OP_MOVE_I4_TO_F;
6232 cfg->has_atomic_cas_i4 = TRUE;
6234 #if SIZEOF_REGISTER == 8
6236 fsig->params [1]->type == MONO_TYPE_I8 ||
6237 fsig->params [1]->type == MONO_TYPE_R8 ||
6238 fsig->params [1]->type == MONO_TYPE_I) {
6239 opcode = OP_ATOMIC_CAS_I8;
6240 f2i_opcode = OP_MOVE_F_TO_I8;
6241 i2f_opcode = OP_MOVE_I8_TO_F;
6244 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6245 opcode = OP_ATOMIC_CAS_I4;
6246 cfg->has_atomic_cas_i4 = TRUE;
6252 if (!mono_arch_opcode_supported (opcode))
6256 /* TODO: Decompose these opcodes instead of bailing here. */
6257 if (COMPILE_SOFT_FLOAT (cfg))
/* Float CAS: both the new value and the comparand go through f2i moves. */
6260 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6261 f2i_new->dreg = mono_alloc_ireg (cfg);
6262 f2i_new->sreg1 = args [1]->dreg;
6263 if (f2i_opcode == OP_MOVE_F_TO_I4)
6264 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6265 MONO_ADD_INS (cfg->cbb, f2i_new);
6267 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6268 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6269 f2i_cmp->sreg1 = args [2]->dreg;
6270 if (f2i_opcode == OP_MOVE_F_TO_I4)
6271 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6272 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6275 MONO_INST_NEW (cfg, ins, opcode);
6276 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6277 ins->sreg1 = args [0]->dreg;
6278 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6279 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6280 MONO_ADD_INS (cfg->cbb, ins);
6282 switch (fsig->params [1]->type) {
6284 ins->type = STACK_I4;
6287 ins->type = STACK_I8;
6290 #if SIZEOF_REGISTER == 8
6291 ins->type = STACK_I8;
6293 ins->type = STACK_I4;
6297 ins->type = cfg->r4_stack_type;
6300 ins->type = STACK_R8;
6303 g_assert (mini_type_is_reference (fsig->params [1]));
6304 ins->type = STACK_OBJ;
6309 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6310 i2f->dreg = mono_alloc_freg (cfg);
6311 i2f->sreg1 = ins->dreg;
6312 i2f->type = STACK_R8;
6313 if (i2f_opcode == OP_MOVE_I4_TO_F)
6314 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6315 MONO_ADD_INS (cfg->cbb, i2f);
6320 if (cfg->gen_write_barriers && is_ref)
6321 emit_write_barrier (cfg, args [0], args [1]);
/* 4-arg CompareExchange: also reports success through an out parameter. */
6323 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6324 fsig->params [1]->type == MONO_TYPE_I4) {
6325 MonoInst *cmp, *ceq;
6327 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6330 /* int32 r = CAS (location, value, comparand); */
6331 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6332 ins->dreg = alloc_ireg (cfg);
6333 ins->sreg1 = args [0]->dreg;
6334 ins->sreg2 = args [1]->dreg;
6335 ins->sreg3 = args [2]->dreg;
6336 ins->type = STACK_I4;
6337 MONO_ADD_INS (cfg->cbb, ins);
6339 /* bool result = r == comparand; */
6340 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6341 cmp->sreg1 = ins->dreg;
6342 cmp->sreg2 = args [2]->dreg;
6343 cmp->type = STACK_I4;
6344 MONO_ADD_INS (cfg->cbb, cmp);
6346 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6347 ceq->dreg = alloc_ireg (cfg);
6348 ceq->type = STACK_I4;
6349 MONO_ADD_INS (cfg->cbb, ceq);
6351 /* *success = result; */
6352 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6354 cfg->has_atomic_cas_i4 = TRUE;
6356 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6357 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/*
 * Volatile.Read/Write: single OP_ATOMIC_LOAD_*/OP_ATOMIC_STORE_* opcodes
 * with acquire (load) resp. release (store) ordering.
 */
6361 } else if (cmethod->klass->image == mono_defaults.corlib &&
6362 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6363 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6366 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6368 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6369 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6371 if (fsig->params [0]->type == MONO_TYPE_I1)
6372 opcode = OP_ATOMIC_LOAD_I1;
6373 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6374 opcode = OP_ATOMIC_LOAD_U1;
6375 else if (fsig->params [0]->type == MONO_TYPE_I2)
6376 opcode = OP_ATOMIC_LOAD_I2;
6377 else if (fsig->params [0]->type == MONO_TYPE_U2)
6378 opcode = OP_ATOMIC_LOAD_U2;
6379 else if (fsig->params [0]->type == MONO_TYPE_I4)
6380 opcode = OP_ATOMIC_LOAD_I4;
6381 else if (fsig->params [0]->type == MONO_TYPE_U4)
6382 opcode = OP_ATOMIC_LOAD_U4;
6383 else if (fsig->params [0]->type == MONO_TYPE_R4)
6384 opcode = OP_ATOMIC_LOAD_R4;
6385 else if (fsig->params [0]->type == MONO_TYPE_R8)
6386 opcode = OP_ATOMIC_LOAD_R8;
6387 #if SIZEOF_REGISTER == 8
6388 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6389 opcode = OP_ATOMIC_LOAD_I8;
6390 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6391 opcode = OP_ATOMIC_LOAD_U8;
6393 else if (fsig->params [0]->type == MONO_TYPE_I)
6394 opcode = OP_ATOMIC_LOAD_I4;
6395 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6396 opcode = OP_ATOMIC_LOAD_U4;
6400 if (!mono_arch_opcode_supported (opcode))
6403 MONO_INST_NEW (cfg, ins, opcode);
6404 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6405 ins->sreg1 = args [0]->dreg;
6406 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6407 MONO_ADD_INS (cfg->cbb, ins);
6409 switch (fsig->params [0]->type) {
6410 case MONO_TYPE_BOOLEAN:
6417 ins->type = STACK_I4;
6421 ins->type = STACK_I8;
6425 #if SIZEOF_REGISTER == 8
6426 ins->type = STACK_I8;
6428 ins->type = STACK_I4;
6432 ins->type = cfg->r4_stack_type;
6435 ins->type = STACK_R8;
6438 g_assert (mini_type_is_reference (fsig->params [0]));
6439 ins->type = STACK_OBJ;
6445 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6447 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6449 if (fsig->params [0]->type == MONO_TYPE_I1)
6450 opcode = OP_ATOMIC_STORE_I1;
6451 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6452 opcode = OP_ATOMIC_STORE_U1;
6453 else if (fsig->params [0]->type == MONO_TYPE_I2)
6454 opcode = OP_ATOMIC_STORE_I2;
6455 else if (fsig->params [0]->type == MONO_TYPE_U2)
6456 opcode = OP_ATOMIC_STORE_U2;
6457 else if (fsig->params [0]->type == MONO_TYPE_I4)
6458 opcode = OP_ATOMIC_STORE_I4;
6459 else if (fsig->params [0]->type == MONO_TYPE_U4)
6460 opcode = OP_ATOMIC_STORE_U4;
6461 else if (fsig->params [0]->type == MONO_TYPE_R4)
6462 opcode = OP_ATOMIC_STORE_R4;
6463 else if (fsig->params [0]->type == MONO_TYPE_R8)
6464 opcode = OP_ATOMIC_STORE_R8;
6465 #if SIZEOF_REGISTER == 8
6466 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6467 opcode = OP_ATOMIC_STORE_I8;
6468 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6469 opcode = OP_ATOMIC_STORE_U8;
6471 else if (fsig->params [0]->type == MONO_TYPE_I)
6472 opcode = OP_ATOMIC_STORE_I4;
6473 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6474 opcode = OP_ATOMIC_STORE_U4;
6478 if (!mono_arch_opcode_supported (opcode))
6481 MONO_INST_NEW (cfg, ins, opcode);
6482 ins->dreg = args [0]->dreg;
6483 ins->sreg1 = args [1]->dreg;
6484 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6485 MONO_ADD_INS (cfg->cbb, ins);
6487 if (cfg->gen_write_barriers && is_ref)
6488 emit_write_barrier (cfg, args [0], args [1]);
/* Debugger.Break: emit a breakpoint only when the agent wants one. */
6494 } else if (cmethod->klass->image == mono_defaults.corlib &&
6495 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6496 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6497 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6498 if (should_insert_brekpoint (cfg->method)) {
6499 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6501 MONO_INST_NEW (cfg, ins, OP_NOP);
6502 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows: constant-folded per target. */
6506 } else if (cmethod->klass->image == mono_defaults.corlib &&
6507 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6508 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6509 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6511 EMIT_NEW_ICONST (cfg, ins, 1);
6513 EMIT_NEW_ICONST (cfg, ins, 0);
6516 } else if (cmethod->klass == mono_defaults.math_class) {
6518 * There is general branchless code for Min/Max, but it does not work for
6520 * http://everything2.com/?node_id=1051618
/*
 * ObjCRuntime Selector.GetHandle: when the argument is a string literal
 * (OP_GOT_ENTRY/OP_AOTCONST carrying an LDSTR patch), fold the call into an
 * OP_OBJC_GET_SELECTOR on the UTF-8 selector name; disables LLVM here.
 */
6522 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6523 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6524 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6525 !strcmp (cmethod->klass->name, "Selector")) ||
6526 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6527 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6528 !strcmp (cmethod->klass->name, "Selector"))
6530 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6531 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6532 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6535 MonoJumpInfoToken *ji;
6538 cfg->disable_llvm = TRUE;
6540 if (args [0]->opcode == OP_GOT_ENTRY) {
6541 pi = args [0]->inst_p1;
6542 g_assert (pi->opcode == OP_PATCH_INFO);
6543 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6546 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6547 ji = args [0]->inst_p0;
/* The original ldstr argument is no longer needed. */
6550 NULLIFY_INS (args [0]);
6553 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6554 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6555 ins->dreg = mono_alloc_ireg (cfg);
6557 ins->inst_p0 = mono_string_to_utf8 (s);
6558 MONO_ADD_INS (cfg->cbb, ins);
/* Fallbacks: SIMD, native-type, LLVM and finally arch-specific intrinsics. */
6564 #ifdef MONO_ARCH_SIMD_INTRINSICS
6565 if (cfg->opt & MONO_OPT_SIMD) {
6566 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6572 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6576 if (COMPILE_LLVM (cfg)) {
6577 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6582 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6586 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected icalls to faster managed implementations. Visible
 * case: String.InternalAllocateStr is replaced with a call to the GC's
 * managed string allocator (vtable + length), but only when allocation
 * profiling and shared (domain-neutral) code are both off.
 */
6589 inline static MonoInst*
6590 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6591 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6593 if (method->klass == mono_defaults.string_class) {
6594 /* managed string allocation support */
6595 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6596 MonoInst *iargs [2];
6597 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6598 MonoMethod *managed_alloc = NULL;
6600 g_assert (vtable); /* Should not fail since it is System.String */
/* No managed allocator available when cross-compiling. */
6601 #ifndef MONO_CROSS_COMPILE
6602 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* managed_alloc (vtable, length): forward the original length argument. */
6606 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6607 iargs [1] = args [0];
6608 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   For inlining: create an OP_LOCAL variable for each argument (including
 * the implicit 'this') of the method being inlined and store the caller's
 * stack values (SP) into them, recording the originating cil_code on each
 * store for debugging/sequence points.
 */
6615 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6617 MonoInst *store, *temp;
6620 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; derive its type from the stack. */
6621 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6624 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6625 * would be different than the MonoInst's used to represent arguments, and
6626 * the ldelema implementation can't deal with that.
6627 * Solution: When ldelema is used on an inline argument, create a var for
6628 * it, emit ldelema on that var, and emit the saving code below in
6629 * inline_method () if needed.
6631 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6632 cfg->args [i] = temp;
6633 /* This uses cfg->args [i] which is set by the preceding line */
6634 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6635 store->cil_code = sp [0]->cil_code;
6640 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6641 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts
 *   with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 *   environment variable.  The env var is read once and cached in a
 *   function-local static; an empty limit means "no restriction"
 *   (handled outside the visible lines).
 */
6643 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6645 check_inline_called_method_name_limit (MonoMethod *called_method)
6648 static const char *limit = NULL;
6650 if (limit == NULL) {
6651 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6653 if (limit_string != NULL)
6654 limit = limit_string;
6659 if (limit [0] != '\0') {
6660 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: TRUE only when the callee name begins with the limit. */
6662 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6663 g_free (called_method_name);
6665 //return (strncmp_result <= 0);
6666 return (strncmp_result == 0);
/*
 * check_inline_caller_method_name_limit:
 *   Counterpart of check_inline_called_method_name_limit for the CALLER:
 *   only allow inlining inside methods whose full name starts with the
 *   prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Same caching and
 *   prefix-match scheme as the callee variant.
 */
6673 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6675 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6678 static const char *limit = NULL;
6680 if (limit == NULL) {
6681 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6682 if (limit_string != NULL) {
6683 limit = limit_string;
6689 if (limit [0] != '\0') {
6690 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6692 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6693 g_free (caller_method_name);
6695 //return (strncmp_result <= 0);
6696 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that zero-initializes virtual register DREG according to RTYPE:
 *   NULL for reference/pointer types, 0 for small ints, 0L for 64-bit ints,
 *   0.0 for R4/R8 (R4 gets its own constant only when cfg->r4fp is set),
 *   and VZERO for value types (including generic instances and type
 *   variables known to be value types).
 *   The r4_0/r8_0 statics provide stable addresses for the FP constants.
 */
6704 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6706 static double r8_0 = 0.0;
6707 static float r4_0 = 0.0;
/* Strip generic sharing indirections before dispatching on the type code. */
6711 rtype = mini_get_underlying_type (rtype);
6715 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6716 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6717 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6718 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6719 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6720 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6721 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6722 ins->type = STACK_R4;
6723 ins->inst_p0 = (void*)&r4_0;
6725 MONO_ADD_INS (cfg->cbb, ins);
6726 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6727 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6728 ins->type = STACK_R8;
6729 ins->inst_p0 = (void*)&r8_0;
6731 MONO_ADD_INS (cfg->cbb, ins);
6732 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6733 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6734 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6735 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6736 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized reference: init to NULL. */
6738 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Same type dispatch as emit_init_rvar, but emits OP_DUMMY_* opcodes
 *   that generate NO machine code.  Used when init_locals is off so later
 *   passes still see a definition for the register (keeps the IR valid
 *   without paying for real initialization).  Falls back to a real
 *   emit_init_rvar for any type without a dummy opcode.
 */
6743 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6747 rtype = mini_get_underlying_type (rtype);
6751 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6752 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6753 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6754 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6755 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6756 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6757 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6758 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6759 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6760 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6761 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6762 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6763 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6764 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6766 emit_init_rvar (cfg, dreg, rtype);
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE.  Under soft-float the
 *   value is materialized into a fresh register and then stored with
 *   LOCSTORE (the decomposition pass needs a real store); otherwise the
 *   local's own dreg is initialized directly -- for real (init==TRUE,
 *   presumably; the branch condition is elided in this sample) or with
 *   no-op dummy opcodes via emit_dummy_init_rvar.
 */
6772 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6774 MonoInst *var = cfg->locals [local];
6775 if (COMPILE_SOFT_FLOAT (cfg)) {
6777 int reg = alloc_dreg (cfg, var->type);
6778 emit_init_rvar (cfg, reg, type);
6779 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6782 emit_init_rvar (cfg, var->dreg, type);
6784 emit_dummy_init_rvar (cfg, var->dreg, type);
6791 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Try to inline CMETHOD into the current compilation at IP.  Returns the
 *   inlining cost (>0) on success, 0/negative on failure (exact sentinel is
 *   outside this sample).  Works by saving essentially all per-method state
 *   on the C stack, recursing into mono_method_to_ir () for the callee body
 *   between freshly allocated start/end bblocks, restoring the state, and
 *   then either stitching the new bblocks into the caller's CFG (accept) or
 *   abandoning them (reject).  INLINE_ALWAYS forces acceptance regardless of
 *   cost (used for wrappers that must be inlined).
 */
6794 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6795 guchar *ip, guint real_offset, gboolean inline_always)
6797 MonoInst *ins, *rvar = NULL;
6798 MonoMethodHeader *cheader;
6799 MonoBasicBlock *ebblock, *sbblock;
6801 MonoMethod *prev_inlined_method;
6802 MonoInst **prev_locals, **prev_args;
6803 MonoType **prev_arg_types;
6804 guint prev_real_offset;
6805 GHashTable *prev_cbb_hash;
6806 MonoBasicBlock **prev_cil_offset_to_bb;
6807 MonoBasicBlock *prev_cbb;
6808 unsigned char* prev_cil_start;
6809 guint32 prev_cil_offset_to_bb_len;
6810 MonoMethod *prev_current_method;
6811 MonoGenericContext *prev_generic_context;
6812 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6814 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters for debugging inliner problems. */
6816 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6817 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6820 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6821 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6826 fsig = mono_method_signature (cmethod);
6828 if (cfg->verbose_level > 2)
6829 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6831 if (!cmethod->inline_info) {
6832 cfg->stat_inlineable_methods++;
6833 cmethod->inline_info = 1;
6836 /* allocate local variables */
6837 cheader = mono_method_get_header (cmethod);
6839 if (cheader == NULL || mono_loader_get_last_error ()) {
6840 MonoLoaderError *error = mono_loader_get_last_error ();
6843 mono_metadata_free_mh (cheader);
6844 if (inline_always && error)
6845 mono_cfg_set_exception (cfg, error->exception_type);
6847 mono_loader_clear_error ();
6851 /* Must verify before creating locals as it can cause the JIT to assert. */
6852 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6853 mono_metadata_free_mh (cheader);
6857 /* allocate space to store the return value */
6858 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6859 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; caller's are restored after the recursion. */
6862 prev_locals = cfg->locals;
6863 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6864 for (i = 0; i < cheader->num_locals; ++i)
6865 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6867 /* allocate start and end blocks */
6868 /* This is needed so if the inline is aborted, we can clean up */
6869 NEW_BBLOCK (cfg, sbblock);
6870 sbblock->real_offset = real_offset;
6872 NEW_BBLOCK (cfg, ebblock);
6873 ebblock->block_num = cfg->num_bblocks++;
6874 ebblock->real_offset = real_offset;
/* Save the remaining per-method translation state. */
6876 prev_args = cfg->args;
6877 prev_arg_types = cfg->arg_types;
6878 prev_inlined_method = cfg->inlined_method;
6879 cfg->inlined_method = cmethod;
6880 cfg->ret_var_set = FALSE;
6881 cfg->inline_depth ++;
6882 prev_real_offset = cfg->real_offset;
6883 prev_cbb_hash = cfg->cbb_hash;
6884 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6885 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6886 prev_cil_start = cfg->cil_start;
6887 prev_cbb = cfg->cbb;
6888 prev_current_method = cfg->current_method;
6889 prev_generic_context = cfg->generic_context;
6890 prev_ret_var_set = cfg->ret_var_set;
6891 prev_disable_inline = cfg->disable_inline;
6893 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively translate the callee's IL between sbblock and ebblock. */
6896 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6898 ret_var_set = cfg->ret_var_set;
/* Restore every piece of caller state saved above. */
6900 cfg->inlined_method = prev_inlined_method;
6901 cfg->real_offset = prev_real_offset;
6902 cfg->cbb_hash = prev_cbb_hash;
6903 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6904 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6905 cfg->cil_start = prev_cil_start;
6906 cfg->locals = prev_locals;
6907 cfg->args = prev_args;
6908 cfg->arg_types = prev_arg_types;
6909 cfg->current_method = prev_current_method;
6910 cfg->generic_context = prev_generic_context;
6911 cfg->ret_var_set = prev_ret_var_set;
6912 cfg->disable_inline = prev_disable_inline;
6913 cfg->inline_depth --;
/* Accept when cheap enough, forced, or marked AggressiveInlining. */
6915 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6916 if (cfg->verbose_level > 2)
6917 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6919 cfg->stat_inlined_methods++;
6921 /* always add some code to avoid block split failures */
6922 MONO_INST_NEW (cfg, ins, OP_NOP);
6923 MONO_ADD_INS (prev_cbb, ins);
6925 prev_cbb->next_bb = sbblock;
6926 link_bblock (cfg, prev_cbb, sbblock);
6929 * Get rid of the begin and end bblocks if possible to aid local
6932 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6934 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6935 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6937 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6938 MonoBasicBlock *prev = ebblock->in_bb [0];
6939 mono_merge_basic_blocks (cfg, prev, ebblock);
6941 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6942 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6943 cfg->cbb = prev_cbb;
6947 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths that never reach a 'ret'. */
6953 for (i = 0; i < ebblock->in_count; ++i) {
6954 bb = ebblock->in_bb [i];
6956 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6959 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6969 * If the inlined method contains only a throw, then the ret var is not
6970 * set, so set it to a dummy value.
6973 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6975 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6978 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Rejected: report, clear any pending exception, drop the new bblocks. */
6981 if (cfg->verbose_level > 2)
6982 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6983 cfg->exception_type = MONO_EXCEPTION_NONE;
6984 mono_loader_clear_error ();
6986 /* This gets rid of the newly added bblocks */
6987 cfg->cbb = prev_cbb;
6989 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6994 * Some of these comments may well be out-of-date.
6995 * Design decisions: we do a single pass over the IL code (and we do bblock
6996 * splitting/merging in the few cases when it's required: a back jump to an IL
6997 * address that was not already seen as bblock starting point).
6998 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6999 * Complex operations are decomposed in simpler ones right away. We need to let the
7000 * arch-specific code peek and poke inside this process somehow (except when the
7001 * optimizations can take advantage of the full semantic info of coarse opcodes).
7002 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7003 * MonoInst->opcode initially is the IL opcode or some simplification of that
7004 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7005 * opcode with value bigger than OP_LAST.
7006 * At this point the IR can be handed over to an interpreter, a dumb code generator
7007 * or to the optimizing code generator that will translate it to SSA form.
7009 * Profiling directed optimizations.
7010 * We may compile by default with few or no optimizations and instrument the code
7011 * or the user may indicate what methods to optimize the most either in a config file
7012 * or through repeated runs where the compiler applies offline the optimizations to
7013 * each method and then decides if it was worth it.
7016 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7017 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7018 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7019 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7020 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7021 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7022 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7023 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7025 /* offset from br.s -> br like opcodes */
7026 #define BIG_BRANCH_OFFSET 13
7029 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7031 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7033 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Pre-pass over the IL stream [START, END): decode each opcode, and for
 *   every branch/switch create bblocks (via GET_BBLOCK) at each branch
 *   target and at the instruction following the branch.  Also marks the
 *   bblock containing a CEE_THROW as out_of_line so it can be laid out
 *   cold.  NOTE(review): several case bodies and the loop framing are
 *   elided in this sample; comments describe only the visible lines.
 */
7037 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7039 unsigned char *ip = start;
7040 unsigned char *target;
7043 MonoBasicBlock *bblock;
7044 const MonoOpcode *opcode;
7047 cli_addr = ip - start;
7048 i = mono_opcode_value ((const guint8 **)&ip, end);
7051 opcode = &mono_opcodes [i];
/* Dispatch on the operand kind to find the next instruction / targets. */
7052 switch (opcode->argument) {
7053 case MonoInlineNone:
7056 case MonoInlineString:
7057 case MonoInlineType:
7058 case MonoInlineField:
7059 case MonoInlineMethod:
7062 case MonoShortInlineR:
7069 case MonoShortInlineVar:
7070 case MonoShortInlineI:
7073 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte insn. */
7074 target = start + cli_addr + 2 + (signed char)ip [1];
7075 GET_BBLOCK (cfg, bblock, target);
7078 GET_BBLOCK (cfg, bblock, ip);
7080 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte insn. */
7081 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7082 GET_BBLOCK (cfg, bblock, target);
7085 GET_BBLOCK (cfg, bblock, ip);
7087 case MonoInlineSwitch: {
7088 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the first byte after the jump table. */
7091 cli_addr += 5 + 4 * n;
7092 target = start + cli_addr;
7093 GET_BBLOCK (cfg, bblock, target);
7095 for (j = 0; j < n; ++j) {
7096 target = start + cli_addr + (gint32)read32 (ip);
7097 GET_BBLOCK (cfg, bblock, target);
7107 g_assert_not_reached ();
7110 if (i == CEE_THROW) {
7111 unsigned char *bb_start = ip - 1;
7113 /* Find the start of the bblock containing the throw */
7115 while ((bb_start >= start) && !bblock) {
7116 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7120 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's own data table (and the result
 *   is inflated with CONTEXT when needed); otherwise it is a normal
 *   metadata lookup.  "allow_open" = open constructed methods are returned
 *   as-is (the non-open filter lives in mini_get_method below).
 */
7130 static inline MonoMethod *
7131 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7135 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7136 method = mono_method_get_wrapper_data (m, token);
7139 method = mono_class_inflate_generic_method_checked (method, context, &error);
7140 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7143 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when NOT compiling a gshared
 *   method, a method on an open constructed type is rejected (the branch
 *   body is elided in this sample -- presumably it clears the result;
 *   verify against the full source).
 */
7149 static inline MonoMethod *
7150 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7152 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7154 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass for METHOD: wrapper methods look the class
 *   up in their wrapper data (inflating with CONTEXT when set), normal
 *   methods go through the typespec-aware checked metadata lookup.  The
 *   class is initialized before being returned.
 */
7160 static inline MonoClass*
7161 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7166 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7167 klass = mono_method_get_wrapper_data (method, token);
7169 klass = mono_class_inflate_generic_class (klass, context);
7171 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7172 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7175 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature (used by calli).  Wrapper
 *   methods fetch the signature from wrapper data and inflate it with
 *   CONTEXT; normal methods parse it from metadata.
 */
7179 static inline MonoMethodSignature*
7180 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7182 MonoMethodSignature *fsig;
7184 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7187 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7189 fsig = mono_inflate_generic_signature (fsig, context, &error);
7191 g_assert (mono_error_ok (&error));
7194 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * throw_exception:
 *   Return (lazily resolving and caching in a static) the
 *   SecurityManager.ThrowException(Exception) helper method used to raise
 *   CoreCLR security exceptions from JITted code.
 */
7200 throw_exception (void)
7202 static MonoMethod *method = NULL;
7205 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7206 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that throws the pre-allocated exception object EX by calling
 *   SecurityManager.ThrowException with EX as a pointer constant.
 */
7213 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7215 MonoMethod *thrower = throw_exception ();
7218 EMIT_NEW_PCONST (cfg, args [0], ex);
7219 mono_emit_method_call (cfg, thrower, args, NULL);
7223 * Return the original method is a wrapper is specified. We can only access
7224 * the custom attributes from the original method.
/*
 * get_original_method:
 *   Map a wrapper back to the method it wraps so its custom attributes can
 *   be inspected.  Non-wrappers are returned unchanged; native-to-managed
 *   wrappers are treated specially (see comment below); everything else
 *   goes through mono_marshal_method_from_wrapper.
 */
7227 get_original_method (MonoMethod *method)
7229 if (method->wrapper_type == MONO_WRAPPER_NONE)
7232 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7233 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7236 /* in other cases we need to find the original method */
7237 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: if the (unwrapped) caller is
 *   not allowed to touch FIELD, emit code that throws the security
 *   exception at runtime instead of performing the access.
 */
7241 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7243 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7244 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7246 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls: if the (unwrapped) caller may not
 *   call CALLEE, emit code that throws the security exception at runtime.
 */
7250 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7252 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7253 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7255 emit_throw_exception (cfg, ex);
7259 * Check that the IL instructions at ip are the array initialization
7260 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the IL sequence dup/ldtoken <field>/call
 *   RuntimeHelpers.InitializeArray at IP and, when it matches a supported
 *   primitive element type, return a pointer to the field's static RVA
 *   data (or the raw RVA for AOT) so the JIT can initialize the array
 *   inline.  OUT_SIZE and OUT_FIELD_TOKEN receive the data size and the
 *   ldtoken field token.  Big-endian multi-byte element types are punted.
 */
7263 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7266 * newarr[System.Int32]
7268 * ldtoken field valuetype ...
7269 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5]==0x4 checks the ldtoken token's table byte (a Field token). */
7271 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7273 guint32 token = read32 (ip + 7);
7274 guint32 field_token = read32 (ip + 2);
7275 guint32 field_index = field_token & 0xffffff;
7277 const char *data_ptr;
7279 MonoMethod *cmethod;
7280 MonoClass *dummy_class;
7281 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7285 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7289 *out_field_token = field_token;
7291 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real corlib RuntimeHelpers.InitializeArray qualifies. */
7294 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7296 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7297 case MONO_TYPE_BOOLEAN:
7301 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7302 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7303 case MONO_TYPE_CHAR:
/* Guard: the computed blob size must fit inside the field's type. */
7320 if (size > mono_type_size (field->type, &dummy_align))
7323 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7324 if (!image_is_dynamic (method->klass->image)) {
7325 field_index = read32 (ip + 2) & 0xffffff;
7326 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7327 data_ptr = mono_image_rva_map (method->klass->image, rva);
7328 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7329 /* for aot code we do the lookup on load */
7330 if (aot && data_ptr)
7331 return GUINT_TO_POINTER (rva);
7333 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7335 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG for bad IL at IP in METHOD,
 *   building a message that names the method and disassembles the
 *   offending instruction (or notes an empty body).  The header is queued
 *   on headers_to_free rather than freed here.
 */
7343 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7345 char *method_fname = mono_method_full_name (method, TRUE);
7347 MonoMethodHeader *header = mono_method_get_header (method);
7349 if (header->code_size == 0)
7350 method_code = g_strdup ("method body is empty.");
7352 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7353 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7354 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7355 g_free (method_fname);
7356 g_free (method_code);
7357 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception OBJECT on CFG (EXCEPTION_OBJECT_SUPPLIED)
 *   and GC-register cfg->exception_ptr as a root so the object survives
 *   until the compilation reports it.
 */
7361 set_exception_object (MonoCompile *cfg, MonoException *exception)
7363 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7364 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7365 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit the store for CIL stloc N.  When the store would be a plain
 *   reg-reg move and the value on the stack top is the basic block's last
 *   instruction AND a simple constant, the move is elided by retargeting
 *   that instruction's dreg straight at the local.
 */
7369 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7372 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7373 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7374 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7375 /* Optimize reg-reg moves away */
7377 * Can't optimize other opcodes, since sp[0] might point to
7378 * the last ins of a decomposed opcode.
7380 sp [0]->dreg = (cfg)->locals [n]->dreg;
7382 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7387 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for 'ldloca N; initobj T': instead of taking the local's
 *   address, directly emit the local's initialization and (in the elided
 *   tail) return the IP past the consumed sequence so the caller skips it.
 */
7390 static inline unsigned char *
7391 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7401 local = read16 (ip + 2);
7405 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7406 /* From the INITOBJ case */
7407 token = read32 (ip + 2);
7408 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7409 CHECK_TYPELOAD (klass);
7410 type = mini_get_underlying_type (&klass->byval_arg);
7411 emit_init_local (cfg, local, type, TRUE);
7419 is_exception_class (MonoClass *klass)
7422 if (klass == mono_defaults.exception_class)
7424 klass = klass->parent;
7430 * is_jit_optimizer_disabled:
7432 * Determine whenever M's assembly has a DebuggableAttribute with the
7433 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *   Determine whether M's assembly carries a DebuggableAttribute whose
 *   second ctor argument (IsJITOptimizerDisabled) is set.  The answer is
 *   cached per-assembly with a memory barrier before publishing the
 *   'inited' flag (double-checked-init style write side).
 */
7436 is_jit_optimizer_disabled (MonoMethod *m)
7438 MonoAssembly *ass = m->klass->image->assembly;
7439 MonoCustomAttrInfo* attrs;
7440 static MonoClass *klass;
7442 gboolean val = FALSE;
7445 if (ass->jit_optimizer_disabled_inited)
7446 return ass->jit_optimizer_disabled;
7449 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type at all: cache FALSE for this assembly. */
7452 ass->jit_optimizer_disabled = FALSE;
7453 mono_memory_barrier ();
7454 ass->jit_optimizer_disabled_inited = TRUE;
7458 attrs = mono_custom_attrs_from_assembly (ass);
7460 for (i = 0; i < attrs->num_attrs; ++i) {
7461 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7463 MonoMethodSignature *sig;
7465 if (!attr->ctor || attr->ctor->klass != klass)
7467 /* Decode the attribute. See reflection.c */
7468 p = (const char*)attr->data;
7469 g_assert (read16 (p) == 0x0001);
7472 // FIXME: Support named parameters
7473 sig = mono_method_signature (attr->ctor);
7474 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7476 /* Two boolean arguments */
7480 mono_custom_attrs_free (attrs);
7483 ass->jit_optimizer_disabled = val;
7484 mono_memory_barrier ();
7485 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD with signature FSIG
 *   can actually be emitted as a tail call.  Starts from an arch-specific
 *   (or signature-equality) baseline, then vetoes any case where callee
 *   arguments could reference the current frame (byref/ptr/fnptr params,
 *   valuetype 'this'), pinvokes, LMF-saving callers, most wrappers, and
 *   any opcode other than plain CEE_CALL.
 */
7491 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7493 gboolean supported_tail_call;
7496 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7497 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7499 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7502 for (i = 0; i < fsig->param_count; ++i) {
7503 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7504 /* These can point to the current method's stack */
7505 supported_tail_call = FALSE;
7507 if (fsig->hasthis && cmethod->klass->valuetype)
7508 /* this might point to the current method's stack */
7509 supported_tail_call = FALSE;
7510 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7511 supported_tail_call = FALSE;
7512 if (cfg->method->save_lmf)
7513 supported_tail_call = FALSE;
7514 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7515 supported_tail_call = FALSE;
7516 if (call_opcode != CEE_CALL)
7517 supported_tail_call = FALSE;
7519 /* Debugging support */
7521 if (supported_tail_call) {
7522 if (!mono_debug_count ())
7523 supported_tail_call = FALSE;
7527 return supported_tail_call;
7533 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *   Emit the constructor call part of a NEWOBJ opcode.  Chooses between,
 *   in order: an intrinsic ctor, inlining the ctor body, a gsharedvt
 *   out-trampoline calli, an rgctx-resolved indirect call for unpatchable
 *   generic cases, and finally a plain direct call.  For shared valuetype
 *   ctors a vtable/method-rgctx argument is materialized first.
 */
7536 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7537 MonoInst **sp, guint8 *ip, int *inline_costs)
7539 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7541 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7542 mono_method_is_generic_sharable (cmethod, TRUE)) {
7543 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7544 mono_class_vtable (cfg->domain, cmethod->klass);
7545 CHECK_TYPELOAD (cmethod->klass);
7547 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7548 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7551 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7552 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7554 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7556 CHECK_TYPELOAD (cmethod->klass);
7557 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7562 /* Avoid virtual calls to ctors if possible */
7563 if (mono_class_is_marshalbyref (cmethod->klass))
7564 callvirt_this_arg = sp [0];
7566 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7567 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7568 CHECK_CFG_EXCEPTION;
7569 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7570 mono_method_check_inlining (cfg, cmethod) &&
7571 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
/* Inline the ctor body; the 5 accounts for the call opcode's own cost. */
7574 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7575 cfg->real_offset += 5;
7577 *inline_costs += costs - 5;
7579 INLINE_FAILURE ("inline failure");
7580 // FIXME-VT: Clean this up
7581 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7582 GSHAREDVT_FAILURE(*ip);
7583 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7585 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7588 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7589 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7590 } else if (context_used &&
7591 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7592 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7593 MonoInst *cmethod_addr;
7595 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7597 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7598 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7600 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7602 INLINE_FAILURE ("ctor call");
7603 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7604 callvirt_this_arg, NULL, vtable_arg);
7611 * mono_method_to_ir:
7613 * Translate the .net IL into linear IR.
7616 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7617 MonoInst *return_var, MonoInst **inline_args,
7618 guint inline_offset, gboolean is_virtual_call)
7621 MonoInst *ins, **sp, **stack_start;
7622 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7623 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7624 MonoMethod *cmethod, *method_definition;
7625 MonoInst **arg_array;
7626 MonoMethodHeader *header;
7628 guint32 token, ins_flag;
7630 MonoClass *constrained_class = NULL;
7631 unsigned char *ip, *end, *target, *err_pos;
7632 MonoMethodSignature *sig;
7633 MonoGenericContext *generic_context = NULL;
7634 MonoGenericContainer *generic_container = NULL;
7635 MonoType **param_types;
7636 int i, n, start_new_bblock, dreg;
7637 int num_calls = 0, inline_costs = 0;
7638 int breakpoint_id = 0;
7640 GSList *class_inits = NULL;
7641 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7643 gboolean init_locals, seq_points, skip_dead_blocks;
7644 gboolean sym_seq_points = FALSE;
7645 MonoDebugMethodInfo *minfo;
7646 MonoBitSet *seq_point_locs = NULL;
7647 MonoBitSet *seq_point_set_locs = NULL;
7649 cfg->disable_inline = is_jit_optimizer_disabled (method);
7651 /* serialization and xdomain stuff may need access to private fields and methods */
7652 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7653 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7654 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7655 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7656 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7657 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7659 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7660 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7661 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7662 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7663 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7665 image = method->klass->image;
7666 header = mono_method_get_header (method);
7668 MonoLoaderError *error;
7670 if ((error = mono_loader_get_last_error ())) {
7671 mono_cfg_set_exception (cfg, error->exception_type);
7673 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7674 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7676 goto exception_exit;
7678 generic_container = mono_method_get_generic_container (method);
7679 sig = mono_method_signature (method);
7680 num_args = sig->hasthis + sig->param_count;
7681 ip = (unsigned char*)header->code;
7682 cfg->cil_start = ip;
7683 end = ip + header->code_size;
7684 cfg->stat_cil_code_size += header->code_size;
7686 seq_points = cfg->gen_seq_points && cfg->method == method;
7688 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7689 /* We could hit a seq point before attaching to the JIT (#8338) */
7693 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7694 minfo = mono_debug_lookup_method (method);
7696 MonoSymSeqPoint *sps;
7697 int i, n_il_offsets;
7699 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7700 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7701 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7702 sym_seq_points = TRUE;
7703 for (i = 0; i < n_il_offsets; ++i) {
7704 if (sps [i].il_offset < header->code_size)
7705 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7708 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7709 /* Methods without line number info like auto-generated property accessors */
7710 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7711 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7712 sym_seq_points = TRUE;
7717 * Methods without init_locals set could cause asserts in various passes
7718 * (#497220). To work around this, we emit dummy initialization opcodes
7719 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7720 * on some platforms.
7722 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7723 init_locals = header->init_locals;
7727 method_definition = method;
7728 while (method_definition->is_inflated) {
7729 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7730 method_definition = imethod->declaring;
7733 /* SkipVerification is not allowed if core-clr is enabled */
7734 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7736 dont_verify_stloc = TRUE;
7739 if (sig->is_inflated)
7740 generic_context = mono_method_get_context (method);
7741 else if (generic_container)
7742 generic_context = &generic_container->context;
7743 cfg->generic_context = generic_context;
7746 g_assert (!sig->has_type_parameters);
7748 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7749 g_assert (method->is_inflated);
7750 g_assert (mono_method_get_context (method)->method_inst);
7752 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7753 g_assert (sig->generic_param_count);
7755 if (cfg->method == method) {
7756 cfg->real_offset = 0;
7758 cfg->real_offset = inline_offset;
7761 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7762 cfg->cil_offset_to_bb_len = header->code_size;
7764 cfg->current_method = method;
7766 if (cfg->verbose_level > 2)
7767 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7769 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7771 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7772 for (n = 0; n < sig->param_count; ++n)
7773 param_types [n + sig->hasthis] = sig->params [n];
7774 cfg->arg_types = param_types;
7776 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7777 if (cfg->method == method) {
7779 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7780 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7783 NEW_BBLOCK (cfg, start_bblock);
7784 cfg->bb_entry = start_bblock;
7785 start_bblock->cil_code = NULL;
7786 start_bblock->cil_length = 0;
7789 NEW_BBLOCK (cfg, end_bblock);
7790 cfg->bb_exit = end_bblock;
7791 end_bblock->cil_code = NULL;
7792 end_bblock->cil_length = 0;
7793 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7794 g_assert (cfg->num_bblocks == 2);
7796 arg_array = cfg->args;
7798 if (header->num_clauses) {
7799 cfg->spvars = g_hash_table_new (NULL, NULL);
7800 cfg->exvars = g_hash_table_new (NULL, NULL);
7802 /* handle exception clauses */
7803 for (i = 0; i < header->num_clauses; ++i) {
7804 MonoBasicBlock *try_bb;
7805 MonoExceptionClause *clause = &header->clauses [i];
7806 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7807 try_bb->real_offset = clause->try_offset;
7808 try_bb->try_start = TRUE;
7809 try_bb->region = ((i + 1) << 8) | clause->flags;
7810 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7811 tblock->real_offset = clause->handler_offset;
7812 tblock->flags |= BB_EXCEPTION_HANDLER;
7815 * Linking the try block with the EH block hinders inlining as we won't be able to
7816 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7818 if (COMPILE_LLVM (cfg))
7819 link_bblock (cfg, try_bb, tblock);
7821 if (*(ip + clause->handler_offset) == CEE_POP)
7822 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7824 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7825 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7826 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7827 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7828 MONO_ADD_INS (tblock, ins);
7830 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7831 /* finally clauses already have a seq point */
7832 /* seq points for filter clauses are emitted below */
7833 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7834 MONO_ADD_INS (tblock, ins);
7837 /* todo: is a fault block unsafe to optimize? */
7838 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7839 tblock->flags |= BB_EXCEPTION_UNSAFE;
7842 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7844 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7846 /* catch and filter blocks get the exception object on the stack */
7847 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7848 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7850 /* mostly like handle_stack_args (), but just sets the input args */
7851 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7852 tblock->in_scount = 1;
7853 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7854 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7858 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7859 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7860 if (!cfg->compile_llvm) {
7861 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7862 ins->dreg = tblock->in_stack [0]->dreg;
7863 MONO_ADD_INS (tblock, ins);
7866 MonoInst *dummy_use;
7869 * Add a dummy use for the exvar so its liveness info will be
7872 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7875 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7876 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7877 MONO_ADD_INS (tblock, ins);
7880 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7881 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7882 tblock->flags |= BB_EXCEPTION_HANDLER;
7883 tblock->real_offset = clause->data.filter_offset;
7884 tblock->in_scount = 1;
7885 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7886 /* The filter block shares the exvar with the handler block */
7887 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7888 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7889 MONO_ADD_INS (tblock, ins);
7893 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7894 clause->data.catch_class &&
7896 mono_class_check_context_used (clause->data.catch_class)) {
7898 * In shared generic code with catch
7899 * clauses containing type variables
7900 * the exception handling code has to
7901 * be able to get to the rgctx.
7902 * Therefore we have to make sure that
7903 * the vtable/mrgctx argument (for
7904 * static or generic methods) or the
7905 * "this" argument (for non-static
7906 * methods) are live.
7908 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7909 mini_method_get_context (method)->method_inst ||
7910 method->klass->valuetype) {
7911 mono_get_vtable_var (cfg);
7913 MonoInst *dummy_use;
7915 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7920 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7921 cfg->cbb = start_bblock;
7922 cfg->args = arg_array;
7923 mono_save_args (cfg, sig, inline_args);
7926 /* FIRST CODE BLOCK */
7927 NEW_BBLOCK (cfg, tblock);
7928 tblock->cil_code = ip;
7932 ADD_BBLOCK (cfg, tblock);
7934 if (cfg->method == method) {
7935 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7936 if (breakpoint_id) {
7937 MONO_INST_NEW (cfg, ins, OP_BREAK);
7938 MONO_ADD_INS (cfg->cbb, ins);
7942 /* we use a separate basic block for the initialization code */
7943 NEW_BBLOCK (cfg, init_localsbb);
7944 cfg->bb_init = init_localsbb;
7945 init_localsbb->real_offset = cfg->real_offset;
7946 start_bblock->next_bb = init_localsbb;
7947 init_localsbb->next_bb = cfg->cbb;
7948 link_bblock (cfg, start_bblock, init_localsbb);
7949 link_bblock (cfg, init_localsbb, cfg->cbb);
7951 cfg->cbb = init_localsbb;
7953 if (cfg->gsharedvt && cfg->method == method) {
7954 MonoGSharedVtMethodInfo *info;
7955 MonoInst *var, *locals_var;
7958 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7959 info->method = cfg->method;
7960 info->count_entries = 16;
7961 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7962 cfg->gsharedvt_info = info;
7964 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7965 /* prevent it from being register allocated */
7966 //var->flags |= MONO_INST_VOLATILE;
7967 cfg->gsharedvt_info_var = var;
7969 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7970 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7972 /* Allocate locals */
7973 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7974 /* prevent it from being register allocated */
7975 //locals_var->flags |= MONO_INST_VOLATILE;
7976 cfg->gsharedvt_locals_var = locals_var;
7978 dreg = alloc_ireg (cfg);
7979 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7981 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7982 ins->dreg = locals_var->dreg;
7984 MONO_ADD_INS (cfg->cbb, ins);
7985 cfg->gsharedvt_locals_var_ins = ins;
7987 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7990 ins->flags |= MONO_INST_INIT;
7994 if (mono_security_core_clr_enabled ()) {
7995 /* check if this is native code, e.g. an icall or a p/invoke */
7996 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7997 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7999 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8000 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8002 /* if this is a native call then it can only be JITted from platform code */
8003 if ((icall || pinvk) && method->klass && method->klass->image) {
8004 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8005 MonoException *ex = icall ? mono_get_exception_security () :
8006 mono_get_exception_method_access ();
8007 emit_throw_exception (cfg, ex);
8014 CHECK_CFG_EXCEPTION;
8016 if (header->code_size == 0)
8019 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8024 if (cfg->method == method)
8025 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8027 for (n = 0; n < header->num_locals; ++n) {
8028 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8033 /* We force the vtable variable here for all shared methods
8034 for the possibility that they might show up in a stack
8035 trace where their exact instantiation is needed. */
8036 if (cfg->gshared && method == cfg->method) {
8037 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8038 mini_method_get_context (method)->method_inst ||
8039 method->klass->valuetype) {
8040 mono_get_vtable_var (cfg);
8042 /* FIXME: Is there a better way to do this?
8043 We need the variable live for the duration
8044 of the whole method. */
8045 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8049 /* add a check for this != NULL to inlined methods */
8050 if (is_virtual_call) {
8053 NEW_ARGLOAD (cfg, arg_ins, 0);
8054 MONO_ADD_INS (cfg->cbb, arg_ins);
8055 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8058 skip_dead_blocks = !dont_verify;
8059 if (skip_dead_blocks) {
8060 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8065 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8066 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8069 start_new_bblock = 0;
8071 if (cfg->method == method)
8072 cfg->real_offset = ip - header->code;
8074 cfg->real_offset = inline_offset;
8079 if (start_new_bblock) {
8080 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8081 if (start_new_bblock == 2) {
8082 g_assert (ip == tblock->cil_code);
8084 GET_BBLOCK (cfg, tblock, ip);
8086 cfg->cbb->next_bb = tblock;
8088 start_new_bblock = 0;
8089 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8090 if (cfg->verbose_level > 3)
8091 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8092 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8096 g_slist_free (class_inits);
8099 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8100 link_bblock (cfg, cfg->cbb, tblock);
8101 if (sp != stack_start) {
8102 handle_stack_args (cfg, stack_start, sp - stack_start);
8104 CHECK_UNVERIFIABLE (cfg);
8106 cfg->cbb->next_bb = tblock;
8108 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8109 if (cfg->verbose_level > 3)
8110 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8111 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8114 g_slist_free (class_inits);
8119 if (skip_dead_blocks) {
8120 int ip_offset = ip - header->code;
8122 if (ip_offset == bb->end)
8126 int op_size = mono_opcode_size (ip, end);
8127 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8129 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8131 if (ip_offset + op_size == bb->end) {
8132 MONO_INST_NEW (cfg, ins, OP_NOP);
8133 MONO_ADD_INS (cfg->cbb, ins);
8134 start_new_bblock = 1;
8142 * Sequence points are points where the debugger can place a breakpoint.
8143 * Currently, we generate these automatically at points where the IL
8146 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8148 * Make methods interruptable at the beginning, and at the targets of
8149 * backward branches.
8150 * Also, do this at the start of every bblock in methods with clauses too,
8151 * to be able to handle instructions with imprecise control flow like
8153 * Backward branches are handled at the end of method-to-ir ().
8155 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8156 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8158 /* Avoid sequence points on empty IL like .volatile */
8159 // FIXME: Enable this
8160 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8161 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8162 if ((sp != stack_start) && !sym_seq_point)
8163 ins->flags |= MONO_INST_NONEMPTY_STACK;
8164 MONO_ADD_INS (cfg->cbb, ins);
8167 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8170 cfg->cbb->real_offset = cfg->real_offset;
8172 if ((cfg->method == method) && cfg->coverage_info) {
8173 guint32 cil_offset = ip - header->code;
8174 cfg->coverage_info->data [cil_offset].cil_code = ip;
8176 /* TODO: Use an increment here */
8177 #if defined(TARGET_X86)
8178 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8179 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8181 MONO_ADD_INS (cfg->cbb, ins);
8183 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8184 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8188 if (cfg->verbose_level > 3)
8189 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8193 if (seq_points && !sym_seq_points && sp != stack_start) {
8195 * The C# compiler uses these nops to notify the JIT that it should
8196 * insert seq points.
8198 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8199 MONO_ADD_INS (cfg->cbb, ins);
8201 if (cfg->keep_cil_nops)
8202 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8204 MONO_INST_NEW (cfg, ins, OP_NOP);
8206 MONO_ADD_INS (cfg->cbb, ins);
8209 if (should_insert_brekpoint (cfg->method)) {
8210 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8212 MONO_INST_NEW (cfg, ins, OP_NOP);
8215 MONO_ADD_INS (cfg->cbb, ins);
8221 CHECK_STACK_OVF (1);
8222 n = (*ip)-CEE_LDARG_0;
8224 EMIT_NEW_ARGLOAD (cfg, ins, n);
8232 CHECK_STACK_OVF (1);
8233 n = (*ip)-CEE_LDLOC_0;
8235 EMIT_NEW_LOCLOAD (cfg, ins, n);
8244 n = (*ip)-CEE_STLOC_0;
8247 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8249 emit_stloc_ir (cfg, sp, header, n);
8256 CHECK_STACK_OVF (1);
8259 EMIT_NEW_ARGLOAD (cfg, ins, n);
8265 CHECK_STACK_OVF (1);
8268 NEW_ARGLOADA (cfg, ins, n);
8269 MONO_ADD_INS (cfg->cbb, ins);
8279 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8281 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8286 CHECK_STACK_OVF (1);
8289 EMIT_NEW_LOCLOAD (cfg, ins, n);
8293 case CEE_LDLOCA_S: {
8294 unsigned char *tmp_ip;
8296 CHECK_STACK_OVF (1);
8297 CHECK_LOCAL (ip [1]);
8299 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8305 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8314 CHECK_LOCAL (ip [1]);
8315 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8317 emit_stloc_ir (cfg, sp, header, ip [1]);
8322 CHECK_STACK_OVF (1);
8323 EMIT_NEW_PCONST (cfg, ins, NULL);
8324 ins->type = STACK_OBJ;
8329 CHECK_STACK_OVF (1);
8330 EMIT_NEW_ICONST (cfg, ins, -1);
8343 CHECK_STACK_OVF (1);
8344 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8350 CHECK_STACK_OVF (1);
8352 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8358 CHECK_STACK_OVF (1);
8359 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8365 CHECK_STACK_OVF (1);
8366 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8367 ins->type = STACK_I8;
8368 ins->dreg = alloc_dreg (cfg, STACK_I8);
8370 ins->inst_l = (gint64)read64 (ip);
8371 MONO_ADD_INS (cfg->cbb, ins);
8377 gboolean use_aotconst = FALSE;
8379 #ifdef TARGET_POWERPC
8380 /* FIXME: Clean this up */
8381 if (cfg->compile_aot)
8382 use_aotconst = TRUE;
8385 /* FIXME: we should really allocate this only late in the compilation process */
8386 f = mono_domain_alloc (cfg->domain, sizeof (float));
8388 CHECK_STACK_OVF (1);
8394 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8396 dreg = alloc_freg (cfg);
8397 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8398 ins->type = cfg->r4_stack_type;
8400 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8401 ins->type = cfg->r4_stack_type;
8402 ins->dreg = alloc_dreg (cfg, STACK_R8);
8404 MONO_ADD_INS (cfg->cbb, ins);
8414 gboolean use_aotconst = FALSE;
8416 #ifdef TARGET_POWERPC
8417 /* FIXME: Clean this up */
8418 if (cfg->compile_aot)
8419 use_aotconst = TRUE;
8422 /* FIXME: we should really allocate this only late in the compilation process */
8423 d = mono_domain_alloc (cfg->domain, sizeof (double));
8425 CHECK_STACK_OVF (1);
8431 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8433 dreg = alloc_freg (cfg);
8434 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8435 ins->type = STACK_R8;
8437 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8438 ins->type = STACK_R8;
8439 ins->dreg = alloc_dreg (cfg, STACK_R8);
8441 MONO_ADD_INS (cfg->cbb, ins);
8450 MonoInst *temp, *store;
8452 CHECK_STACK_OVF (1);
8456 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8457 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8459 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8462 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8475 if (sp [0]->type == STACK_R8)
8476 /* we need to pop the value from the x86 FP stack */
8477 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8483 INLINE_FAILURE ("jmp");
8484 GSHAREDVT_FAILURE (*ip);
8487 if (stack_start != sp)
8489 token = read32 (ip + 1);
8490 /* FIXME: check the signature matches */
8491 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8493 if (!cmethod || mono_loader_get_last_error ())
8496 if (cfg->gshared && mono_method_check_context_used (cmethod))
8497 GENERIC_SHARING_FAILURE (CEE_JMP);
8499 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8501 if (ARCH_HAVE_OP_TAIL_CALL) {
8502 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8505 /* Handle tail calls similarly to calls */
8506 n = fsig->param_count + fsig->hasthis;
8510 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8511 call->method = cmethod;
8512 call->tail_call = TRUE;
8513 call->signature = mono_method_signature (cmethod);
8514 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8515 call->inst.inst_p0 = cmethod;
8516 for (i = 0; i < n; ++i)
8517 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8519 mono_arch_emit_call (cfg, call);
8520 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8521 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8523 for (i = 0; i < num_args; ++i)
8524 /* Prevent arguments from being optimized away */
8525 arg_array [i]->flags |= MONO_INST_VOLATILE;
8527 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8528 ins = (MonoInst*)call;
8529 ins->inst_p0 = cmethod;
8530 MONO_ADD_INS (cfg->cbb, ins);
8534 start_new_bblock = 1;
8539 MonoMethodSignature *fsig;
8542 token = read32 (ip + 1);
8546 //GSHAREDVT_FAILURE (*ip);
8551 fsig = mini_get_signature (method, token, generic_context);
8553 if (method->dynamic && fsig->pinvoke) {
8557 * This is a call through a function pointer using a pinvoke
8558 * signature. Have to create a wrapper and call that instead.
8559 * FIXME: This is very slow, need to create a wrapper at JIT time
8560 * instead based on the signature.
8562 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8563 EMIT_NEW_PCONST (cfg, args [1], fsig);
8565 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8568 n = fsig->param_count + fsig->hasthis;
8572 //g_assert (!virtual || fsig->hasthis);
8576 inline_costs += 10 * num_calls++;
8579 * Making generic calls out of gsharedvt methods.
8580 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8581 * patching gshared method addresses into a gsharedvt method.
8583 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8585 * We pass the address to the gsharedvt trampoline in the rgctx reg
8587 MonoInst *callee = addr;
8589 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8591 GSHAREDVT_FAILURE (*ip);
8593 addr = emit_get_rgctx_sig (cfg, context_used,
8594 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8595 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8599 /* Prevent inlining of methods with indirect calls */
8600 INLINE_FAILURE ("indirect call");
8602 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8607 * Instead of emitting an indirect call, emit a direct call
8608 * with the contents of the aotconst as the patch info.
8610 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8611 info_type = addr->inst_c1;
8612 info_data = addr->inst_p0;
8614 info_type = addr->inst_right->inst_c1;
8615 info_data = addr->inst_right->inst_left;
8618 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8619 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8624 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8628 /* End of call, INS should contain the result of the call, if any */
8630 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8632 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8635 CHECK_CFG_EXCEPTION;
8639 constrained_class = NULL;
8643 case CEE_CALLVIRT: {
8644 MonoInst *addr = NULL;
8645 MonoMethodSignature *fsig = NULL;
8647 int virtual = *ip == CEE_CALLVIRT;
8648 gboolean pass_imt_from_rgctx = FALSE;
8649 MonoInst *imt_arg = NULL;
8650 MonoInst *keep_this_alive = NULL;
8651 gboolean pass_vtable = FALSE;
8652 gboolean pass_mrgctx = FALSE;
8653 MonoInst *vtable_arg = NULL;
8654 gboolean check_this = FALSE;
8655 gboolean supported_tail_call = FALSE;
8656 gboolean tail_call = FALSE;
8657 gboolean need_seq_point = FALSE;
8658 guint32 call_opcode = *ip;
8659 gboolean emit_widen = TRUE;
8660 gboolean push_res = TRUE;
8661 gboolean skip_ret = FALSE;
8662 gboolean delegate_invoke = FALSE;
8663 gboolean direct_icall = FALSE;
8664 gboolean constrained_partial_call = FALSE;
8665 MonoMethod *cil_method;
8668 token = read32 (ip + 1);
8672 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8673 cil_method = cmethod;
8675 if (constrained_class) {
8676 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8677 if (!mini_is_gsharedvt_klass (constrained_class)) {
8678 g_assert (!cmethod->klass->valuetype);
8679 if (!mini_type_is_reference (&constrained_class->byval_arg))
8680 constrained_partial_call = TRUE;
8684 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8685 if (cfg->verbose_level > 2)
8686 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8687 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8688 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8690 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8694 if (cfg->verbose_level > 2)
8695 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8697 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8699 * This is needed since get_method_constrained can't find
8700 * the method in klass representing a type var.
8701 * The type var is guaranteed to be a reference type in this
8704 if (!mini_is_gsharedvt_klass (constrained_class))
8705 g_assert (!cmethod->klass->valuetype);
8707 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8713 if (!cmethod || mono_loader_get_last_error ())
8715 if (!dont_verify && !cfg->skip_visibility) {
8716 MonoMethod *target_method = cil_method;
8717 if (method->is_inflated) {
8718 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8720 if (!mono_method_can_access_method (method_definition, target_method) &&
8721 !mono_method_can_access_method (method, cil_method))
8722 METHOD_ACCESS_FAILURE (method, cil_method);
8725 if (mono_security_core_clr_enabled ())
8726 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8728 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8729 /* MS.NET seems to silently convert this to a callvirt */
8734 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8735 * converts to a callvirt.
8737 * tests/bug-515884.il is an example of this behavior
8739 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8740 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8741 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8745 if (!cmethod->klass->inited)
8746 if (!mono_class_init (cmethod->klass))
8747 TYPE_LOAD_ERROR (cmethod->klass);
8749 fsig = mono_method_signature (cmethod);
8752 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8753 mini_class_is_system_array (cmethod->klass)) {
8754 array_rank = cmethod->klass->rank;
8755 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8756 direct_icall = TRUE;
8757 } else if (fsig->pinvoke) {
8758 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8759 fsig = mono_method_signature (wrapper);
8760 } else if (constrained_class) {
8762 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8766 /* See code below */
8767 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8768 MonoBasicBlock *tbb;
8770 GET_BBLOCK (cfg, tbb, ip + 5);
8771 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8773 * We want to extend the try block to cover the call, but we can't do it if the
8774 * call is made directly since its followed by an exception check.
8776 direct_icall = FALSE;
8780 mono_save_token_info (cfg, image, token, cil_method);
8782 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8783 need_seq_point = TRUE;
8785 /* Don't support calls made using type arguments for now */
8787 if (cfg->gsharedvt) {
8788 if (mini_is_gsharedvt_signature (fsig))
8789 GSHAREDVT_FAILURE (*ip);
8793 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8794 g_assert_not_reached ();
8796 n = fsig->param_count + fsig->hasthis;
8798 if (!cfg->gshared && cmethod->klass->generic_container)
8802 g_assert (!mono_method_check_context_used (cmethod));
8806 //g_assert (!virtual || fsig->hasthis);
8810 if (constrained_class) {
8811 if (mini_is_gsharedvt_klass (constrained_class)) {
8812 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8813 /* The 'Own method' case below */
8814 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8815 /* 'The type parameter is instantiated as a reference type' case below. */
8817 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8818 CHECK_CFG_EXCEPTION;
8825 * We have the `constrained.' prefix opcode.
8827 if (constrained_partial_call) {
8828 gboolean need_box = TRUE;
8831 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8832 * called method is not known at compile time either. The called method could end up being
8833 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8834 * to box the receiver.
8835 * A simple solution would be to box always and make a normal virtual call, but that would
8836 * be bad performance wise.
8838 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8840 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8845 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8846 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8847 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8848 ins->klass = constrained_class;
8849 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8850 CHECK_CFG_EXCEPTION;
8851 } else if (need_box) {
8853 MonoBasicBlock *is_ref_bb, *end_bb;
8854 MonoInst *nonbox_call;
8857 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8859 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8860 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8862 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8864 NEW_BBLOCK (cfg, is_ref_bb);
8865 NEW_BBLOCK (cfg, end_bb);
8867 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8869 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8872 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8874 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8877 MONO_START_BB (cfg, is_ref_bb);
8878 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8879 ins->klass = constrained_class;
8880 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8881 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8883 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8885 MONO_START_BB (cfg, end_bb);
8888 nonbox_call->dreg = ins->dreg;
8891 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8892 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8893 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8896 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8898 * The type parameter is instantiated as a valuetype,
8899 * but that type doesn't override the method we're
8900 * calling, so we need to box `this'.
8902 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8903 ins->klass = constrained_class;
8904 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8905 CHECK_CFG_EXCEPTION;
8906 } else if (!constrained_class->valuetype) {
8907 int dreg = alloc_ireg_ref (cfg);
8910 * The type parameter is instantiated as a reference
8911 * type. We have a managed pointer on the stack, so
8912 * we need to dereference it here.
8914 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8915 ins->type = STACK_OBJ;
8918 if (cmethod->klass->valuetype) {
8921 /* Interface method */
8924 mono_class_setup_vtable (constrained_class);
8925 CHECK_TYPELOAD (constrained_class);
8926 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8928 TYPE_LOAD_ERROR (constrained_class);
8929 slot = mono_method_get_vtable_slot (cmethod);
8931 TYPE_LOAD_ERROR (cmethod->klass);
8932 cmethod = constrained_class->vtable [ioffset + slot];
8934 if (cmethod->klass == mono_defaults.enum_class) {
8935 /* Enum implements some interfaces, so treat this as the first case */
8936 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8937 ins->klass = constrained_class;
8938 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8939 CHECK_CFG_EXCEPTION;
8944 constrained_class = NULL;
8947 if (check_call_signature (cfg, fsig, sp))
8950 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8951 delegate_invoke = TRUE;
8953 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8954 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8955 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8963 * If the callee is a shared method, then its static cctor
8964 * might not get called after the call was patched.
8966 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8967 emit_class_init (cfg, cmethod->klass);
8968 CHECK_TYPELOAD (cmethod->klass);
8971 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8974 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8976 context_used = mini_method_check_context_used (cfg, cmethod);
8978 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8979 /* Generic method interface
8980 calls are resolved via a
8981 helper function and don't
8983 if (!cmethod_context || !cmethod_context->method_inst)
8984 pass_imt_from_rgctx = TRUE;
8988 * If a shared method calls another
8989 * shared method then the caller must
8990 * have a generic sharing context
8991 * because the magic trampoline
8992 * requires it. FIXME: We shouldn't
8993 * have to force the vtable/mrgctx
8994 * variable here. Instead there
8995 * should be a flag in the cfg to
8996 * request a generic sharing context.
8999 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9000 mono_get_vtable_var (cfg);
9005 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9007 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9009 CHECK_TYPELOAD (cmethod->klass);
9010 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9015 g_assert (!vtable_arg);
9017 if (!cfg->compile_aot) {
9019 * emit_get_rgctx_method () calls mono_class_vtable () so check
9020 * for type load errors before.
9022 mono_class_setup_vtable (cmethod->klass);
9023 CHECK_TYPELOAD (cmethod->klass);
9026 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9028 /* !marshalbyref is needed to properly handle generic methods + remoting */
9029 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9030 MONO_METHOD_IS_FINAL (cmethod)) &&
9031 !mono_class_is_marshalbyref (cmethod->klass)) {
9038 if (pass_imt_from_rgctx) {
9039 g_assert (!pass_vtable);
9041 imt_arg = emit_get_rgctx_method (cfg, context_used,
9042 cmethod, MONO_RGCTX_INFO_METHOD);
9046 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9048 /* Calling virtual generic methods */
9049 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9050 !(MONO_METHOD_IS_FINAL (cmethod) &&
9051 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9052 fsig->generic_param_count &&
9053 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))) {
9054 MonoInst *this_temp, *this_arg_temp, *store;
9055 MonoInst *iargs [4];
9056 gboolean use_imt = FALSE;
9058 g_assert (fsig->is_inflated);
9060 /* Prevent inlining of methods that contain indirect calls */
9061 INLINE_FAILURE ("virtual generic call");
9063 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9064 GSHAREDVT_FAILURE (*ip);
9066 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9067 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9072 g_assert (!imt_arg);
9074 g_assert (cmethod->is_inflated);
9075 imt_arg = emit_get_rgctx_method (cfg, context_used,
9076 cmethod, MONO_RGCTX_INFO_METHOD);
9077 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9079 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9080 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9081 MONO_ADD_INS (cfg->cbb, store);
9083 /* FIXME: This should be a managed pointer */
9084 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9086 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9087 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9088 cmethod, MONO_RGCTX_INFO_METHOD);
9089 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9090 addr = mono_emit_jit_icall (cfg,
9091 mono_helper_compile_generic_method, iargs);
9093 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9095 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9102 * Implement a workaround for the inherent races involved in locking:
9108 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9109 * try block, the Exit () won't be executed, see:
9110 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9111 * To work around this, we extend such try blocks to include the last x bytes
9112 * of the Monitor.Enter () call.
9114 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9115 MonoBasicBlock *tbb;
9117 GET_BBLOCK (cfg, tbb, ip + 5);
9119 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9120 * from Monitor.Enter like ArgumentNullException.
9122 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9123 /* Mark this bblock as needing to be extended */
9124 tbb->extend_try_block = TRUE;
9128 /* Conversion to a JIT intrinsic */
9129 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9130 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9131 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9138 if ((cfg->opt & MONO_OPT_INLINE) &&
9139 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9140 mono_method_check_inlining (cfg, cmethod)) {
9142 gboolean always = FALSE;
9144 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9145 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9146 /* Prevent inlining of methods that call wrappers */
9147 INLINE_FAILURE ("wrapper call");
9148 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9152 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9154 cfg->real_offset += 5;
9156 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9157 /* *sp is already set by inline_method */
9162 inline_costs += costs;
9168 /* Tail recursion elimination */
9169 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9170 gboolean has_vtargs = FALSE;
9173 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9174 INLINE_FAILURE ("tail call");
9176 /* keep it simple */
9177 for (i = fsig->param_count - 1; i >= 0; i--) {
9178 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9183 for (i = 0; i < n; ++i)
9184 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9185 MONO_INST_NEW (cfg, ins, OP_BR);
9186 MONO_ADD_INS (cfg->cbb, ins);
9187 tblock = start_bblock->out_bb [0];
9188 link_bblock (cfg, cfg->cbb, tblock);
9189 ins->inst_target_bb = tblock;
9190 start_new_bblock = 1;
9192 /* skip the CEE_RET, too */
9193 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9200 inline_costs += 10 * num_calls++;
9203 * Making generic calls out of gsharedvt methods.
9204 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9205 * patching gshared method addresses into a gsharedvt method.
9207 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9208 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9209 MonoRgctxInfoType info_type;
9212 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9213 //GSHAREDVT_FAILURE (*ip);
9214 // disable for possible remoting calls
9215 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9216 GSHAREDVT_FAILURE (*ip);
9217 if (fsig->generic_param_count) {
9218 /* virtual generic call */
9219 g_assert (!imt_arg);
9220 /* Same as the virtual generic case above */
9221 imt_arg = emit_get_rgctx_method (cfg, context_used,
9222 cmethod, MONO_RGCTX_INFO_METHOD);
9223 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9225 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9226 /* This can happen when we call a fully instantiated iface method */
9227 imt_arg = emit_get_rgctx_method (cfg, context_used,
9228 cmethod, MONO_RGCTX_INFO_METHOD);
9233 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9234 keep_this_alive = sp [0];
9236 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9237 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9239 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9240 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9242 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9246 /* Generic sharing */
9249 * Use this if the callee is gsharedvt sharable too, since
9250 * at runtime we might find an instantiation so the call cannot
9251 * be patched (the 'no_patch' code path in mini-trampolines.c).
9253 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9254 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9255 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9256 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9257 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9258 INLINE_FAILURE ("gshared");
9260 g_assert (cfg->gshared && cmethod);
9264 * We are compiling a call to a
9265 * generic method from shared code,
9266 * which means that we have to look up
9267 * the method in the rgctx and do an
9271 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9273 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9274 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9278 /* Direct calls to icalls */
9280 MonoMethod *wrapper;
9283 /* Inline the wrapper */
9284 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9286 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9287 g_assert (costs > 0);
9288 cfg->real_offset += 5;
9290 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9291 /* *sp is already set by inline_method */
9296 inline_costs += costs;
9305 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9306 MonoInst *val = sp [fsig->param_count];
9308 if (val->type == STACK_OBJ) {
9309 MonoInst *iargs [2];
9314 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9317 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9318 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9319 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9320 emit_write_barrier (cfg, addr, val);
9321 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9322 GSHAREDVT_FAILURE (*ip);
9323 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9324 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9326 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9327 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9328 if (!cmethod->klass->element_class->valuetype && !readonly)
9329 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9330 CHECK_TYPELOAD (cmethod->klass);
9333 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9336 g_assert_not_reached ();
9343 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9347 /* Tail prefix / tail call optimization */
9349 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9350 /* FIXME: runtime generic context pointer for jumps? */
9351 /* FIXME: handle this for generic sharing eventually */
9352 if ((ins_flag & MONO_INST_TAILCALL) &&
9353 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9354 supported_tail_call = TRUE;
9356 if (supported_tail_call) {
9359 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9360 INLINE_FAILURE ("tail call");
9362 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9364 if (ARCH_HAVE_OP_TAIL_CALL) {
9365 /* Handle tail calls similarly to normal calls */
9368 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9370 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9371 call->tail_call = TRUE;
9372 call->method = cmethod;
9373 call->signature = mono_method_signature (cmethod);
9376 * We implement tail calls by storing the actual arguments into the
9377 * argument variables, then emitting a CEE_JMP.
9379 for (i = 0; i < n; ++i) {
9380 /* Prevent argument from being register allocated */
9381 arg_array [i]->flags |= MONO_INST_VOLATILE;
9382 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9384 ins = (MonoInst*)call;
9385 ins->inst_p0 = cmethod;
9386 ins->inst_p1 = arg_array [0];
9387 MONO_ADD_INS (cfg->cbb, ins);
9388 link_bblock (cfg, cfg->cbb, end_bblock);
9389 start_new_bblock = 1;
9391 // FIXME: Eliminate unreachable epilogs
9394 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9395 * only reachable from this call.
9397 GET_BBLOCK (cfg, tblock, ip + 5);
9398 if (tblock == cfg->cbb || tblock->in_count == 0)
9407 * Synchronized wrappers.
9408 * It's hard to determine where to replace a method with its synchronized
9409 * wrapper without causing an infinite recursion. The current solution is
9410 * to add the synchronized wrapper in the trampolines, and to
9411 * change the called method to a dummy wrapper, and resolve that wrapper
9412 * to the real method in mono_jit_compile_method ().
9414 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9415 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9416 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9417 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9421 INLINE_FAILURE ("call");
9422 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9423 imt_arg, vtable_arg);
9426 link_bblock (cfg, cfg->cbb, end_bblock);
9427 start_new_bblock = 1;
9429 // FIXME: Eliminate unreachable epilogs
9432 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9433 * only reachable from this call.
9435 GET_BBLOCK (cfg, tblock, ip + 5);
9436 if (tblock == cfg->cbb || tblock->in_count == 0)
9443 /* End of call, INS should contain the result of the call, if any */
9445 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9448 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9453 if (keep_this_alive) {
9454 MonoInst *dummy_use;
9456 /* See mono_emit_method_call_full () */
9457 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9460 CHECK_CFG_EXCEPTION;
9464 g_assert (*ip == CEE_RET);
9468 constrained_class = NULL;
9470 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9474 if (cfg->method != method) {
9475 /* return from inlined method */
9477 * If in_count == 0, that means the ret is unreachable due to
9478 * being preceded by a throw. In that case, inline_method () will
9479 * handle setting the return value
9480 * (test case: test_0_inline_throw ()).
9482 if (return_var && cfg->cbb->in_count) {
9483 MonoType *ret_type = mono_method_signature (method)->ret;
9489 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9492 //g_assert (returnvar != -1);
9493 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9494 cfg->ret_var_set = TRUE;
9497 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9499 if (cfg->lmf_var && cfg->cbb->in_count)
9503 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9505 if (seq_points && !sym_seq_points) {
9507 * Place a seq point here too even though the IL stack is not
9508 * empty, so a step over on
9511 * will work correctly.
9513 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9514 MONO_ADD_INS (cfg->cbb, ins);
9517 g_assert (!return_var);
9521 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9524 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9527 if (!cfg->vret_addr) {
9530 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9532 EMIT_NEW_RETLOADA (cfg, ret_addr);
9534 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9535 ins->klass = mono_class_from_mono_type (ret_type);
9538 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9539 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9540 MonoInst *iargs [1];
9544 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9545 mono_arch_emit_setret (cfg, method, conv);
9547 mono_arch_emit_setret (cfg, method, *sp);
9550 mono_arch_emit_setret (cfg, method, *sp);
9555 if (sp != stack_start)
9557 MONO_INST_NEW (cfg, ins, OP_BR);
9559 ins->inst_target_bb = end_bblock;
9560 MONO_ADD_INS (cfg->cbb, ins);
9561 link_bblock (cfg, cfg->cbb, end_bblock);
9562 start_new_bblock = 1;
9566 MONO_INST_NEW (cfg, ins, OP_BR);
9568 target = ip + 1 + (signed char)(*ip);
9570 GET_BBLOCK (cfg, tblock, target);
9571 link_bblock (cfg, cfg->cbb, tblock);
9572 ins->inst_target_bb = tblock;
9573 if (sp != stack_start) {
9574 handle_stack_args (cfg, stack_start, sp - stack_start);
9576 CHECK_UNVERIFIABLE (cfg);
9578 MONO_ADD_INS (cfg->cbb, ins);
9579 start_new_bblock = 1;
9580 inline_costs += BRANCH_COST;
9594 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9596 target = ip + 1 + *(signed char*)ip;
9602 inline_costs += BRANCH_COST;
9606 MONO_INST_NEW (cfg, ins, OP_BR);
9609 target = ip + 4 + (gint32)read32(ip);
9611 GET_BBLOCK (cfg, tblock, target);
9612 link_bblock (cfg, cfg->cbb, tblock);
9613 ins->inst_target_bb = tblock;
9614 if (sp != stack_start) {
9615 handle_stack_args (cfg, stack_start, sp - stack_start);
9617 CHECK_UNVERIFIABLE (cfg);
9620 MONO_ADD_INS (cfg->cbb, ins);
9622 start_new_bblock = 1;
9623 inline_costs += BRANCH_COST;
9630 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9631 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9632 guint32 opsize = is_short ? 1 : 4;
9634 CHECK_OPSIZE (opsize);
9636 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9639 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9644 GET_BBLOCK (cfg, tblock, target);
9645 link_bblock (cfg, cfg->cbb, tblock);
9646 GET_BBLOCK (cfg, tblock, ip);
9647 link_bblock (cfg, cfg->cbb, tblock);
9649 if (sp != stack_start) {
9650 handle_stack_args (cfg, stack_start, sp - stack_start);
9651 CHECK_UNVERIFIABLE (cfg);
9654 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9655 cmp->sreg1 = sp [0]->dreg;
9656 type_from_op (cfg, cmp, sp [0], NULL);
9659 #if SIZEOF_REGISTER == 4
9660 if (cmp->opcode == OP_LCOMPARE_IMM) {
9661 /* Convert it to OP_LCOMPARE */
9662 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9663 ins->type = STACK_I8;
9664 ins->dreg = alloc_dreg (cfg, STACK_I8);
9666 MONO_ADD_INS (cfg->cbb, ins);
9667 cmp->opcode = OP_LCOMPARE;
9668 cmp->sreg2 = ins->dreg;
9671 MONO_ADD_INS (cfg->cbb, cmp);
9673 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9674 type_from_op (cfg, ins, sp [0], NULL);
9675 MONO_ADD_INS (cfg->cbb, ins);
9676 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9677 GET_BBLOCK (cfg, tblock, target);
9678 ins->inst_true_bb = tblock;
9679 GET_BBLOCK (cfg, tblock, ip);
9680 ins->inst_false_bb = tblock;
9681 start_new_bblock = 2;
9684 inline_costs += BRANCH_COST;
9699 MONO_INST_NEW (cfg, ins, *ip);
9701 target = ip + 4 + (gint32)read32(ip);
9707 inline_costs += BRANCH_COST;
9711 MonoBasicBlock **targets;
9712 MonoBasicBlock *default_bblock;
9713 MonoJumpInfoBBTable *table;
9714 int offset_reg = alloc_preg (cfg);
9715 int target_reg = alloc_preg (cfg);
9716 int table_reg = alloc_preg (cfg);
9717 int sum_reg = alloc_preg (cfg);
9718 gboolean use_op_switch;
9722 n = read32 (ip + 1);
9725 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9729 CHECK_OPSIZE (n * sizeof (guint32));
9730 target = ip + n * sizeof (guint32);
9732 GET_BBLOCK (cfg, default_bblock, target);
9733 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9735 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9736 for (i = 0; i < n; ++i) {
9737 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9738 targets [i] = tblock;
9739 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9743 if (sp != stack_start) {
9745 * Link the current bb with the targets as well, so handle_stack_args
9746 * will set their in_stack correctly.
9748 link_bblock (cfg, cfg->cbb, default_bblock);
9749 for (i = 0; i < n; ++i)
9750 link_bblock (cfg, cfg->cbb, targets [i]);
9752 handle_stack_args (cfg, stack_start, sp - stack_start);
9754 CHECK_UNVERIFIABLE (cfg);
9756 /* Undo the links */
9757 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9758 for (i = 0; i < n; ++i)
9759 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9762 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9765 for (i = 0; i < n; ++i)
9766 link_bblock (cfg, cfg->cbb, targets [i]);
9768 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9769 table->table = targets;
9770 table->table_size = n;
9772 use_op_switch = FALSE;
9774 /* ARM implements SWITCH statements differently */
9775 /* FIXME: Make it use the generic implementation */
9776 if (!cfg->compile_aot)
9777 use_op_switch = TRUE;
9780 if (COMPILE_LLVM (cfg))
9781 use_op_switch = TRUE;
9783 cfg->cbb->has_jump_table = 1;
9785 if (use_op_switch) {
9786 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9787 ins->sreg1 = src1->dreg;
9788 ins->inst_p0 = table;
9789 ins->inst_many_bb = targets;
9790 ins->klass = GUINT_TO_POINTER (n);
9791 MONO_ADD_INS (cfg->cbb, ins);
9793 if (sizeof (gpointer) == 8)
9794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9798 #if SIZEOF_REGISTER == 8
9799 /* The upper word might not be zero, and we add it to a 64 bit address later */
9800 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9803 if (cfg->compile_aot) {
9804 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9806 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9807 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9808 ins->inst_p0 = table;
9809 ins->dreg = table_reg;
9810 MONO_ADD_INS (cfg->cbb, ins);
9813 /* FIXME: Use load_memindex */
9814 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9816 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9818 start_new_bblock = 1;
9819 inline_costs += (BRANCH_COST * 2);
9839 dreg = alloc_freg (cfg);
9842 dreg = alloc_lreg (cfg);
9845 dreg = alloc_ireg_ref (cfg);
9848 dreg = alloc_preg (cfg);
9851 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9852 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9853 if (*ip == CEE_LDIND_R4)
9854 ins->type = cfg->r4_stack_type;
9855 ins->flags |= ins_flag;
9856 MONO_ADD_INS (cfg->cbb, ins);
9858 if (ins_flag & MONO_INST_VOLATILE) {
9859 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9860 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9876 if (ins_flag & MONO_INST_VOLATILE) {
9877 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9878 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9881 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9882 ins->flags |= ins_flag;
9885 MONO_ADD_INS (cfg->cbb, ins);
9887 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9888 emit_write_barrier (cfg, sp [0], sp [1]);
9897 MONO_INST_NEW (cfg, ins, (*ip));
9899 ins->sreg1 = sp [0]->dreg;
9900 ins->sreg2 = sp [1]->dreg;
9901 type_from_op (cfg, ins, sp [0], sp [1]);
9903 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9905 /* Use the immediate opcodes if possible */
9906 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9907 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9908 if (imm_opcode != -1) {
9909 ins->opcode = imm_opcode;
9910 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9913 NULLIFY_INS (sp [1]);
9917 MONO_ADD_INS ((cfg)->cbb, (ins));
9919 *sp++ = mono_decompose_opcode (cfg, ins);
9936 MONO_INST_NEW (cfg, ins, (*ip));
9938 ins->sreg1 = sp [0]->dreg;
9939 ins->sreg2 = sp [1]->dreg;
9940 type_from_op (cfg, ins, sp [0], sp [1]);
9942 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9943 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9945 /* FIXME: Pass opcode to is_inst_imm */
9947 /* Use the immediate opcodes if possible */
9948 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9951 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9952 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9953 /* Keep emulated opcodes which are optimized away later */
9954 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9955 imm_opcode = mono_op_to_op_imm (ins->opcode);
9958 if (imm_opcode != -1) {
9959 ins->opcode = imm_opcode;
9960 if (sp [1]->opcode == OP_I8CONST) {
9961 #if SIZEOF_REGISTER == 8
9962 ins->inst_imm = sp [1]->inst_l;
9964 ins->inst_ls_word = sp [1]->inst_ls_word;
9965 ins->inst_ms_word = sp [1]->inst_ms_word;
9969 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9972 /* Might be followed by an instruction added by add_widen_op */
9973 if (sp [1]->next == NULL)
9974 NULLIFY_INS (sp [1]);
9977 MONO_ADD_INS ((cfg)->cbb, (ins));
9979 *sp++ = mono_decompose_opcode (cfg, ins);
9992 case CEE_CONV_OVF_I8:
9993 case CEE_CONV_OVF_U8:
9997 /* Special case this earlier so we have long constants in the IR */
9998 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9999 int data = sp [-1]->inst_c0;
10000 sp [-1]->opcode = OP_I8CONST;
10001 sp [-1]->type = STACK_I8;
10002 #if SIZEOF_REGISTER == 8
10003 if ((*ip) == CEE_CONV_U8)
10004 sp [-1]->inst_c0 = (guint32)data;
10006 sp [-1]->inst_c0 = data;
10008 sp [-1]->inst_ls_word = data;
10009 if ((*ip) == CEE_CONV_U8)
10010 sp [-1]->inst_ms_word = 0;
10012 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10014 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10021 case CEE_CONV_OVF_I4:
10022 case CEE_CONV_OVF_I1:
10023 case CEE_CONV_OVF_I2:
10024 case CEE_CONV_OVF_I:
10025 case CEE_CONV_OVF_U:
10028 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10029 ADD_UNOP (CEE_CONV_OVF_I8);
10036 case CEE_CONV_OVF_U1:
10037 case CEE_CONV_OVF_U2:
10038 case CEE_CONV_OVF_U4:
10041 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10042 ADD_UNOP (CEE_CONV_OVF_U8);
10049 case CEE_CONV_OVF_I1_UN:
10050 case CEE_CONV_OVF_I2_UN:
10051 case CEE_CONV_OVF_I4_UN:
10052 case CEE_CONV_OVF_I8_UN:
10053 case CEE_CONV_OVF_U1_UN:
10054 case CEE_CONV_OVF_U2_UN:
10055 case CEE_CONV_OVF_U4_UN:
10056 case CEE_CONV_OVF_U8_UN:
10057 case CEE_CONV_OVF_I_UN:
10058 case CEE_CONV_OVF_U_UN:
10065 CHECK_CFG_EXCEPTION;
10069 case CEE_ADD_OVF_UN:
10071 case CEE_MUL_OVF_UN:
10073 case CEE_SUB_OVF_UN:
10079 GSHAREDVT_FAILURE (*ip);
10082 token = read32 (ip + 1);
10083 klass = mini_get_class (method, token, generic_context);
10084 CHECK_TYPELOAD (klass);
10086 if (generic_class_is_reference_type (cfg, klass)) {
10087 MonoInst *store, *load;
10088 int dreg = alloc_ireg_ref (cfg);
10090 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10091 load->flags |= ins_flag;
10092 MONO_ADD_INS (cfg->cbb, load);
10094 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10095 store->flags |= ins_flag;
10096 MONO_ADD_INS (cfg->cbb, store);
10098 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10099 emit_write_barrier (cfg, sp [0], sp [1]);
10101 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10107 int loc_index = -1;
10113 token = read32 (ip + 1);
10114 klass = mini_get_class (method, token, generic_context);
10115 CHECK_TYPELOAD (klass);
10117 /* Optimize the common ldobj+stloc combination */
10120 loc_index = ip [6];
10127 loc_index = ip [5] - CEE_STLOC_0;
10134 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10135 CHECK_LOCAL (loc_index);
10137 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10138 ins->dreg = cfg->locals [loc_index]->dreg;
10139 ins->flags |= ins_flag;
10142 if (ins_flag & MONO_INST_VOLATILE) {
10143 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10144 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10150 /* Optimize the ldobj+stobj combination */
10151 /* The reference case ends up being a load+store anyway */
10152 /* Skip this if the operation is volatile. */
10153 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10158 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10166 ins->flags |= ins_flag;
10169 if (ins_flag & MONO_INST_VOLATILE) {
10170 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10171 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10180 CHECK_STACK_OVF (1);
10182 n = read32 (ip + 1);
10184 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10185 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10186 ins->type = STACK_OBJ;
10189 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10190 MonoInst *iargs [1];
10191 char *str = mono_method_get_wrapper_data (method, n);
10193 if (cfg->compile_aot)
10194 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10196 EMIT_NEW_PCONST (cfg, iargs [0], str);
10197 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10199 if (cfg->opt & MONO_OPT_SHARED) {
10200 MonoInst *iargs [3];
10202 if (cfg->compile_aot) {
10203 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10205 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10206 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10207 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10208 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10209 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10211 if (cfg->cbb->out_of_line) {
10212 MonoInst *iargs [2];
10214 if (image == mono_defaults.corlib) {
10216 * Avoid relocations in AOT and save some space by using a
10217 * version of helper_ldstr specialized to mscorlib.
10219 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10220 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10222 /* Avoid creating the string object */
10223 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10224 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10225 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10229 if (cfg->compile_aot) {
10230 NEW_LDSTRCONST (cfg, ins, image, n);
10232 MONO_ADD_INS (cfg->cbb, ins);
10235 NEW_PCONST (cfg, ins, NULL);
10236 ins->type = STACK_OBJ;
10237 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10239 OUT_OF_MEMORY_FAILURE;
10242 MONO_ADD_INS (cfg->cbb, ins);
10251 MonoInst *iargs [2];
10252 MonoMethodSignature *fsig;
10255 MonoInst *vtable_arg = NULL;
10258 token = read32 (ip + 1);
10259 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10260 if (!cmethod || mono_loader_get_last_error ())
10262 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10265 mono_save_token_info (cfg, image, token, cmethod);
10267 if (!mono_class_init (cmethod->klass))
10268 TYPE_LOAD_ERROR (cmethod->klass);
10270 context_used = mini_method_check_context_used (cfg, cmethod);
10272 if (mono_security_core_clr_enabled ())
10273 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10275 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10276 emit_class_init (cfg, cmethod->klass);
10277 CHECK_TYPELOAD (cmethod->klass);
10281 if (cfg->gsharedvt) {
10282 if (mini_is_gsharedvt_variable_signature (sig))
10283 GSHAREDVT_FAILURE (*ip);
10287 n = fsig->param_count;
10291 * Generate smaller code for the common newobj <exception> instruction in
10292 * argument checking code.
10294 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10295 is_exception_class (cmethod->klass) && n <= 2 &&
10296 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10297 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10298 MonoInst *iargs [3];
10302 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10305 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10308 iargs [1] = sp [0];
10309 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10312 iargs [1] = sp [0];
10313 iargs [2] = sp [1];
10314 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10317 g_assert_not_reached ();
10325 /* move the args to allow room for 'this' in the first position */
10331 /* check_call_signature () requires sp[0] to be set */
10332 this_ins.type = STACK_OBJ;
10333 sp [0] = &this_ins;
10334 if (check_call_signature (cfg, fsig, sp))
10339 if (mini_class_is_system_array (cmethod->klass)) {
10340 *sp = emit_get_rgctx_method (cfg, context_used,
10341 cmethod, MONO_RGCTX_INFO_METHOD);
10343 /* Avoid varargs in the common case */
10344 if (fsig->param_count == 1)
10345 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10346 else if (fsig->param_count == 2)
10347 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10348 else if (fsig->param_count == 3)
10349 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10350 else if (fsig->param_count == 4)
10351 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10353 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10354 } else if (cmethod->string_ctor) {
10355 g_assert (!context_used);
10356 g_assert (!vtable_arg);
10357 /* we simply pass a null pointer */
10358 EMIT_NEW_PCONST (cfg, *sp, NULL);
10359 /* now call the string ctor */
10360 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10362 if (cmethod->klass->valuetype) {
10363 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10364 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10365 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10370 * The code generated by mini_emit_virtual_call () expects
10371 * iargs [0] to be a boxed instance, but luckily the vcall
10372 * will be transformed into a normal call there.
10374 } else if (context_used) {
10375 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10378 MonoVTable *vtable = NULL;
10380 if (!cfg->compile_aot)
10381 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10382 CHECK_TYPELOAD (cmethod->klass);
10385 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10386 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10387 * As a workaround, we call class cctors before allocating objects.
10389 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10390 emit_class_init (cfg, cmethod->klass);
10391 if (cfg->verbose_level > 2)
10392 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10393 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10396 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10399 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10402 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10404 /* Now call the actual ctor */
10405 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10406 CHECK_CFG_EXCEPTION;
10409 if (alloc == NULL) {
10411 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10412 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10420 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10421 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10424 case CEE_CASTCLASS:
10428 token = read32 (ip + 1);
10429 klass = mini_get_class (method, token, generic_context);
10430 CHECK_TYPELOAD (klass);
10431 if (sp [0]->type != STACK_OBJ)
10434 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10435 CHECK_CFG_EXCEPTION;
10444 token = read32 (ip + 1);
10445 klass = mini_get_class (method, token, generic_context);
10446 CHECK_TYPELOAD (klass);
10447 if (sp [0]->type != STACK_OBJ)
10450 context_used = mini_class_check_context_used (cfg, klass);
10452 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10453 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10454 MonoInst *args [3];
10461 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10464 if (cfg->compile_aot) {
10465 idx = get_castclass_cache_idx (cfg);
10466 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10468 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10471 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10474 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10475 MonoMethod *mono_isinst;
10476 MonoInst *iargs [1];
10479 mono_isinst = mono_marshal_get_isinst (klass);
10480 iargs [0] = sp [0];
10482 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10483 iargs, ip, cfg->real_offset, TRUE);
10484 CHECK_CFG_EXCEPTION;
10485 g_assert (costs > 0);
10488 cfg->real_offset += 5;
10492 inline_costs += costs;
10495 ins = handle_isinst (cfg, klass, *sp, context_used);
10496 CHECK_CFG_EXCEPTION;
10502 case CEE_UNBOX_ANY: {
10503 MonoInst *res, *addr;
10508 token = read32 (ip + 1);
10509 klass = mini_get_class (method, token, generic_context);
10510 CHECK_TYPELOAD (klass);
10512 mono_save_token_info (cfg, image, token, klass);
10514 context_used = mini_class_check_context_used (cfg, klass);
10516 if (mini_is_gsharedvt_klass (klass)) {
10517 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10519 } else if (generic_class_is_reference_type (cfg, klass)) {
10520 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10521 CHECK_CFG_EXCEPTION;
10522 } else if (mono_class_is_nullable (klass)) {
10523 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10525 addr = handle_unbox (cfg, klass, sp, context_used);
10527 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10538 MonoClass *enum_class;
10539 MonoMethod *has_flag;
10545 token = read32 (ip + 1);
10546 klass = mini_get_class (method, token, generic_context);
10547 CHECK_TYPELOAD (klass);
10549 mono_save_token_info (cfg, image, token, klass);
10551 context_used = mini_class_check_context_used (cfg, klass);
10553 if (generic_class_is_reference_type (cfg, klass)) {
10559 if (klass == mono_defaults.void_class)
10561 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10563 /* frequent check in generic code: box (struct), brtrue */
10568 * <push int/long ptr>
10571 * constrained. MyFlags
10572 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10574 * If we find this sequence and the operand types on box and constrained
10575 * are equal, we can emit a specialized instruction sequence instead of
10576 * the very slow HasFlag () call.
10578 if ((cfg->opt & MONO_OPT_INTRINS) &&
10579 /* Cheap checks first. */
10580 ip + 5 + 6 + 5 < end &&
10581 ip [5] == CEE_PREFIX1 &&
10582 ip [6] == CEE_CONSTRAINED_ &&
10583 ip [11] == CEE_CALLVIRT &&
10584 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10585 mono_class_is_enum (klass) &&
10586 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10587 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10588 has_flag->klass == mono_defaults.enum_class &&
10589 !strcmp (has_flag->name, "HasFlag") &&
10590 has_flag->signature->hasthis &&
10591 has_flag->signature->param_count == 1) {
10592 CHECK_TYPELOAD (enum_class);
10594 if (enum_class == klass) {
10595 MonoInst *enum_this, *enum_flag;
10600 enum_this = sp [0];
10601 enum_flag = sp [1];
10603 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10608 // FIXME: LLVM can't handle the inconsistent bb linking
10609 if (!mono_class_is_nullable (klass) &&
10610 !mini_is_gsharedvt_klass (klass) &&
10611 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10612 (ip [5] == CEE_BRTRUE ||
10613 ip [5] == CEE_BRTRUE_S ||
10614 ip [5] == CEE_BRFALSE ||
10615 ip [5] == CEE_BRFALSE_S)) {
10616 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10618 MonoBasicBlock *true_bb, *false_bb;
10622 if (cfg->verbose_level > 3) {
10623 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10624 printf ("<box+brtrue opt>\n");
10629 case CEE_BRFALSE_S:
10632 target = ip + 1 + (signed char)(*ip);
10639 target = ip + 4 + (gint)(read32 (ip));
10643 g_assert_not_reached ();
10647 * We need to link both bblocks, since it is needed for handling stack
10648 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10649 * Branching to only one of them would lead to inconsistencies, so
10650 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10652 GET_BBLOCK (cfg, true_bb, target);
10653 GET_BBLOCK (cfg, false_bb, ip);
10655 mono_link_bblock (cfg, cfg->cbb, true_bb);
10656 mono_link_bblock (cfg, cfg->cbb, false_bb);
10658 if (sp != stack_start) {
10659 handle_stack_args (cfg, stack_start, sp - stack_start);
10661 CHECK_UNVERIFIABLE (cfg);
10664 if (COMPILE_LLVM (cfg)) {
10665 dreg = alloc_ireg (cfg);
10666 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10669 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10671 /* The JIT can't eliminate the iconst+compare */
10672 MONO_INST_NEW (cfg, ins, OP_BR);
10673 ins->inst_target_bb = is_true ? true_bb : false_bb;
10674 MONO_ADD_INS (cfg->cbb, ins);
10677 start_new_bblock = 1;
10681 *sp++ = handle_box (cfg, val, klass, context_used);
10683 CHECK_CFG_EXCEPTION;
10692 token = read32 (ip + 1);
10693 klass = mini_get_class (method, token, generic_context);
10694 CHECK_TYPELOAD (klass);
10696 mono_save_token_info (cfg, image, token, klass);
10698 context_used = mini_class_check_context_used (cfg, klass);
10700 if (mono_class_is_nullable (klass)) {
10703 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10704 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10708 ins = handle_unbox (cfg, klass, sp, context_used);
10721 MonoClassField *field;
10722 #ifndef DISABLE_REMOTING
10726 gboolean is_instance;
10728 gpointer addr = NULL;
10729 gboolean is_special_static;
10731 MonoInst *store_val = NULL;
10732 MonoInst *thread_ins;
10735 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10737 if (op == CEE_STFLD) {
10740 store_val = sp [1];
10745 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10747 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10750 if (op == CEE_STSFLD) {
10753 store_val = sp [0];
10758 token = read32 (ip + 1);
10759 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10760 field = mono_method_get_wrapper_data (method, token);
10761 klass = field->parent;
10764 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10767 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10768 FIELD_ACCESS_FAILURE (method, field);
10769 mono_class_init (klass);
10771 /* if the class is Critical then transparent code cannot access its fields */
10772 if (!is_instance && mono_security_core_clr_enabled ())
10773 ensure_method_is_allowed_to_access_field (cfg, method, field);
10775 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10776 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10777 if (mono_security_core_clr_enabled ())
10778 ensure_method_is_allowed_to_access_field (cfg, method, field);
10781 ftype = mono_field_get_type (field);
10784 * LDFLD etc. is usable on static fields as well, so convert those cases to
10787 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10799 g_assert_not_reached ();
10801 is_instance = FALSE;
10804 context_used = mini_class_check_context_used (cfg, klass);
10806 /* INSTANCE CASE */
10808 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10809 if (op == CEE_STFLD) {
10810 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10812 #ifndef DISABLE_REMOTING
10813 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10814 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10815 MonoInst *iargs [5];
10817 GSHAREDVT_FAILURE (op);
10819 iargs [0] = sp [0];
10820 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10821 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10822 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10824 iargs [4] = sp [1];
10826 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10827 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10828 iargs, ip, cfg->real_offset, TRUE);
10829 CHECK_CFG_EXCEPTION;
10830 g_assert (costs > 0);
10832 cfg->real_offset += 5;
10834 inline_costs += costs;
10836 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10843 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10845 if (mini_is_gsharedvt_klass (klass)) {
10846 MonoInst *offset_ins;
10848 context_used = mini_class_check_context_used (cfg, klass);
10850 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10851 dreg = alloc_ireg_mp (cfg);
10852 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10853 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10854 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10856 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10858 if (sp [0]->opcode != OP_LDADDR)
10859 store->flags |= MONO_INST_FAULT;
10861 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10862 /* insert call to write barrier */
10866 dreg = alloc_ireg_mp (cfg);
10867 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10868 emit_write_barrier (cfg, ptr, sp [1]);
10871 store->flags |= ins_flag;
10878 #ifndef DISABLE_REMOTING
10879 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10880 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10881 MonoInst *iargs [4];
10883 GSHAREDVT_FAILURE (op);
10885 iargs [0] = sp [0];
10886 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10887 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10888 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10889 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10890 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10891 iargs, ip, cfg->real_offset, TRUE);
10892 CHECK_CFG_EXCEPTION;
10893 g_assert (costs > 0);
10895 cfg->real_offset += 5;
10899 inline_costs += costs;
10901 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10907 if (sp [0]->type == STACK_VTYPE) {
10910 /* Have to compute the address of the variable */
10912 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10914 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10916 g_assert (var->klass == klass);
10918 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10922 if (op == CEE_LDFLDA) {
10923 if (sp [0]->type == STACK_OBJ) {
10924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10925 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10928 dreg = alloc_ireg_mp (cfg);
10930 if (mini_is_gsharedvt_klass (klass)) {
10931 MonoInst *offset_ins;
10933 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10934 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10936 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10938 ins->klass = mono_class_from_mono_type (field->type);
10939 ins->type = STACK_MP;
10944 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10946 if (mini_is_gsharedvt_klass (klass)) {
10947 MonoInst *offset_ins;
10949 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10950 dreg = alloc_ireg_mp (cfg);
10951 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10952 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10954 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10956 load->flags |= ins_flag;
10957 if (sp [0]->opcode != OP_LDADDR)
10958 load->flags |= MONO_INST_FAULT;
10970 context_used = mini_class_check_context_used (cfg, klass);
10972 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10975 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10976 * to be called here.
10978 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10979 mono_class_vtable (cfg->domain, klass);
10980 CHECK_TYPELOAD (klass);
10982 mono_domain_lock (cfg->domain);
10983 if (cfg->domain->special_static_fields)
10984 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10985 mono_domain_unlock (cfg->domain);
10987 is_special_static = mono_class_field_is_special_static (field);
10989 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10990 thread_ins = mono_get_thread_intrinsic (cfg);
10994 /* Generate IR to compute the field address */
10995 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10997 * Fast access to TLS data
10998 * Inline version of get_thread_static_data () in
11002 int idx, static_data_reg, array_reg, dreg;
11004 GSHAREDVT_FAILURE (op);
11006 MONO_ADD_INS (cfg->cbb, thread_ins);
11007 static_data_reg = alloc_ireg (cfg);
11008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11010 if (cfg->compile_aot) {
11011 int offset_reg, offset2_reg, idx_reg;
11013 /* For TLS variables, this will return the TLS offset */
11014 EMIT_NEW_SFLDACONST (cfg, ins, field);
11015 offset_reg = ins->dreg;
11016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11017 idx_reg = alloc_ireg (cfg);
11018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11019 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11020 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11021 array_reg = alloc_ireg (cfg);
11022 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11023 offset2_reg = alloc_ireg (cfg);
11024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11026 dreg = alloc_ireg (cfg);
11027 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11029 offset = (gsize)addr & 0x7fffffff;
11030 idx = offset & 0x3f;
11032 array_reg = alloc_ireg (cfg);
11033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11034 dreg = alloc_ireg (cfg);
11035 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11037 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11038 (cfg->compile_aot && is_special_static) ||
11039 (context_used && is_special_static)) {
11040 MonoInst *iargs [2];
11042 g_assert (field->parent);
11043 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11044 if (context_used) {
11045 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11046 field, MONO_RGCTX_INFO_CLASS_FIELD);
11048 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11050 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11051 } else if (context_used) {
11052 MonoInst *static_data;
11055 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11056 method->klass->name_space, method->klass->name, method->name,
11057 depth, field->offset);
11060 if (mono_class_needs_cctor_run (klass, method))
11061 emit_class_init (cfg, klass);
11064 * The pointer we're computing here is
11066 * super_info.static_data + field->offset
11068 static_data = emit_get_rgctx_klass (cfg, context_used,
11069 klass, MONO_RGCTX_INFO_STATIC_DATA);
11071 if (mini_is_gsharedvt_klass (klass)) {
11072 MonoInst *offset_ins;
11074 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11075 dreg = alloc_ireg_mp (cfg);
11076 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11077 } else if (field->offset == 0) {
11080 int addr_reg = mono_alloc_preg (cfg);
11081 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11083 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11084 MonoInst *iargs [2];
11086 g_assert (field->parent);
11087 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11088 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11089 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11091 MonoVTable *vtable = NULL;
11093 if (!cfg->compile_aot)
11094 vtable = mono_class_vtable (cfg->domain, klass);
11095 CHECK_TYPELOAD (klass);
11098 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11099 if (!(g_slist_find (class_inits, klass))) {
11100 emit_class_init (cfg, klass);
11101 if (cfg->verbose_level > 2)
11102 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11103 class_inits = g_slist_prepend (class_inits, klass);
11106 if (cfg->run_cctors) {
11108 /* This makes so that inline cannot trigger */
11109 /* .cctors: too many apps depend on them */
11110 /* running with a specific order... */
11112 if (! vtable->initialized)
11113 INLINE_FAILURE ("class init");
11114 ex = mono_runtime_class_init_full (vtable, FALSE);
11116 set_exception_object (cfg, ex);
11117 goto exception_exit;
11121 if (cfg->compile_aot)
11122 EMIT_NEW_SFLDACONST (cfg, ins, field);
11125 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11127 EMIT_NEW_PCONST (cfg, ins, addr);
11130 MonoInst *iargs [1];
11131 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11132 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11136 /* Generate IR to do the actual load/store operation */
11138 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11139 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11140 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11143 if (op == CEE_LDSFLDA) {
11144 ins->klass = mono_class_from_mono_type (ftype);
11145 ins->type = STACK_PTR;
11147 } else if (op == CEE_STSFLD) {
11150 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11151 store->flags |= ins_flag;
11153 gboolean is_const = FALSE;
11154 MonoVTable *vtable = NULL;
11155 gpointer addr = NULL;
11157 if (!context_used) {
11158 vtable = mono_class_vtable (cfg->domain, klass);
11159 CHECK_TYPELOAD (klass);
11161 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11162 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11163 int ro_type = ftype->type;
11165 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11166 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11167 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11170 GSHAREDVT_FAILURE (op);
11172 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11175 case MONO_TYPE_BOOLEAN:
11177 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11181 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11184 case MONO_TYPE_CHAR:
11186 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11190 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11195 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11199 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11204 case MONO_TYPE_PTR:
11205 case MONO_TYPE_FNPTR:
11206 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11207 type_to_eval_stack_type ((cfg), field->type, *sp);
11210 case MONO_TYPE_STRING:
11211 case MONO_TYPE_OBJECT:
11212 case MONO_TYPE_CLASS:
11213 case MONO_TYPE_SZARRAY:
11214 case MONO_TYPE_ARRAY:
11215 if (!mono_gc_is_moving ()) {
11216 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11217 type_to_eval_stack_type ((cfg), field->type, *sp);
11225 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11230 case MONO_TYPE_VALUETYPE:
11240 CHECK_STACK_OVF (1);
11242 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11243 load->flags |= ins_flag;
11249 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11250 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11251 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11262 token = read32 (ip + 1);
11263 klass = mini_get_class (method, token, generic_context);
11264 CHECK_TYPELOAD (klass);
11265 if (ins_flag & MONO_INST_VOLATILE) {
11266 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11267 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11269 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11270 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11271 ins->flags |= ins_flag;
11272 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11273 generic_class_is_reference_type (cfg, klass)) {
11274 /* insert call to write barrier */
11275 emit_write_barrier (cfg, sp [0], sp [1]);
11287 const char *data_ptr;
11289 guint32 field_token;
11295 token = read32 (ip + 1);
11297 klass = mini_get_class (method, token, generic_context);
11298 CHECK_TYPELOAD (klass);
11300 context_used = mini_class_check_context_used (cfg, klass);
11302 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11303 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11304 ins->sreg1 = sp [0]->dreg;
11305 ins->type = STACK_I4;
11306 ins->dreg = alloc_ireg (cfg);
11307 MONO_ADD_INS (cfg->cbb, ins);
11308 *sp = mono_decompose_opcode (cfg, ins);
11311 if (context_used) {
11312 MonoInst *args [3];
11313 MonoClass *array_class = mono_array_class_get (klass, 1);
11314 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11316 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11319 args [0] = emit_get_rgctx_klass (cfg, context_used,
11320 array_class, MONO_RGCTX_INFO_VTABLE);
11325 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11327 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11329 if (cfg->opt & MONO_OPT_SHARED) {
11330 /* Decompose now to avoid problems with references to the domainvar */
11331 MonoInst *iargs [3];
11333 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11334 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11335 iargs [2] = sp [0];
11337 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11339 /* Decompose later since it is needed by abcrem */
11340 MonoClass *array_type = mono_array_class_get (klass, 1);
11341 mono_class_vtable (cfg->domain, array_type);
11342 CHECK_TYPELOAD (array_type);
11344 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11345 ins->dreg = alloc_ireg_ref (cfg);
11346 ins->sreg1 = sp [0]->dreg;
11347 ins->inst_newa_class = klass;
11348 ins->type = STACK_OBJ;
11349 ins->klass = array_type;
11350 MONO_ADD_INS (cfg->cbb, ins);
11351 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11352 cfg->cbb->has_array_access = TRUE;
11354 /* Needed so mono_emit_load_get_addr () gets called */
11355 mono_get_got_var (cfg);
11365 * we inline/optimize the initialization sequence if possible.
11366 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11367 * for small sizes open code the memcpy
11368 * ensure the rva field is big enough
11370 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11371 MonoMethod *memcpy_method = get_memcpy_method ();
11372 MonoInst *iargs [3];
11373 int add_reg = alloc_ireg_mp (cfg);
11375 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11376 if (cfg->compile_aot) {
11377 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11379 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11381 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11382 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11391 if (sp [0]->type != STACK_OBJ)
11394 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11395 ins->dreg = alloc_preg (cfg);
11396 ins->sreg1 = sp [0]->dreg;
11397 ins->type = STACK_I4;
11398 /* This flag will be inherited by the decomposition */
11399 ins->flags |= MONO_INST_FAULT;
11400 MONO_ADD_INS (cfg->cbb, ins);
11401 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11402 cfg->cbb->has_array_access = TRUE;
11410 if (sp [0]->type != STACK_OBJ)
11413 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11415 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11416 CHECK_TYPELOAD (klass);
11417 /* we need to make sure that this array is exactly the type it needs
11418 * to be for correctness. the wrappers are lax with their usage
11419 * so we need to ignore them here
11421 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11422 MonoClass *array_class = mono_array_class_get (klass, 1);
11423 mini_emit_check_array_type (cfg, sp [0], array_class);
11424 CHECK_TYPELOAD (array_class);
11428 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11433 case CEE_LDELEM_I1:
11434 case CEE_LDELEM_U1:
11435 case CEE_LDELEM_I2:
11436 case CEE_LDELEM_U2:
11437 case CEE_LDELEM_I4:
11438 case CEE_LDELEM_U4:
11439 case CEE_LDELEM_I8:
11441 case CEE_LDELEM_R4:
11442 case CEE_LDELEM_R8:
11443 case CEE_LDELEM_REF: {
11449 if (*ip == CEE_LDELEM) {
11451 token = read32 (ip + 1);
11452 klass = mini_get_class (method, token, generic_context);
11453 CHECK_TYPELOAD (klass);
11454 mono_class_init (klass);
11457 klass = array_access_to_klass (*ip);
11459 if (sp [0]->type != STACK_OBJ)
11462 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11464 if (mini_is_gsharedvt_variable_klass (klass)) {
11465 // FIXME-VT: OP_ICONST optimization
11466 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11467 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11468 ins->opcode = OP_LOADV_MEMBASE;
11469 } else if (sp [1]->opcode == OP_ICONST) {
11470 int array_reg = sp [0]->dreg;
11471 int index_reg = sp [1]->dreg;
11472 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11474 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11475 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11477 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11478 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11481 if (*ip == CEE_LDELEM)
11488 case CEE_STELEM_I1:
11489 case CEE_STELEM_I2:
11490 case CEE_STELEM_I4:
11491 case CEE_STELEM_I8:
11492 case CEE_STELEM_R4:
11493 case CEE_STELEM_R8:
11494 case CEE_STELEM_REF:
11499 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11501 if (*ip == CEE_STELEM) {
11503 token = read32 (ip + 1);
11504 klass = mini_get_class (method, token, generic_context);
11505 CHECK_TYPELOAD (klass);
11506 mono_class_init (klass);
11509 klass = array_access_to_klass (*ip);
11511 if (sp [0]->type != STACK_OBJ)
11514 emit_array_store (cfg, klass, sp, TRUE);
11516 if (*ip == CEE_STELEM)
11523 case CEE_CKFINITE: {
11527 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11528 ins->sreg1 = sp [0]->dreg;
11529 ins->dreg = alloc_freg (cfg);
11530 ins->type = STACK_R8;
11531 MONO_ADD_INS (cfg->cbb, ins);
11533 *sp++ = mono_decompose_opcode (cfg, ins);
11538 case CEE_REFANYVAL: {
11539 MonoInst *src_var, *src;
11541 int klass_reg = alloc_preg (cfg);
11542 int dreg = alloc_preg (cfg);
11544 GSHAREDVT_FAILURE (*ip);
11547 MONO_INST_NEW (cfg, ins, *ip);
11550 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11551 CHECK_TYPELOAD (klass);
11553 context_used = mini_class_check_context_used (cfg, klass);
11556 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11558 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11559 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11562 if (context_used) {
11563 MonoInst *klass_ins;
11565 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11566 klass, MONO_RGCTX_INFO_KLASS);
11569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11570 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11572 mini_emit_class_check (cfg, klass_reg, klass);
11574 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11575 ins->type = STACK_MP;
11576 ins->klass = klass;
11581 case CEE_MKREFANY: {
11582 MonoInst *loc, *addr;
11584 GSHAREDVT_FAILURE (*ip);
11587 MONO_INST_NEW (cfg, ins, *ip);
11590 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11591 CHECK_TYPELOAD (klass);
11593 context_used = mini_class_check_context_used (cfg, klass);
11595 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11596 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11598 if (context_used) {
11599 MonoInst *const_ins;
11600 int type_reg = alloc_preg (cfg);
11602 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11603 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11605 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11606 } else if (cfg->compile_aot) {
11607 int const_reg = alloc_preg (cfg);
11608 int type_reg = alloc_preg (cfg);
11610 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11611 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11613 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11616 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11618 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11620 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11621 ins->type = STACK_VTYPE;
11622 ins->klass = mono_defaults.typed_reference_class;
11627 case CEE_LDTOKEN: {
11629 MonoClass *handle_class;
11631 CHECK_STACK_OVF (1);
11634 n = read32 (ip + 1);
11636 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11637 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11638 handle = mono_method_get_wrapper_data (method, n);
11639 handle_class = mono_method_get_wrapper_data (method, n + 1);
11640 if (handle_class == mono_defaults.typehandle_class)
11641 handle = &((MonoClass*)handle)->byval_arg;
11644 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11649 mono_class_init (handle_class);
11650 if (cfg->gshared) {
11651 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11652 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11653 /* This case handles ldtoken
11654 of an open type, like for
11657 } else if (handle_class == mono_defaults.typehandle_class) {
11658 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11659 } else if (handle_class == mono_defaults.fieldhandle_class)
11660 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11661 else if (handle_class == mono_defaults.methodhandle_class)
11662 context_used = mini_method_check_context_used (cfg, handle);
11664 g_assert_not_reached ();
11667 if ((cfg->opt & MONO_OPT_SHARED) &&
11668 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11669 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11670 MonoInst *addr, *vtvar, *iargs [3];
11671 int method_context_used;
11673 method_context_used = mini_method_check_context_used (cfg, method);
11675 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11677 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11678 EMIT_NEW_ICONST (cfg, iargs [1], n);
11679 if (method_context_used) {
11680 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11681 method, MONO_RGCTX_INFO_METHOD);
11682 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11684 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11685 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11687 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11691 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11693 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11694 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11695 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11696 (cmethod->klass == mono_defaults.systemtype_class) &&
11697 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11698 MonoClass *tclass = mono_class_from_mono_type (handle);
11700 mono_class_init (tclass);
11701 if (context_used) {
11702 ins = emit_get_rgctx_klass (cfg, context_used,
11703 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11704 } else if (cfg->compile_aot) {
11705 if (method->wrapper_type) {
11706 mono_error_init (&error); //got to do it since there are multiple conditionals below
11707 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11708 /* Special case for static synchronized wrappers */
11709 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11711 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11712 /* FIXME: n is not a normal token */
11714 EMIT_NEW_PCONST (cfg, ins, NULL);
11717 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11720 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11722 ins->type = STACK_OBJ;
11723 ins->klass = cmethod->klass;
11726 MonoInst *addr, *vtvar;
11728 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11730 if (context_used) {
11731 if (handle_class == mono_defaults.typehandle_class) {
11732 ins = emit_get_rgctx_klass (cfg, context_used,
11733 mono_class_from_mono_type (handle),
11734 MONO_RGCTX_INFO_TYPE);
11735 } else if (handle_class == mono_defaults.methodhandle_class) {
11736 ins = emit_get_rgctx_method (cfg, context_used,
11737 handle, MONO_RGCTX_INFO_METHOD);
11738 } else if (handle_class == mono_defaults.fieldhandle_class) {
11739 ins = emit_get_rgctx_field (cfg, context_used,
11740 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11742 g_assert_not_reached ();
11744 } else if (cfg->compile_aot) {
11745 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11747 EMIT_NEW_PCONST (cfg, ins, handle);
11749 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11751 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11761 MONO_INST_NEW (cfg, ins, OP_THROW);
11763 ins->sreg1 = sp [0]->dreg;
11765 cfg->cbb->out_of_line = TRUE;
11766 MONO_ADD_INS (cfg->cbb, ins);
11767 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11768 MONO_ADD_INS (cfg->cbb, ins);
11771 link_bblock (cfg, cfg->cbb, end_bblock);
11772 start_new_bblock = 1;
11774 case CEE_ENDFINALLY:
11775 /* mono_save_seq_point_info () depends on this */
11776 if (sp != stack_start)
11777 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11778 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11779 MONO_ADD_INS (cfg->cbb, ins);
11781 start_new_bblock = 1;
11784 * Control will leave the method so empty the stack, otherwise
11785 * the next basic block will start with a nonempty stack.
11787 while (sp != stack_start) {
11792 case CEE_LEAVE_S: {
11795 if (*ip == CEE_LEAVE) {
11797 target = ip + 5 + (gint32)read32(ip + 1);
11800 target = ip + 2 + (signed char)(ip [1]);
11803 /* empty the stack */
11804 while (sp != stack_start) {
11809 * If this leave statement is in a catch block, check for a
11810 * pending exception, and rethrow it if necessary.
11811 * We avoid doing this in runtime invoke wrappers, since those are called
11812 * by native code which excepts the wrapper to catch all exceptions.
11814 for (i = 0; i < header->num_clauses; ++i) {
11815 MonoExceptionClause *clause = &header->clauses [i];
11818 * Use <= in the final comparison to handle clauses with multiple
11819 * leave statements, like in bug #78024.
11820 * The ordering of the exception clauses guarantees that we find the
11821 * innermost clause.
11823 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11825 MonoBasicBlock *dont_throw;
11830 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11833 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11835 NEW_BBLOCK (cfg, dont_throw);
11838 * Currently, we always rethrow the abort exception, despite the
11839 * fact that this is not correct. See thread6.cs for an example.
11840 * But propagating the abort exception is more important than
11841 * getting the sematics right.
11843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11845 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11847 MONO_START_BB (cfg, dont_throw);
11851 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11853 MonoExceptionClause *clause;
11855 for (tmp = handlers; tmp; tmp = tmp->next) {
11856 clause = tmp->data;
11857 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11859 link_bblock (cfg, cfg->cbb, tblock);
11860 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11861 ins->inst_target_bb = tblock;
11862 ins->inst_eh_block = clause;
11863 MONO_ADD_INS (cfg->cbb, ins);
11864 cfg->cbb->has_call_handler = 1;
11865 if (COMPILE_LLVM (cfg)) {
11866 MonoBasicBlock *target_bb;
11869 * Link the finally bblock with the target, since it will
11870 * conceptually branch there.
11871 * FIXME: Have to link the bblock containing the endfinally.
11873 GET_BBLOCK (cfg, target_bb, target);
11874 link_bblock (cfg, tblock, target_bb);
11877 g_list_free (handlers);
11880 MONO_INST_NEW (cfg, ins, OP_BR);
11881 MONO_ADD_INS (cfg->cbb, ins);
11882 GET_BBLOCK (cfg, tblock, target);
11883 link_bblock (cfg, cfg->cbb, tblock);
11884 ins->inst_target_bb = tblock;
11885 start_new_bblock = 1;
11887 if (*ip == CEE_LEAVE)
11896 * Mono specific opcodes
11898 case MONO_CUSTOM_PREFIX: {
11900 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11904 case CEE_MONO_ICALL: {
11906 MonoJitICallInfo *info;
11908 token = read32 (ip + 2);
11909 func = mono_method_get_wrapper_data (method, token);
11910 info = mono_find_jit_icall_by_addr (func);
11912 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11915 CHECK_STACK (info->sig->param_count);
11916 sp -= info->sig->param_count;
11918 ins = mono_emit_jit_icall (cfg, info->func, sp);
11919 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11923 inline_costs += 10 * num_calls++;
11927 case CEE_MONO_LDPTR_CARD_TABLE: {
11929 gpointer card_mask;
11930 CHECK_STACK_OVF (1);
11932 if (cfg->compile_aot)
11933 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11935 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
11939 inline_costs += 10 * num_calls++;
11942 case CEE_MONO_LDPTR_NURSERY_START: {
11945 CHECK_STACK_OVF (1);
11947 if (cfg->compile_aot)
11948 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11950 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
11954 inline_costs += 10 * num_calls++;
11957 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11958 CHECK_STACK_OVF (1);
11960 if (cfg->compile_aot)
11961 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11963 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
11967 inline_costs += 10 * num_calls++;
11970 case CEE_MONO_LDPTR: {
11973 CHECK_STACK_OVF (1);
11975 token = read32 (ip + 2);
11977 ptr = mono_method_get_wrapper_data (method, token);
11978 EMIT_NEW_PCONST (cfg, ins, ptr);
11981 inline_costs += 10 * num_calls++;
11982 /* Can't embed random pointers into AOT code */
11986 case CEE_MONO_JIT_ICALL_ADDR: {
11987 MonoJitICallInfo *callinfo;
11990 CHECK_STACK_OVF (1);
11992 token = read32 (ip + 2);
11994 ptr = mono_method_get_wrapper_data (method, token);
11995 callinfo = mono_find_jit_icall_by_addr (ptr);
11996 g_assert (callinfo);
11997 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12000 inline_costs += 10 * num_calls++;
12003 case CEE_MONO_ICALL_ADDR: {
12004 MonoMethod *cmethod;
12007 CHECK_STACK_OVF (1);
12009 token = read32 (ip + 2);
12011 cmethod = mono_method_get_wrapper_data (method, token);
12013 if (cfg->compile_aot) {
12014 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12016 ptr = mono_lookup_internal_call (cmethod);
12018 EMIT_NEW_PCONST (cfg, ins, ptr);
12024 case CEE_MONO_VTADDR: {
12025 MonoInst *src_var, *src;
12031 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12032 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12037 case CEE_MONO_NEWOBJ: {
12038 MonoInst *iargs [2];
12040 CHECK_STACK_OVF (1);
12042 token = read32 (ip + 2);
12043 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12044 mono_class_init (klass);
12045 NEW_DOMAINCONST (cfg, iargs [0]);
12046 MONO_ADD_INS (cfg->cbb, iargs [0]);
12047 NEW_CLASSCONST (cfg, iargs [1], klass);
12048 MONO_ADD_INS (cfg->cbb, iargs [1]);
12049 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12051 inline_costs += 10 * num_calls++;
12054 case CEE_MONO_OBJADDR:
12057 MONO_INST_NEW (cfg, ins, OP_MOVE);
12058 ins->dreg = alloc_ireg_mp (cfg);
12059 ins->sreg1 = sp [0]->dreg;
12060 ins->type = STACK_MP;
12061 MONO_ADD_INS (cfg->cbb, ins);
12065 case CEE_MONO_LDNATIVEOBJ:
12067 * Similar to LDOBJ, but instead load the unmanaged
12068 * representation of the vtype to the stack.
12073 token = read32 (ip + 2);
12074 klass = mono_method_get_wrapper_data (method, token);
12075 g_assert (klass->valuetype);
12076 mono_class_init (klass);
12079 MonoInst *src, *dest, *temp;
12082 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12083 temp->backend.is_pinvoke = 1;
12084 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12085 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12087 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12088 dest->type = STACK_VTYPE;
12089 dest->klass = klass;
12095 case CEE_MONO_RETOBJ: {
12097 * Same as RET, but return the native representation of a vtype
12100 g_assert (cfg->ret);
12101 g_assert (mono_method_signature (method)->pinvoke);
12106 token = read32 (ip + 2);
12107 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12109 if (!cfg->vret_addr) {
12110 g_assert (cfg->ret_var_is_local);
12112 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12114 EMIT_NEW_RETLOADA (cfg, ins);
12116 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12118 if (sp != stack_start)
12121 MONO_INST_NEW (cfg, ins, OP_BR);
12122 ins->inst_target_bb = end_bblock;
12123 MONO_ADD_INS (cfg->cbb, ins);
12124 link_bblock (cfg, cfg->cbb, end_bblock);
12125 start_new_bblock = 1;
12129 case CEE_MONO_CISINST:
12130 case CEE_MONO_CCASTCLASS: {
12135 token = read32 (ip + 2);
12136 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12137 if (ip [1] == CEE_MONO_CISINST)
12138 ins = handle_cisinst (cfg, klass, sp [0]);
12140 ins = handle_ccastclass (cfg, klass, sp [0]);
12145 case CEE_MONO_SAVE_LMF:
12146 case CEE_MONO_RESTORE_LMF:
12149 case CEE_MONO_CLASSCONST:
12150 CHECK_STACK_OVF (1);
12152 token = read32 (ip + 2);
12153 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12156 inline_costs += 10 * num_calls++;
12158 case CEE_MONO_NOT_TAKEN:
12159 cfg->cbb->out_of_line = TRUE;
12162 case CEE_MONO_TLS: {
12165 CHECK_STACK_OVF (1);
12167 key = (gint32)read32 (ip + 2);
12168 g_assert (key < TLS_KEY_NUM);
12170 ins = mono_create_tls_get (cfg, key);
12172 if (cfg->compile_aot) {
12174 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12175 ins->dreg = alloc_preg (cfg);
12176 ins->type = STACK_PTR;
12178 g_assert_not_reached ();
12181 ins->type = STACK_PTR;
12182 MONO_ADD_INS (cfg->cbb, ins);
12187 case CEE_MONO_DYN_CALL: {
12188 MonoCallInst *call;
12190 /* It would be easier to call a trampoline, but that would put an
12191 * extra frame on the stack, confusing exception handling. So
12192 * implement it inline using an opcode for now.
12195 if (!cfg->dyn_call_var) {
12196 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12197 /* prevent it from being register allocated */
12198 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12201 /* Has to use a call inst since it local regalloc expects it */
12202 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12203 ins = (MonoInst*)call;
12205 ins->sreg1 = sp [0]->dreg;
12206 ins->sreg2 = sp [1]->dreg;
12207 MONO_ADD_INS (cfg->cbb, ins);
12209 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12212 inline_costs += 10 * num_calls++;
12216 case CEE_MONO_MEMORY_BARRIER: {
12218 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12222 case CEE_MONO_JIT_ATTACH: {
12223 MonoInst *args [16], *domain_ins;
12224 MonoInst *ad_ins, *jit_tls_ins;
12225 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12227 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12229 EMIT_NEW_PCONST (cfg, ins, NULL);
12230 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12232 ad_ins = mono_get_domain_intrinsic (cfg);
12233 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12235 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12236 NEW_BBLOCK (cfg, next_bb);
12237 NEW_BBLOCK (cfg, call_bb);
12239 if (cfg->compile_aot) {
12240 /* AOT code is only used in the root domain */
12241 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12243 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12245 MONO_ADD_INS (cfg->cbb, ad_ins);
12246 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12249 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12250 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12251 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12253 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12254 MONO_START_BB (cfg, call_bb);
12257 if (cfg->compile_aot) {
12258 /* AOT code is only used in the root domain */
12259 EMIT_NEW_PCONST (cfg, args [0], NULL);
12261 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12263 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12264 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12267 MONO_START_BB (cfg, next_bb);
12271 case CEE_MONO_JIT_DETACH: {
12272 MonoInst *args [16];
12274 /* Restore the original domain */
12275 dreg = alloc_ireg (cfg);
12276 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12277 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12282 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12288 case CEE_PREFIX1: {
12291 case CEE_ARGLIST: {
12292 /* somewhat similar to LDTOKEN */
12293 MonoInst *addr, *vtvar;
12294 CHECK_STACK_OVF (1);
12295 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12297 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12298 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12300 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12301 ins->type = STACK_VTYPE;
12302 ins->klass = mono_defaults.argumenthandle_class;
12312 MonoInst *cmp, *arg1, *arg2;
12320 * The following transforms:
12321 * CEE_CEQ into OP_CEQ
12322 * CEE_CGT into OP_CGT
12323 * CEE_CGT_UN into OP_CGT_UN
12324 * CEE_CLT into OP_CLT
12325 * CEE_CLT_UN into OP_CLT_UN
12327 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12329 MONO_INST_NEW (cfg, ins, cmp->opcode);
12330 cmp->sreg1 = arg1->dreg;
12331 cmp->sreg2 = arg2->dreg;
12332 type_from_op (cfg, cmp, arg1, arg2);
12334 add_widen_op (cfg, cmp, &arg1, &arg2);
12335 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12336 cmp->opcode = OP_LCOMPARE;
12337 else if (arg1->type == STACK_R4)
12338 cmp->opcode = OP_RCOMPARE;
12339 else if (arg1->type == STACK_R8)
12340 cmp->opcode = OP_FCOMPARE;
12342 cmp->opcode = OP_ICOMPARE;
12343 MONO_ADD_INS (cfg->cbb, cmp);
12344 ins->type = STACK_I4;
12345 ins->dreg = alloc_dreg (cfg, ins->type);
12346 type_from_op (cfg, ins, arg1, arg2);
12348 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12350 * The backends expect the fceq opcodes to do the
12353 ins->sreg1 = cmp->sreg1;
12354 ins->sreg2 = cmp->sreg2;
12357 MONO_ADD_INS (cfg->cbb, ins);
12363 MonoInst *argconst;
12364 MonoMethod *cil_method;
12366 CHECK_STACK_OVF (1);
12368 n = read32 (ip + 2);
12369 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12370 if (!cmethod || mono_loader_get_last_error ())
12372 mono_class_init (cmethod->klass);
12374 mono_save_token_info (cfg, image, n, cmethod);
12376 context_used = mini_method_check_context_used (cfg, cmethod);
12378 cil_method = cmethod;
12379 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12380 METHOD_ACCESS_FAILURE (method, cil_method);
12382 if (mono_security_core_clr_enabled ())
12383 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12386 * Optimize the common case of ldftn+delegate creation
12388 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12389 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12390 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12391 MonoInst *target_ins, *handle_ins;
12392 MonoMethod *invoke;
12393 int invoke_context_used;
12395 invoke = mono_get_delegate_invoke (ctor_method->klass);
12396 if (!invoke || !mono_method_signature (invoke))
12399 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12401 target_ins = sp [-1];
12403 if (mono_security_core_clr_enabled ())
12404 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12406 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12407 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12408 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12410 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12414 /* FIXME: SGEN support */
12415 if (invoke_context_used == 0) {
12417 if (cfg->verbose_level > 3)
12418 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12419 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12422 CHECK_CFG_EXCEPTION;
12432 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12433 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12437 inline_costs += 10 * num_calls++;
12440 case CEE_LDVIRTFTN: {
12441 MonoInst *args [2];
12445 n = read32 (ip + 2);
12446 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12447 if (!cmethod || mono_loader_get_last_error ())
12449 mono_class_init (cmethod->klass);
12451 context_used = mini_method_check_context_used (cfg, cmethod);
12453 if (mono_security_core_clr_enabled ())
12454 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12457 * Optimize the common case of ldvirtftn+delegate creation
12459 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12460 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12461 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12462 MonoInst *target_ins, *handle_ins;
12463 MonoMethod *invoke;
12464 int invoke_context_used;
12465 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12467 invoke = mono_get_delegate_invoke (ctor_method->klass);
12468 if (!invoke || !mono_method_signature (invoke))
12471 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12473 target_ins = sp [-1];
12475 if (mono_security_core_clr_enabled ())
12476 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12478 /* FIXME: SGEN support */
12479 if (invoke_context_used == 0) {
12481 if (cfg->verbose_level > 3)
12482 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12483 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12486 CHECK_CFG_EXCEPTION;
12499 args [1] = emit_get_rgctx_method (cfg, context_used,
12500 cmethod, MONO_RGCTX_INFO_METHOD);
12503 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12505 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12508 inline_costs += 10 * num_calls++;
12512 CHECK_STACK_OVF (1);
12514 n = read16 (ip + 2);
12516 EMIT_NEW_ARGLOAD (cfg, ins, n);
12521 CHECK_STACK_OVF (1);
12523 n = read16 (ip + 2);
12525 NEW_ARGLOADA (cfg, ins, n);
12526 MONO_ADD_INS (cfg->cbb, ins);
12534 n = read16 (ip + 2);
12536 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12538 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12542 CHECK_STACK_OVF (1);
12544 n = read16 (ip + 2);
12546 EMIT_NEW_LOCLOAD (cfg, ins, n);
12551 unsigned char *tmp_ip;
12552 CHECK_STACK_OVF (1);
12554 n = read16 (ip + 2);
12557 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12563 EMIT_NEW_LOCLOADA (cfg, ins, n);
12572 n = read16 (ip + 2);
12574 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12576 emit_stloc_ir (cfg, sp, header, n);
12583 if (sp != stack_start)
12585 if (cfg->method != method)
12587 * Inlining this into a loop in a parent could lead to
12588 * stack overflows which is different behavior than the
12589 * non-inlined case, thus disable inlining in this case.
12591 INLINE_FAILURE("localloc");
12593 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12594 ins->dreg = alloc_preg (cfg);
12595 ins->sreg1 = sp [0]->dreg;
12596 ins->type = STACK_PTR;
12597 MONO_ADD_INS (cfg->cbb, ins);
12599 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12601 ins->flags |= MONO_INST_INIT;
12606 case CEE_ENDFILTER: {
12607 MonoExceptionClause *clause, *nearest;
12612 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12614 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12615 ins->sreg1 = (*sp)->dreg;
12616 MONO_ADD_INS (cfg->cbb, ins);
12617 start_new_bblock = 1;
12621 for (cc = 0; cc < header->num_clauses; ++cc) {
12622 clause = &header->clauses [cc];
12623 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12624 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12625 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12628 g_assert (nearest);
12629 if ((ip - header->code) != nearest->handler_offset)
12634 case CEE_UNALIGNED_:
12635 ins_flag |= MONO_INST_UNALIGNED;
12636 /* FIXME: record alignment? we can assume 1 for now */
12640 case CEE_VOLATILE_:
12641 ins_flag |= MONO_INST_VOLATILE;
12645 ins_flag |= MONO_INST_TAILCALL;
12646 cfg->flags |= MONO_CFG_HAS_TAIL;
12647 /* Can't inline tail calls at this time */
12648 inline_costs += 100000;
12655 token = read32 (ip + 2);
12656 klass = mini_get_class (method, token, generic_context);
12657 CHECK_TYPELOAD (klass);
12658 if (generic_class_is_reference_type (cfg, klass))
12659 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12661 mini_emit_initobj (cfg, *sp, NULL, klass);
12665 case CEE_CONSTRAINED_:
12667 token = read32 (ip + 2);
12668 constrained_class = mini_get_class (method, token, generic_context);
12669 CHECK_TYPELOAD (constrained_class);
12673 case CEE_INITBLK: {
12674 MonoInst *iargs [3];
12678 /* Skip optimized paths for volatile operations. */
12679 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12680 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12681 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12682 /* emit_memset only works when val == 0 */
12683 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12686 iargs [0] = sp [0];
12687 iargs [1] = sp [1];
12688 iargs [2] = sp [2];
12689 if (ip [1] == CEE_CPBLK) {
12691 * FIXME: It's unclear whether we should be emitting both the acquire
12692 * and release barriers for cpblk. It is technically both a load and
12693 * store operation, so it seems like that's the sensible thing to do.
12695 * FIXME: We emit full barriers on both sides of the operation for
12696 * simplicity. We should have a separate atomic memcpy method instead.
12698 MonoMethod *memcpy_method = get_memcpy_method ();
12700 if (ins_flag & MONO_INST_VOLATILE)
12701 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12703 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12704 call->flags |= ins_flag;
12706 if (ins_flag & MONO_INST_VOLATILE)
12707 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12709 MonoMethod *memset_method = get_memset_method ();
12710 if (ins_flag & MONO_INST_VOLATILE) {
12711 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12712 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12714 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12715 call->flags |= ins_flag;
12726 ins_flag |= MONO_INST_NOTYPECHECK;
12728 ins_flag |= MONO_INST_NORANGECHECK;
12729 /* we ignore the no-nullcheck for now since we
12730 * really do it explicitly only when doing callvirt->call
12734 case CEE_RETHROW: {
12736 int handler_offset = -1;
12738 for (i = 0; i < header->num_clauses; ++i) {
12739 MonoExceptionClause *clause = &header->clauses [i];
12740 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12741 handler_offset = clause->handler_offset;
12746 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12748 if (handler_offset == -1)
12751 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12752 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12753 ins->sreg1 = load->dreg;
12754 MONO_ADD_INS (cfg->cbb, ins);
12756 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12757 MONO_ADD_INS (cfg->cbb, ins);
12760 link_bblock (cfg, cfg->cbb, end_bblock);
12761 start_new_bblock = 1;
12769 CHECK_STACK_OVF (1);
12771 token = read32 (ip + 2);
12772 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12773 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12776 val = mono_type_size (type, &ialign);
12778 MonoClass *klass = mini_get_class (method, token, generic_context);
12779 CHECK_TYPELOAD (klass);
12781 val = mono_type_size (&klass->byval_arg, &ialign);
12783 if (mini_is_gsharedvt_klass (klass))
12784 GSHAREDVT_FAILURE (*ip);
12786 EMIT_NEW_ICONST (cfg, ins, val);
12791 case CEE_REFANYTYPE: {
12792 MonoInst *src_var, *src;
12794 GSHAREDVT_FAILURE (*ip);
12800 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12802 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12803 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12804 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12809 case CEE_READONLY_:
12822 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12832 g_warning ("opcode 0x%02x not handled", *ip);
12836 if (start_new_bblock != 1)
12839 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12840 if (cfg->cbb->next_bb) {
12841 /* This could already be set because of inlining, #693905 */
12842 MonoBasicBlock *bb = cfg->cbb;
12844 while (bb->next_bb)
12846 bb->next_bb = end_bblock;
12848 cfg->cbb->next_bb = end_bblock;
12851 if (cfg->method == method && cfg->domainvar) {
12853 MonoInst *get_domain;
12855 cfg->cbb = init_localsbb;
12857 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12858 MONO_ADD_INS (cfg->cbb, get_domain);
12860 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12862 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12863 MONO_ADD_INS (cfg->cbb, store);
12866 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12867 if (cfg->compile_aot)
12868 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12869 mono_get_got_var (cfg);
12872 if (cfg->method == method && cfg->got_var)
12873 mono_emit_load_got_addr (cfg);
12875 if (init_localsbb) {
12876 cfg->cbb = init_localsbb;
12878 for (i = 0; i < header->num_locals; ++i) {
12879 emit_init_local (cfg, i, header->locals [i], init_locals);
12883 if (cfg->init_ref_vars && cfg->method == method) {
12884 /* Emit initialization for ref vars */
12885 // FIXME: Avoid duplication initialization for IL locals.
12886 for (i = 0; i < cfg->num_varinfo; ++i) {
12887 MonoInst *ins = cfg->varinfo [i];
12889 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12890 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12894 if (cfg->lmf_var && cfg->method == method) {
12895 cfg->cbb = init_localsbb;
12896 emit_push_lmf (cfg);
12899 cfg->cbb = init_localsbb;
12900 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12903 MonoBasicBlock *bb;
12906 * Make seq points at backward branch targets interruptable.
12908 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12909 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12910 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12913 /* Add a sequence point for method entry/exit events */
12914 if (seq_points && cfg->gen_sdb_seq_points) {
12915 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12916 MONO_ADD_INS (init_localsbb, ins);
12917 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12918 MONO_ADD_INS (cfg->bb_exit, ins);
12922 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12923 * the code they refer to was dead (#11880).
12925 if (sym_seq_points) {
12926 for (i = 0; i < header->code_size; ++i) {
12927 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12930 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12931 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12938 if (cfg->method == method) {
12939 MonoBasicBlock *bb;
12940 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12941 bb->region = mono_find_block_region (cfg, bb->real_offset);
12943 mono_create_spvar_for_region (cfg, bb->region);
12944 if (cfg->verbose_level > 2)
12945 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12949 if (inline_costs < 0) {
12952 /* Method is too large */
12953 mname = mono_method_full_name (method, TRUE);
12954 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12955 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12959 if ((cfg->verbose_level > 2) && (cfg->method == method))
12960 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12965 g_assert (!mono_error_ok (&cfg->error));
12969 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12973 set_exception_type_from_invalid_il (cfg, method, ip);
12977 g_slist_free (class_inits);
12978 mono_basic_block_free (original_bb);
12979 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12980 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12981 if (cfg->exception_type)
12984 return inline_costs;
12988 store_membase_reg_to_store_membase_imm (int opcode)
12991 case OP_STORE_MEMBASE_REG:
12992 return OP_STORE_MEMBASE_IMM;
12993 case OP_STOREI1_MEMBASE_REG:
12994 return OP_STOREI1_MEMBASE_IMM;
12995 case OP_STOREI2_MEMBASE_REG:
12996 return OP_STOREI2_MEMBASE_IMM;
12997 case OP_STOREI4_MEMBASE_REG:
12998 return OP_STOREI4_MEMBASE_IMM;
12999 case OP_STOREI8_MEMBASE_REG:
13000 return OP_STOREI8_MEMBASE_IMM;
13002 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to the variant taking an
 * immediate operand (OP_IADD -> OP_IADD_IMM, OP_STORE*_MEMBASE_REG ->
 * OP_STORE*_MEMBASE_IMM, etc.), including x86/amd64-specific forms under
 * the TARGET_* guards. OP_VOIDCALL_REG maps back to plain OP_VOIDCALL.
 * NOTE(review): this excerpt is elided — most `case` labels, the switch
 * scaffolding and the default return are not shown here; only the `return`
 * lines and a few labels survive. Do not treat this listing as complete.
 */
13009 mono_op_to_op_imm (int opcode)
13013 		return OP_IADD_IMM;
13015 		return OP_ISUB_IMM;
13017 		return OP_IDIV_IMM;
13019 		return OP_IDIV_UN_IMM;
13021 		return OP_IREM_IMM;
13023 		return OP_IREM_UN_IMM;
13025 		return OP_IMUL_IMM;
13027 		return OP_IAND_IMM;
13031 		return OP_IXOR_IMM;
13033 		return OP_ISHL_IMM;
13035 		return OP_ISHR_IMM;
13037 		return OP_ISHR_UN_IMM;
13040 		return OP_LADD_IMM;
13042 		return OP_LSUB_IMM;
13044 		return OP_LAND_IMM;
13048 		return OP_LXOR_IMM;
13050 		return OP_LSHL_IMM;
13052 		return OP_LSHR_IMM;
13054 		return OP_LSHR_UN_IMM;
/* 64-bit targets can handle long remainder-by-immediate natively */
13055 #if SIZEOF_REGISTER == 8
13057 		return OP_LREM_IMM;
13061 		return OP_COMPARE_IMM;
13063 		return OP_ICOMPARE_IMM;
13065 		return OP_LCOMPARE_IMM;
13067 	case OP_STORE_MEMBASE_REG:
13068 		return OP_STORE_MEMBASE_IMM;
13069 	case OP_STOREI1_MEMBASE_REG:
13070 		return OP_STOREI1_MEMBASE_IMM;
13071 	case OP_STOREI2_MEMBASE_REG:
13072 		return OP_STOREI2_MEMBASE_IMM;
13073 	case OP_STOREI4_MEMBASE_REG:
13074 		return OP_STOREI4_MEMBASE_IMM;
/* x86-family push/compare have direct immediate encodings */
13076 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13078 		return OP_X86_PUSH_IMM;
13079 	case OP_X86_COMPARE_MEMBASE_REG:
13080 		return OP_X86_COMPARE_MEMBASE_IMM;
13082 #if defined(TARGET_AMD64)
13083 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
13084 		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13086 	case OP_VOIDCALL_REG:
13087 		return OP_VOIDCALL;
13095 		return OP_LOCALLOC_IMM;
13102 ldind_to_load_membase (int opcode)
13106 return OP_LOADI1_MEMBASE;
13108 return OP_LOADU1_MEMBASE;
13110 return OP_LOADI2_MEMBASE;
13112 return OP_LOADU2_MEMBASE;
13114 return OP_LOADI4_MEMBASE;
13116 return OP_LOADU4_MEMBASE;
13118 return OP_LOAD_MEMBASE;
13119 case CEE_LDIND_REF:
13120 return OP_LOAD_MEMBASE;
13122 return OP_LOADI8_MEMBASE;
13124 return OP_LOADR4_MEMBASE;
13126 return OP_LOADR8_MEMBASE;
13128 g_assert_not_reached ();
13135 stind_to_store_membase (int opcode)
13139 return OP_STOREI1_MEMBASE_REG;
13141 return OP_STOREI2_MEMBASE_REG;
13143 return OP_STOREI4_MEMBASE_REG;
13145 case CEE_STIND_REF:
13146 return OP_STORE_MEMBASE_REG;
13148 return OP_STOREI8_MEMBASE_REG;
13150 return OP_STORER4_MEMBASE_REG;
13152 return OP_STORER8_MEMBASE_REG;
13154 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, convert a base+offset load into the corresponding
 * absolute-address OP_*_MEM load. Returns -1 when no such form exists
 * (including on every other architecture).
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	/* FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif
	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU op's result is immediately stored back through store_opcode,
 * return the x86/amd64 read-modify-write form (ADD/SUB/AND/OR/XOR straight
 * into memory, register or immediate source), avoiding the intermediate
 * register. On x86 only pointer-sized/i4 stores qualify; amd64 also accepts
 * i8 stores, and has separate 64-bit OP_AMD64_* forms.
 * NOTE(review): this excerpt is elided — the `case` labels, default return
 * and switch scaffolding between the visible lines are not shown.
 */
13187 op_to_op_dest_membase (int store_opcode, int opcode)
13189 #if defined(TARGET_X86)
13190 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13195 		return OP_X86_ADD_MEMBASE_REG;
13197 		return OP_X86_SUB_MEMBASE_REG;
13199 		return OP_X86_AND_MEMBASE_REG;
13201 		return OP_X86_OR_MEMBASE_REG;
13203 		return OP_X86_XOR_MEMBASE_REG;
13206 		return OP_X86_ADD_MEMBASE_IMM;
13209 		return OP_X86_SUB_MEMBASE_IMM;
13212 		return OP_X86_AND_MEMBASE_IMM;
13215 		return OP_X86_OR_MEMBASE_IMM;
13218 		return OP_X86_XOR_MEMBASE_IMM;
13224 #if defined(TARGET_AMD64)
13225 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13230 		return OP_X86_ADD_MEMBASE_REG;
13232 		return OP_X86_SUB_MEMBASE_REG;
13234 		return OP_X86_AND_MEMBASE_REG;
13236 		return OP_X86_OR_MEMBASE_REG;
13238 		return OP_X86_XOR_MEMBASE_REG;
13240 		return OP_X86_ADD_MEMBASE_IMM;
13242 		return OP_X86_SUB_MEMBASE_IMM;
13244 		return OP_X86_AND_MEMBASE_IMM;
13246 		return OP_X86_OR_MEMBASE_IMM;
13248 		return OP_X86_XOR_MEMBASE_IMM;
13250 		return OP_AMD64_ADD_MEMBASE_REG;
13252 		return OP_AMD64_SUB_MEMBASE_REG;
13254 		return OP_AMD64_AND_MEMBASE_REG;
13256 		return OP_AMD64_OR_MEMBASE_REG;
13258 		return OP_AMD64_XOR_MEMBASE_REG;
13261 		return OP_AMD64_ADD_MEMBASE_IMM;
13264 		return OP_AMD64_SUB_MEMBASE_IMM;
13267 		return OP_AMD64_AND_MEMBASE_IMM;
13270 		return OP_AMD64_OR_MEMBASE_IMM;
13273 		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   On x86/amd64, when a compare-result opcode's one-byte result is stored
 * via OP_STOREI1_MEMBASE_REG, return the SETcc-directly-to-memory form
 * (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE), skipping the register.
 * NOTE(review): the `case` labels and the fallback return are elided from
 * this excerpt; only the two guarded returns survive.
 */
13283 op_to_op_store_membase (int store_opcode, int opcode)
13285 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13288 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
13289 			return OP_X86_SETEQ_MEMBASE;
13291 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
13292 			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When an opcode's first source register is produced by a preceding
 * membase load (load_opcode), return the x86/amd64 form of the opcode
 * that reads that operand directly from memory (push/compare with a
 * membase operand), so the load can be folded away. Word-size checks
 * differ between x86, amd64 and the amd64 ILP32 ABI (__mono_ilp32__).
 * NOTE(review): this excerpt is elided — several `case` labels, `break`s
 * and the default return are missing; the block at 13340-13344 is inside
 * a FIXME'd comment ("only works for 32 bit immediates") in the original.
 */
13300 op_to_op_src1_membase (int load_opcode, int opcode)
13303 	/* FIXME: This has sign extension issues */
13305 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13306 		return OP_X86_COMPARE_MEMBASE8_IMM;
13309 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13314 		return OP_X86_PUSH_MEMBASE;
13315 	case OP_COMPARE_IMM:
13316 	case OP_ICOMPARE_IMM:
13317 		return OP_X86_COMPARE_MEMBASE_IMM;
13320 		return OP_X86_COMPARE_MEMBASE_REG;
13324 #ifdef TARGET_AMD64
13325 	/* FIXME: This has sign extension issues */
13327 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13328 		return OP_X86_COMPARE_MEMBASE8_IMM;
13333 #ifdef __mono_ilp32__
13334 		if (load_opcode == OP_LOADI8_MEMBASE)
13336 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13338 			return OP_X86_PUSH_MEMBASE;
13340 	/* FIXME: This only works for 32 bit immediates
13341 	case OP_COMPARE_IMM:
13342 	case OP_LCOMPARE_IMM:
13343 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13344 			return OP_AMD64_COMPARE_MEMBASE_IMM;
13346 	case OP_ICOMPARE_IMM:
13347 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13348 			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13352 #ifdef __mono_ilp32__
13353 		if (load_opcode == OP_LOAD_MEMBASE)
13354 			return OP_AMD64_ICOMPARE_MEMBASE_REG;
13355 		if (load_opcode == OP_LOADI8_MEMBASE)
13357 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13359 			return OP_AMD64_COMPARE_MEMBASE_REG;
13362 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13363 			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Companion to op_to_op_src1_membase for the SECOND source register:
 * when sreg2 is produced by a preceding membase load (load_opcode),
 * return the x86/amd64 reg-op-membase form (compare/add/sub/and/or/xor
 * with a memory right operand) so the load can be folded. amd64 picks
 * 32-bit (OP_AMD64_I*/OP_X86_*) vs 64-bit (OP_AMD64_*) variants by load
 * width, with extra cases for the ILP32 ABI (__mono_ilp32__).
 * NOTE(review): this excerpt is elided — `case` labels, `break`s and the
 * default return between the visible lines are not shown.
 */
13372 op_to_op_src2_membase (int load_opcode, int opcode)
13375 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13381 		return OP_X86_COMPARE_REG_MEMBASE;
13383 		return OP_X86_ADD_REG_MEMBASE;
13385 		return OP_X86_SUB_REG_MEMBASE;
13387 		return OP_X86_AND_REG_MEMBASE;
13389 		return OP_X86_OR_REG_MEMBASE;
13391 		return OP_X86_XOR_REG_MEMBASE;
13395 #ifdef TARGET_AMD64
13396 #ifdef __mono_ilp32__
13397 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13399 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13403 			return OP_AMD64_ICOMPARE_REG_MEMBASE;
13405 			return OP_X86_ADD_REG_MEMBASE;
13407 			return OP_X86_SUB_REG_MEMBASE;
13409 			return OP_X86_AND_REG_MEMBASE;
13411 			return OP_X86_OR_REG_MEMBASE;
13413 			return OP_X86_XOR_REG_MEMBASE;
13415 #ifdef __mono_ilp32__
13416 	} else if (load_opcode == OP_LOADI8_MEMBASE) {
13418 	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13423 			return OP_AMD64_COMPARE_REG_MEMBASE;
13425 			return OP_AMD64_ADD_REG_MEMBASE;
13427 			return OP_AMD64_SUB_REG_MEMBASE;
13429 			return OP_AMD64_AND_REG_MEMBASE;
13431 			return OP_AMD64_OR_REG_MEMBASE;
13433 			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but first declines opcodes that this
 * architecture only supports via runtime emulation, as selected by the
 * #if guards below: long shifts on 32-bit targets without native long
 * shift ops, and mul/div/rem under the MONO_ARCH_EMULATE_* macros.
 * Everything else falls through to mono_op_to_op_imm.
 * NOTE(review): the case labels and the early-out bodies under each #if
 * are elided from this excerpt — presumably they return -1; confirm
 * against the full source.
 */
13442 mono_op_to_op_imm_noemul (int opcode)
13445 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13451 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13458 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13463 	return mono_op_to_op_imm (opcode);
13468 * mono_handle_global_vregs:
13470 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13474 mono_handle_global_vregs (MonoCompile *cfg)
13476 gint32 *vreg_to_bb;
13477 MonoBasicBlock *bb;
13480 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13482 #ifdef MONO_ARCH_SIMD_INTRINSICS
13483 if (cfg->uses_simd_intrinsics)
13484 mono_simd_simplify_indirection (cfg);
13487 /* Find local vregs used in more than one bb */
13488 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13489 MonoInst *ins = bb->code;
13490 int block_num = bb->block_num;
13492 if (cfg->verbose_level > 2)
13493 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13496 for (; ins; ins = ins->next) {
13497 const char *spec = INS_INFO (ins->opcode);
13498 int regtype = 0, regindex;
13501 if (G_UNLIKELY (cfg->verbose_level > 2))
13502 mono_print_ins (ins);
13504 g_assert (ins->opcode >= MONO_CEE_LAST);
13506 for (regindex = 0; regindex < 4; regindex ++) {
13509 if (regindex == 0) {
13510 regtype = spec [MONO_INST_DEST];
13511 if (regtype == ' ')
13514 } else if (regindex == 1) {
13515 regtype = spec [MONO_INST_SRC1];
13516 if (regtype == ' ')
13519 } else if (regindex == 2) {
13520 regtype = spec [MONO_INST_SRC2];
13521 if (regtype == ' ')
13524 } else if (regindex == 3) {
13525 regtype = spec [MONO_INST_SRC3];
13526 if (regtype == ' ')
13531 #if SIZEOF_REGISTER == 4
13532 /* In the LLVM case, the long opcodes are not decomposed */
13533 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13535 * Since some instructions reference the original long vreg,
13536 * and some reference the two component vregs, it is quite hard
13537 * to determine when it needs to be global. So be conservative.
13539 if (!get_vreg_to_inst (cfg, vreg)) {
13540 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13542 if (cfg->verbose_level > 2)
13543 printf ("LONG VREG R%d made global.\n", vreg);
13547 * Make the component vregs volatile since the optimizations can
13548 * get confused otherwise.
13550 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13551 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13555 g_assert (vreg != -1);
13557 prev_bb = vreg_to_bb [vreg];
13558 if (prev_bb == 0) {
13559 /* 0 is a valid block num */
13560 vreg_to_bb [vreg] = block_num + 1;
13561 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13562 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13565 if (!get_vreg_to_inst (cfg, vreg)) {
13566 if (G_UNLIKELY (cfg->verbose_level > 2))
13567 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13571 if (vreg_is_ref (cfg, vreg))
13572 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13574 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13577 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13580 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13583 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13586 g_assert_not_reached ();
13590 /* Flag as having been used in more than one bb */
13591 vreg_to_bb [vreg] = -1;
13597 /* If a variable is used in only one bblock, convert it into a local vreg */
13598 for (i = 0; i < cfg->num_varinfo; i++) {
13599 MonoInst *var = cfg->varinfo [i];
13600 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13602 switch (var->type) {
13608 #if SIZEOF_REGISTER == 8
13611 #if !defined(TARGET_X86)
13612 /* Enabling this screws up the fp stack on x86 */
13615 if (mono_arch_is_soft_float ())
13618 /* Arguments are implicitly global */
13619 /* Putting R4 vars into registers doesn't work currently */
13620 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13621 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13623 * Make that the variable's liveness interval doesn't contain a call, since
13624 * that would cause the lvreg to be spilled, making the whole optimization
13627 /* This is too slow for JIT compilation */
13629 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13631 int def_index, call_index, ins_index;
13632 gboolean spilled = FALSE;
13637 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13638 const char *spec = INS_INFO (ins->opcode);
13640 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13641 def_index = ins_index;
13643 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13644 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13645 if (call_index > def_index) {
13651 if (MONO_IS_CALL (ins))
13652 call_index = ins_index;
13662 if (G_UNLIKELY (cfg->verbose_level > 2))
13663 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13664 var->flags |= MONO_INST_IS_DEAD;
13665 cfg->vreg_to_inst [var->dreg] = NULL;
13672 * Compress the varinfo and vars tables so the liveness computation is faster and
13673 * takes up less space.
13676 for (i = 0; i < cfg->num_varinfo; ++i) {
13677 MonoInst *var = cfg->varinfo [i];
13678 if (pos < i && cfg->locals_start == i)
13679 cfg->locals_start = pos;
13680 if (!(var->flags & MONO_INST_IS_DEAD)) {
13682 cfg->varinfo [pos] = cfg->varinfo [i];
13683 cfg->varinfo [pos]->inst_c0 = pos;
13684 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13685 cfg->vars [pos].idx = pos;
13686 #if SIZEOF_REGISTER == 4
13687 if (cfg->varinfo [pos]->type == STACK_I8) {
13688 /* Modify the two component vars too */
13691 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13692 var1->inst_c0 = pos;
13693 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13694 var1->inst_c0 = pos;
13701 cfg->num_varinfo = pos;
13702 if (cfg->locals_start > cfg->num_varinfo)
13703 cfg->locals_start = cfg->num_varinfo;
13707 * mono_spill_global_vars:
13709 * Generate spill code for variables which are not allocated to registers,
13710 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13711 * code is generated which could be optimized by the local optimization passes.
13714 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13716 MonoBasicBlock *bb;
13718 int orig_next_vreg;
13719 guint32 *vreg_to_lvreg;
13721 guint32 i, lvregs_len;
13722 gboolean dest_has_lvreg = FALSE;
13723 guint32 stacktypes [128];
13724 MonoInst **live_range_start, **live_range_end;
13725 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13726 int *gsharedvt_vreg_to_idx = NULL;
13728 *need_local_opts = FALSE;
13730 memset (spec2, 0, sizeof (spec2));
13732 /* FIXME: Move this function to mini.c */
13733 stacktypes ['i'] = STACK_PTR;
13734 stacktypes ['l'] = STACK_I8;
13735 stacktypes ['f'] = STACK_R8;
13736 #ifdef MONO_ARCH_SIMD_INTRINSICS
13737 stacktypes ['x'] = STACK_VTYPE;
13740 #if SIZEOF_REGISTER == 4
13741 /* Create MonoInsts for longs */
13742 for (i = 0; i < cfg->num_varinfo; i++) {
13743 MonoInst *ins = cfg->varinfo [i];
13745 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13746 switch (ins->type) {
13751 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13754 g_assert (ins->opcode == OP_REGOFFSET);
13756 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13758 tree->opcode = OP_REGOFFSET;
13759 tree->inst_basereg = ins->inst_basereg;
13760 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13762 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13764 tree->opcode = OP_REGOFFSET;
13765 tree->inst_basereg = ins->inst_basereg;
13766 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13776 if (cfg->compute_gc_maps) {
13777 /* registers need liveness info even for !non refs */
13778 for (i = 0; i < cfg->num_varinfo; i++) {
13779 MonoInst *ins = cfg->varinfo [i];
13781 if (ins->opcode == OP_REGVAR)
13782 ins->flags |= MONO_INST_GC_TRACK;
13786 if (cfg->gsharedvt) {
13787 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13789 for (i = 0; i < cfg->num_varinfo; ++i) {
13790 MonoInst *ins = cfg->varinfo [i];
13793 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13794 if (i >= cfg->locals_start) {
13796 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13797 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13798 ins->opcode = OP_GSHAREDVT_LOCAL;
13799 ins->inst_imm = idx;
13802 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13803 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13809 /* FIXME: widening and truncation */
13812 * As an optimization, when a variable allocated to the stack is first loaded into
13813 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13814 * the variable again.
13816 orig_next_vreg = cfg->next_vreg;
13817 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13818 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13822 * These arrays contain the first and last instructions accessing a given
13824 * Since we emit bblocks in the same order we process them here, and we
13825 * don't split live ranges, these will precisely describe the live range of
13826 * the variable, i.e. the instruction range where a valid value can be found
13827 * in the variables location.
13828 * The live range is computed using the liveness info computed by the liveness pass.
13829 * We can't use vmv->range, since that is an abstract live range, and we need
13830 * one which is instruction precise.
13831 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13833 /* FIXME: Only do this if debugging info is requested */
13834 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13835 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13836 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13837 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13839 /* Add spill loads/stores */
13840 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13843 if (cfg->verbose_level > 2)
13844 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13846 /* Clear vreg_to_lvreg array */
13847 for (i = 0; i < lvregs_len; i++)
13848 vreg_to_lvreg [lvregs [i]] = 0;
13852 MONO_BB_FOR_EACH_INS (bb, ins) {
13853 const char *spec = INS_INFO (ins->opcode);
13854 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13855 gboolean store, no_lvreg;
13856 int sregs [MONO_MAX_SRC_REGS];
13858 if (G_UNLIKELY (cfg->verbose_level > 2))
13859 mono_print_ins (ins);
13861 if (ins->opcode == OP_NOP)
13865 * We handle LDADDR here as well, since it can only be decomposed
13866 * when variable addresses are known.
13868 if (ins->opcode == OP_LDADDR) {
13869 MonoInst *var = ins->inst_p0;
13871 if (var->opcode == OP_VTARG_ADDR) {
13872 /* Happens on SPARC/S390 where vtypes are passed by reference */
13873 MonoInst *vtaddr = var->inst_left;
13874 if (vtaddr->opcode == OP_REGVAR) {
13875 ins->opcode = OP_MOVE;
13876 ins->sreg1 = vtaddr->dreg;
13878 else if (var->inst_left->opcode == OP_REGOFFSET) {
13879 ins->opcode = OP_LOAD_MEMBASE;
13880 ins->inst_basereg = vtaddr->inst_basereg;
13881 ins->inst_offset = vtaddr->inst_offset;
13884 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13885 /* gsharedvt arg passed by ref */
13886 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13888 ins->opcode = OP_LOAD_MEMBASE;
13889 ins->inst_basereg = var->inst_basereg;
13890 ins->inst_offset = var->inst_offset;
13891 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13892 MonoInst *load, *load2, *load3;
13893 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13894 int reg1, reg2, reg3;
13895 MonoInst *info_var = cfg->gsharedvt_info_var;
13896 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13900 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13903 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13905 g_assert (info_var);
13906 g_assert (locals_var);
13908 /* Mark the instruction used to compute the locals var as used */
13909 cfg->gsharedvt_locals_var_ins = NULL;
13911 /* Load the offset */
13912 if (info_var->opcode == OP_REGOFFSET) {
13913 reg1 = alloc_ireg (cfg);
13914 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13915 } else if (info_var->opcode == OP_REGVAR) {
13917 reg1 = info_var->dreg;
13919 g_assert_not_reached ();
13921 reg2 = alloc_ireg (cfg);
13922 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13923 /* Load the locals area address */
13924 reg3 = alloc_ireg (cfg);
13925 if (locals_var->opcode == OP_REGOFFSET) {
13926 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13927 } else if (locals_var->opcode == OP_REGVAR) {
13928 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13930 g_assert_not_reached ();
13932 /* Compute the address */
13933 ins->opcode = OP_PADD;
13937 mono_bblock_insert_before_ins (bb, ins, load3);
13938 mono_bblock_insert_before_ins (bb, load3, load2);
13940 mono_bblock_insert_before_ins (bb, load2, load);
13942 g_assert (var->opcode == OP_REGOFFSET);
13944 ins->opcode = OP_ADD_IMM;
13945 ins->sreg1 = var->inst_basereg;
13946 ins->inst_imm = var->inst_offset;
13949 *need_local_opts = TRUE;
13950 spec = INS_INFO (ins->opcode);
13953 if (ins->opcode < MONO_CEE_LAST) {
13954 mono_print_ins (ins);
13955 g_assert_not_reached ();
13959 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13963 if (MONO_IS_STORE_MEMBASE (ins)) {
13964 tmp_reg = ins->dreg;
13965 ins->dreg = ins->sreg2;
13966 ins->sreg2 = tmp_reg;
13969 spec2 [MONO_INST_DEST] = ' ';
13970 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13971 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13972 spec2 [MONO_INST_SRC3] = ' ';
13974 } else if (MONO_IS_STORE_MEMINDEX (ins))
13975 g_assert_not_reached ();
13980 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13981 printf ("\t %.3s %d", spec, ins->dreg);
13982 num_sregs = mono_inst_get_src_registers (ins, sregs);
13983 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13984 printf (" %d", sregs [srcindex]);
13991 regtype = spec [MONO_INST_DEST];
13992 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13995 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13996 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13997 MonoInst *store_ins;
13999 MonoInst *def_ins = ins;
14000 int dreg = ins->dreg; /* The original vreg */
14002 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14004 if (var->opcode == OP_REGVAR) {
14005 ins->dreg = var->dreg;
14006 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14008 * Instead of emitting a load+store, use a _membase opcode.
14010 g_assert (var->opcode == OP_REGOFFSET);
14011 if (ins->opcode == OP_MOVE) {
14015 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14016 ins->inst_basereg = var->inst_basereg;
14017 ins->inst_offset = var->inst_offset;
14020 spec = INS_INFO (ins->opcode);
14024 g_assert (var->opcode == OP_REGOFFSET);
14026 prev_dreg = ins->dreg;
14028 /* Invalidate any previous lvreg for this vreg */
14029 vreg_to_lvreg [ins->dreg] = 0;
14033 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14035 store_opcode = OP_STOREI8_MEMBASE_REG;
14038 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14040 #if SIZEOF_REGISTER != 8
14041 if (regtype == 'l') {
14042 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14043 mono_bblock_insert_after_ins (bb, ins, store_ins);
14044 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14045 mono_bblock_insert_after_ins (bb, ins, store_ins);
14046 def_ins = store_ins;
14051 g_assert (store_opcode != OP_STOREV_MEMBASE);
14053 /* Try to fuse the store into the instruction itself */
14054 /* FIXME: Add more instructions */
14055 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14056 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14057 ins->inst_imm = ins->inst_c0;
14058 ins->inst_destbasereg = var->inst_basereg;
14059 ins->inst_offset = var->inst_offset;
14060 spec = INS_INFO (ins->opcode);
14061 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14062 ins->opcode = store_opcode;
14063 ins->inst_destbasereg = var->inst_basereg;
14064 ins->inst_offset = var->inst_offset;
14068 tmp_reg = ins->dreg;
14069 ins->dreg = ins->sreg2;
14070 ins->sreg2 = tmp_reg;
14073 spec2 [MONO_INST_DEST] = ' ';
14074 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14075 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14076 spec2 [MONO_INST_SRC3] = ' ';
14078 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14079 // FIXME: The backends expect the base reg to be in inst_basereg
14080 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14082 ins->inst_basereg = var->inst_basereg;
14083 ins->inst_offset = var->inst_offset;
14084 spec = INS_INFO (ins->opcode);
14086 /* printf ("INS: "); mono_print_ins (ins); */
14087 /* Create a store instruction */
14088 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14090 /* Insert it after the instruction */
14091 mono_bblock_insert_after_ins (bb, ins, store_ins);
14093 def_ins = store_ins;
14096 * We can't assign ins->dreg to var->dreg here, since the
14097 * sregs could use it. So set a flag, and do it after
14100 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14101 dest_has_lvreg = TRUE;
14106 if (def_ins && !live_range_start [dreg]) {
14107 live_range_start [dreg] = def_ins;
14108 live_range_start_bb [dreg] = bb;
14111 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14114 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14115 tmp->inst_c1 = dreg;
14116 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14123 num_sregs = mono_inst_get_src_registers (ins, sregs);
14124 for (srcindex = 0; srcindex < 3; ++srcindex) {
14125 regtype = spec [MONO_INST_SRC1 + srcindex];
14126 sreg = sregs [srcindex];
14128 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14129 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14130 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14131 MonoInst *use_ins = ins;
14132 MonoInst *load_ins;
14133 guint32 load_opcode;
14135 if (var->opcode == OP_REGVAR) {
14136 sregs [srcindex] = var->dreg;
14137 //mono_inst_set_src_registers (ins, sregs);
14138 live_range_end [sreg] = use_ins;
14139 live_range_end_bb [sreg] = bb;
14141 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14144 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14145 /* var->dreg is a hreg */
14146 tmp->inst_c1 = sreg;
14147 mono_bblock_insert_after_ins (bb, ins, tmp);
14153 g_assert (var->opcode == OP_REGOFFSET);
14155 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14157 g_assert (load_opcode != OP_LOADV_MEMBASE);
14159 if (vreg_to_lvreg [sreg]) {
14160 g_assert (vreg_to_lvreg [sreg] != -1);
14162 /* The variable is already loaded to an lvreg */
14163 if (G_UNLIKELY (cfg->verbose_level > 2))
14164 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14165 sregs [srcindex] = vreg_to_lvreg [sreg];
14166 //mono_inst_set_src_registers (ins, sregs);
14170 /* Try to fuse the load into the instruction */
14171 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14172 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14173 sregs [0] = var->inst_basereg;
14174 //mono_inst_set_src_registers (ins, sregs);
14175 ins->inst_offset = var->inst_offset;
14176 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14177 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14178 sregs [1] = var->inst_basereg;
14179 //mono_inst_set_src_registers (ins, sregs);
14180 ins->inst_offset = var->inst_offset;
14182 if (MONO_IS_REAL_MOVE (ins)) {
14183 ins->opcode = OP_NOP;
14186 //printf ("%d ", srcindex); mono_print_ins (ins);
14188 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14190 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14191 if (var->dreg == prev_dreg) {
14193 * sreg refers to the value loaded by the load
14194 * emitted below, but we need to use ins->dreg
14195 * since it refers to the store emitted earlier.
14199 g_assert (sreg != -1);
14200 vreg_to_lvreg [var->dreg] = sreg;
14201 g_assert (lvregs_len < 1024);
14202 lvregs [lvregs_len ++] = var->dreg;
14206 sregs [srcindex] = sreg;
14207 //mono_inst_set_src_registers (ins, sregs);
14209 #if SIZEOF_REGISTER != 8
14210 if (regtype == 'l') {
14211 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14212 mono_bblock_insert_before_ins (bb, ins, load_ins);
14213 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14214 mono_bblock_insert_before_ins (bb, ins, load_ins);
14215 use_ins = load_ins;
14220 #if SIZEOF_REGISTER == 4
14221 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14223 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14224 mono_bblock_insert_before_ins (bb, ins, load_ins);
14225 use_ins = load_ins;
14229 if (var->dreg < orig_next_vreg) {
14230 live_range_end [var->dreg] = use_ins;
14231 live_range_end_bb [var->dreg] = bb;
14234 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14237 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14238 tmp->inst_c1 = var->dreg;
14239 mono_bblock_insert_after_ins (bb, ins, tmp);
14243 mono_inst_set_src_registers (ins, sregs);
14245 if (dest_has_lvreg) {
14246 g_assert (ins->dreg != -1);
14247 vreg_to_lvreg [prev_dreg] = ins->dreg;
14248 g_assert (lvregs_len < 1024);
14249 lvregs [lvregs_len ++] = prev_dreg;
14250 dest_has_lvreg = FALSE;
14254 tmp_reg = ins->dreg;
14255 ins->dreg = ins->sreg2;
14256 ins->sreg2 = tmp_reg;
14259 if (MONO_IS_CALL (ins)) {
14260 /* Clear vreg_to_lvreg array */
14261 for (i = 0; i < lvregs_len; i++)
14262 vreg_to_lvreg [lvregs [i]] = 0;
14264 } else if (ins->opcode == OP_NOP) {
14266 MONO_INST_NULLIFY_SREGS (ins);
14269 if (cfg->verbose_level > 2)
14270 mono_print_ins_index (1, ins);
14273 /* Extend the live range based on the liveness info */
14274 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14275 for (i = 0; i < cfg->num_varinfo; i ++) {
14276 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14278 if (vreg_is_volatile (cfg, vi->vreg))
14279 /* The liveness info is incomplete */
14282 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14283 /* Live from at least the first ins of this bb */
14284 live_range_start [vi->vreg] = bb->code;
14285 live_range_start_bb [vi->vreg] = bb;
14288 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14289 /* Live at least until the last ins of this bb */
14290 live_range_end [vi->vreg] = bb->last_ins;
14291 live_range_end_bb [vi->vreg] = bb;
14297 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14299 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14300 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14302 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14303 for (i = 0; i < cfg->num_varinfo; ++i) {
14304 int vreg = MONO_VARINFO (cfg, i)->vreg;
14307 if (live_range_start [vreg]) {
14308 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14310 ins->inst_c1 = vreg;
14311 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14313 if (live_range_end [vreg]) {
14314 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14316 ins->inst_c1 = vreg;
14317 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14318 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14320 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14326 if (cfg->gsharedvt_locals_var_ins) {
14327 /* Nullify if unused */
14328 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14329 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14332 g_free (live_range_start);
14333 g_free (live_range_end);
14334 g_free (live_range_start_bb);
14335 g_free (live_range_end_bb);
14340 * - use 'iadd' instead of 'int_add'
14341 * - handling ovf opcodes: decompose in method_to_ir.
14342 * - unify iregs/fregs
14343 * -> partly done, the missing parts are:
14344 * - a more complete unification would involve unifying the hregs as well, so
14345 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14346 * would no longer map to the machine hregs, so the code generators would need to
14347 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14348 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14349 * fp/non-fp branches speeds it up by about 15%.
14350 * - use sext/zext opcodes instead of shifts
14352 * - get rid of TEMPLOADs if possible and use vregs instead
14353 * - clean up usage of OP_P/OP_ opcodes
14354 * - cleanup usage of DUMMY_USE
14355 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14357 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14358 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14359 * - make sure handle_stack_args () is called before the branch is emitted
14360 * - when the new IR is done, get rid of all unused stuff
14361 * - COMPARE/BEQ as separate instructions or unify them ?
14362 * - keeping them separate allows specialized compare instructions like
14363 * compare_imm, compare_membase
14364 * - most back ends unify fp compare+branch, fp compare+ceq
14365 * - integrate mono_save_args into inline_method
14366 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14367 * - handle long shift opts on 32 bit platforms somehow: they require
14368 * 3 sregs (2 for arg1 and 1 for arg2)
14369 * - make byref a 'normal' type.
14370 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14371 * variable if needed.
14372 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14373 * like inline_method.
14374 * - remove inlining restrictions
14375 * - fix LNEG and enable cfold of INEG
14376 * - generalize x86 optimizations like ldelema as a peephole optimization
14377 * - add store_mem_imm for amd64
14378 * - optimize the loading of the interruption flag in the managed->native wrappers
14379 * - avoid special handling of OP_NOP in passes
14380 * - move code inserting instructions into one function/macro.
14381 * - try a coalescing phase after liveness analysis
14382 * - add float -> vreg conversion + local optimizations on !x86
14383 * - figure out how to handle decomposed branches during optimizations, ie.
14384 * compare+branch, op_jump_table+op_br etc.
14385 * - promote RuntimeXHandles to vregs
14386 * - vtype cleanups:
14387 * - add a NEW_VARLOADA_VREG macro
14388 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14389 * accessing vtype fields.
14390 * - get rid of I8CONST on 64 bit platforms
14391 * - dealing with the increase in code size due to branches created during opcode
14393 * - use extended basic blocks
14394 * - all parts of the JIT
14395 * - handle_global_vregs () && local regalloc
14396 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14397 * - sources of increase in code size:
14400 * - isinst and castclass
14401 * - lvregs not allocated to global registers even if used multiple times
14402 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14404 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14405 * - add all micro optimizations from the old JIT
14406 * - put tree optimizations into the deadce pass
14407 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14408 * specific function.
14409 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14410 * fcompare + branchCC.
14411 * - create a helper function for allocating a stack slot, taking into account
14412 * MONO_CFG_HAS_SPILLUP.
14414 * - merge the ia64 switch changes.
14415 * - optimize mono_regstate2_alloc_int/float.
14416 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14417 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14418 * parts of the tree could be separated by other instructions, killing the tree
14419 * arguments, or stores killing loads etc. Also, should we fold loads into other
14420 * instructions if the result of the load is used multiple times ?
14421 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14422 * - LAST MERGE: 108395.
14423 * - when returning vtypes in registers, generate IR and append it to the end of the
14424 * last bb instead of doing it in the epilog.
14425 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14433 - When to decompose opcodes:
14434 - earlier: this makes some optimizations hard to implement, since the low level IR
14435 no longer contains the necessary information. But it is easier to do.
14436 - later: harder to implement, enables more optimizations.
14437 - Branches inside bblocks:
14438 - created when decomposing complex opcodes.
14439 - branches to another bblock: harmless, but not tracked by the branch
14440 optimizations, so need to branch to a label at the start of the bblock.
14441 - branches to inside the same bblock: very problematic, trips up the local
14442 reg allocator. Can be fixed by splitting the current bblock, but that is a
14443 complex operation, since some local vregs can become global vregs etc.
14444 - Local/global vregs:
14445 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14446 local register allocator.
14447 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14448 structure, created by mono_create_var (). Assigned to hregs or the stack by
14449 the global register allocator.
14450 - When to do optimizations like alu->alu_imm:
14451 - earlier -> saves work later on since the IR will be smaller/simpler
14452 - later -> can work on more instructions
14453 - Handling of valuetypes:
14454 - When a vtype is pushed on the stack, a new temporary is created, an
14455 instruction computing its address (LDADDR) is emitted and pushed on
14456 the stack. Need to optimize cases when the vtype is used immediately as in
14457 argument passing, stloc etc.
14458 - Instead of the to_end stuff in the old JIT, simply call the function handling
14459 the values on the stack before emitting the last instruction of the bb.
14462 #endif /* DISABLE_JIT */