2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whenever 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_domain_get;
150 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 * Instruction metadata
161 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
162 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
168 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
173 /* keep in sync with the enum in mini.h */
176 #include "mini-ops.h"
181 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
182 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
184 * This should contain the index of the last sreg + 1. This is not the same
185 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
187 const gint8 ins_sreg_counts[] = {
188 #include "mini-ops.h"
193 #define MONO_INIT_VARINFO(vi,id) do { \
194 (vi)->range.first_use.pos.bid = 0xffff; \
200 mono_alloc_ireg (MonoCompile *cfg)
202 return alloc_ireg (cfg);
206 mono_alloc_lreg (MonoCompile *cfg)
208 return alloc_lreg (cfg);
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_get_underlying_type (type);
275 switch (type->type) {
288 case MONO_TYPE_FNPTR:
290 case MONO_TYPE_CLASS:
291 case MONO_TYPE_STRING:
292 case MONO_TYPE_OBJECT:
293 case MONO_TYPE_SZARRAY:
294 case MONO_TYPE_ARRAY:
298 #if SIZEOF_REGISTER == 8
304 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
307 case MONO_TYPE_VALUETYPE:
308 if (type->data.klass->enumtype) {
309 type = mono_class_enum_basetype (type->data.klass);
312 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
315 case MONO_TYPE_TYPEDBYREF:
317 case MONO_TYPE_GENERICINST:
318 type = &type->data.generic_class->container_class->byval_arg;
322 g_assert (cfg->gshared);
323 if (mini_type_var_is_vt (type))
326 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
328 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
334 mono_print_bb (MonoBasicBlock *bb, const char *msg)
339 printf ("\n%s %d: [IN: ", msg, bb->block_num);
340 for (i = 0; i < bb->in_count; ++i)
341 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
343 for (i = 0; i < bb->out_count; ++i)
344 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
346 for (tree = bb->code; tree; tree = tree->next)
347 mono_print_ins_index (-1, tree);
351 mono_create_helper_signatures (void)
353 helper_sig_domain_get = mono_create_icall_signature ("ptr");
354 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
357 static MONO_NEVER_INLINE void
358 break_on_unverified (void)
360 if (mini_get_debug_options ()->break_on_unverified)
364 static MONO_NEVER_INLINE void
365 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
367 char *method_fname = mono_method_full_name (method, TRUE);
368 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
370 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
371 g_free (method_fname);
372 g_free (cil_method_fname);
375 static MONO_NEVER_INLINE void
376 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
378 char *method_fname = mono_method_full_name (method, TRUE);
379 char *field_fname = mono_field_full_name (field);
380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
381 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
382 g_free (method_fname);
383 g_free (field_fname);
386 static MONO_NEVER_INLINE void
387 inline_failure (MonoCompile *cfg, const char *msg)
389 if (cfg->verbose_level >= 2)
390 printf ("inline failed: %s\n", msg);
391 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
394 static MONO_NEVER_INLINE void
395 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
397 if (cfg->verbose_level > 2) \
398 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
399 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
402 static MONO_NEVER_INLINE void
403 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
405 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
406 if (cfg->verbose_level >= 2)
407 printf ("%s\n", cfg->exception_message);
408 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
412 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
413 * foo<T> (int i) { ldarg.0; box T; }
415 #define UNVERIFIED do { \
416 if (cfg->gsharedvt) { \
417 if (cfg->verbose_level > 2) \
418 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
420 goto exception_exit; \
422 break_on_unverified (); \
426 #define GET_BBLOCK(cfg,tblock,ip) do { \
427 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
429 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
430 NEW_BBLOCK (cfg, (tblock)); \
431 (tblock)->cil_code = (ip); \
432 ADD_BBLOCK (cfg, (tblock)); \
436 #if defined(TARGET_X86) || defined(TARGET_AMD64)
437 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
438 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
439 (dest)->dreg = alloc_ireg_mp ((cfg)); \
440 (dest)->sreg1 = (sr1); \
441 (dest)->sreg2 = (sr2); \
442 (dest)->inst_imm = (imm); \
443 (dest)->backend.shift_amount = (shift); \
444 MONO_ADD_INS ((cfg)->cbb, (dest)); \
448 /* Emit conversions so both operands of a binary opcode are of the same type */
450 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
452 MonoInst *arg1 = *arg1_ref;
453 MonoInst *arg2 = *arg2_ref;
456 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
457 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
460 /* Mixing r4/r8 is allowed by the spec */
461 if (arg1->type == STACK_R4) {
462 int dreg = alloc_freg (cfg);
464 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
465 conv->type = STACK_R8;
469 if (arg2->type == STACK_R4) {
470 int dreg = alloc_freg (cfg);
472 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
473 conv->type = STACK_R8;
479 #if SIZEOF_REGISTER == 8
480 /* FIXME: Need to add many more cases */
481 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
484 int dr = alloc_preg (cfg);
485 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
486 (ins)->sreg2 = widen->dreg;
491 #define ADD_BINOP(op) do { \
492 MONO_INST_NEW (cfg, ins, (op)); \
494 ins->sreg1 = sp [0]->dreg; \
495 ins->sreg2 = sp [1]->dreg; \
496 type_from_op (cfg, ins, sp [0], sp [1]); \
498 /* Have to insert a widening op */ \
499 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
500 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
501 MONO_ADD_INS ((cfg)->cbb, (ins)); \
502 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
505 #define ADD_UNOP(op) do { \
506 MONO_INST_NEW (cfg, ins, (op)); \
508 ins->sreg1 = sp [0]->dreg; \
509 type_from_op (cfg, ins, sp [0], NULL); \
511 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
512 MONO_ADD_INS ((cfg)->cbb, (ins)); \
513 *sp++ = mono_decompose_opcode (cfg, ins); \
516 #define ADD_BINCOND(next_block) do { \
519 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
520 cmp->sreg1 = sp [0]->dreg; \
521 cmp->sreg2 = sp [1]->dreg; \
522 type_from_op (cfg, cmp, sp [0], sp [1]); \
524 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
525 type_from_op (cfg, ins, sp [0], sp [1]); \
526 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
527 GET_BBLOCK (cfg, tblock, target); \
528 link_bblock (cfg, cfg->cbb, tblock); \
529 ins->inst_true_bb = tblock; \
530 if ((next_block)) { \
531 link_bblock (cfg, cfg->cbb, (next_block)); \
532 ins->inst_false_bb = (next_block); \
533 start_new_bblock = 1; \
535 GET_BBLOCK (cfg, tblock, ip); \
536 link_bblock (cfg, cfg->cbb, tblock); \
537 ins->inst_false_bb = tblock; \
538 start_new_bblock = 2; \
540 if (sp != stack_start) { \
541 handle_stack_args (cfg, stack_start, sp - stack_start); \
542 CHECK_UNVERIFIABLE (cfg); \
544 MONO_ADD_INS (cfg->cbb, cmp); \
545 MONO_ADD_INS (cfg->cbb, ins); \
549 * link_bblock: Links two basic blocks
551 * links two basic blocks in the control flow graph, the 'from'
552 * argument is the starting block and the 'to' argument is the block
553 * the control flow ends to after 'from'.
556 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
558 MonoBasicBlock **newa;
562 if (from->cil_code) {
564 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
566 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
569 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
571 printf ("edge from entry to exit\n");
576 for (i = 0; i < from->out_count; ++i) {
577 if (to == from->out_bb [i]) {
583 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
584 for (i = 0; i < from->out_count; ++i) {
585 newa [i] = from->out_bb [i];
593 for (i = 0; i < to->in_count; ++i) {
594 if (from == to->in_bb [i]) {
600 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
601 for (i = 0; i < to->in_count; ++i) {
602 newa [i] = to->in_bb [i];
611 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
613 link_bblock (cfg, from, to);
617 * mono_find_block_region:
619 * We mark each basic block with a region ID. We use that to avoid BB
620 * optimizations when blocks are in different regions.
623 * A region token that encodes where this region is, and information
624 * about the clause owner for this block.
626 * The region encodes the try/catch/filter clause that owns this block
627 * as well as the type. -1 is a special value that represents a block
628 * that is in none of try/catch/filter.
631 mono_find_block_region (MonoCompile *cfg, int offset)
633 MonoMethodHeader *header = cfg->header;
634 MonoExceptionClause *clause;
637 for (i = 0; i < header->num_clauses; ++i) {
638 clause = &header->clauses [i];
639 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
640 (offset < (clause->handler_offset)))
641 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
643 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
644 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
645 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
646 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
647 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
649 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
652 for (i = 0; i < header->num_clauses; ++i) {
653 clause = &header->clauses [i];
655 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
656 return ((i + 1) << 8) | clause->flags;
663 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
665 MonoMethodHeader *header = cfg->header;
666 MonoExceptionClause *clause;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
672 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
673 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
674 if (clause->flags == type)
675 res = g_list_append (res, clause);
682 mono_create_spvar_for_region (MonoCompile *cfg, int region)
686 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
690 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
691 /* prevent it from being register allocated */
692 var->flags |= MONO_INST_VOLATILE;
694 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
698 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
700 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
704 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
708 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
712 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
713 /* prevent it from being register allocated */
714 var->flags |= MONO_INST_VOLATILE;
716 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
722 * Returns the type used in the eval stack when @type is loaded.
723 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
726 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
730 type = mini_get_underlying_type (type);
731 inst->klass = klass = mono_class_from_mono_type (type);
733 inst->type = STACK_MP;
738 switch (type->type) {
740 inst->type = STACK_INV;
748 inst->type = STACK_I4;
753 case MONO_TYPE_FNPTR:
754 inst->type = STACK_PTR;
756 case MONO_TYPE_CLASS:
757 case MONO_TYPE_STRING:
758 case MONO_TYPE_OBJECT:
759 case MONO_TYPE_SZARRAY:
760 case MONO_TYPE_ARRAY:
761 inst->type = STACK_OBJ;
765 inst->type = STACK_I8;
768 inst->type = cfg->r4_stack_type;
771 inst->type = STACK_R8;
773 case MONO_TYPE_VALUETYPE:
774 if (type->data.klass->enumtype) {
775 type = mono_class_enum_basetype (type->data.klass);
779 inst->type = STACK_VTYPE;
782 case MONO_TYPE_TYPEDBYREF:
783 inst->klass = mono_defaults.typed_reference_class;
784 inst->type = STACK_VTYPE;
786 case MONO_TYPE_GENERICINST:
787 type = &type->data.generic_class->container_class->byval_arg;
791 g_assert (cfg->gshared);
792 if (mini_is_gsharedvt_type (type)) {
793 g_assert (cfg->gsharedvt);
794 inst->type = STACK_VTYPE;
796 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
800 g_error ("unknown type 0x%02x in eval stack type", type->type);
805 * The following tables are used to quickly validate the IL code in type_from_op ().
808 bin_num_table [STACK_MAX] [STACK_MAX] = {
809 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
814 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
822 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
825 /* reduce the size of this table */
827 bin_int_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
839 bin_comp_table [STACK_MAX] [STACK_MAX] = {
840 /* Inv i L p F & O vt r4 */
842 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
843 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
844 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
845 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
846 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
847 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
848 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
849 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
852 /* reduce the size of this table */
854 shift_table [STACK_MAX] [STACK_MAX] = {
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
866 * Tables to map from the non-specific opcode to the matching
867 * type-specific opcode.
869 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
871 binops_op_map [STACK_MAX] = {
872 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
875 /* handles from CEE_NEG to CEE_CONV_U8 */
877 unops_op_map [STACK_MAX] = {
878 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
881 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
883 ovfops_op_map [STACK_MAX] = {
884 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
887 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
889 ovf2ops_op_map [STACK_MAX] = {
890 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
893 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
895 ovf3ops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
899 /* handles from CEE_BEQ to CEE_BLT_UN */
901 beqops_op_map [STACK_MAX] = {
902 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
905 /* handles from CEE_CEQ to CEE_CLT_UN */
907 ceqops_op_map [STACK_MAX] = {
908 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
912 * Sets ins->type (the type on the eval stack) according to the
913 * type of the opcode and the arguments to it.
914 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
916 * FIXME: this function sets ins->type unconditionally in some cases, but
917 * it should set it to invalid for some types (a conv.x on an object)
920 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
922 switch (ins->opcode) {
929 /* FIXME: check unverifiable args for STACK_MP */
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += binops_op_map [ins->type];
938 ins->type = bin_int_table [src1->type] [src2->type];
939 ins->opcode += binops_op_map [ins->type];
944 ins->type = shift_table [src1->type] [src2->type];
945 ins->opcode += binops_op_map [ins->type];
950 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
951 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
952 ins->opcode = OP_LCOMPARE;
953 else if (src1->type == STACK_R4)
954 ins->opcode = OP_RCOMPARE;
955 else if (src1->type == STACK_R8)
956 ins->opcode = OP_FCOMPARE;
958 ins->opcode = OP_ICOMPARE;
960 case OP_ICOMPARE_IMM:
961 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
962 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
963 ins->opcode = OP_LCOMPARE_IMM;
975 ins->opcode += beqops_op_map [src1->type];
978 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
979 ins->opcode += ceqops_op_map [src1->type];
985 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
986 ins->opcode += ceqops_op_map [src1->type];
990 ins->type = neg_table [src1->type];
991 ins->opcode += unops_op_map [ins->type];
994 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
995 ins->type = src1->type;
997 ins->type = STACK_INV;
998 ins->opcode += unops_op_map [ins->type];
1004 ins->type = STACK_I4;
1005 ins->opcode += unops_op_map [src1->type];
1008 ins->type = STACK_R8;
1009 switch (src1->type) {
1012 ins->opcode = OP_ICONV_TO_R_UN;
1015 ins->opcode = OP_LCONV_TO_R_UN;
1019 case CEE_CONV_OVF_I1:
1020 case CEE_CONV_OVF_U1:
1021 case CEE_CONV_OVF_I2:
1022 case CEE_CONV_OVF_U2:
1023 case CEE_CONV_OVF_I4:
1024 case CEE_CONV_OVF_U4:
1025 ins->type = STACK_I4;
1026 ins->opcode += ovf3ops_op_map [src1->type];
1028 case CEE_CONV_OVF_I_UN:
1029 case CEE_CONV_OVF_U_UN:
1030 ins->type = STACK_PTR;
1031 ins->opcode += ovf2ops_op_map [src1->type];
1033 case CEE_CONV_OVF_I1_UN:
1034 case CEE_CONV_OVF_I2_UN:
1035 case CEE_CONV_OVF_I4_UN:
1036 case CEE_CONV_OVF_U1_UN:
1037 case CEE_CONV_OVF_U2_UN:
1038 case CEE_CONV_OVF_U4_UN:
1039 ins->type = STACK_I4;
1040 ins->opcode += ovf2ops_op_map [src1->type];
1043 ins->type = STACK_PTR;
1044 switch (src1->type) {
1046 ins->opcode = OP_ICONV_TO_U;
1050 #if SIZEOF_VOID_P == 8
1051 ins->opcode = OP_LCONV_TO_U;
1053 ins->opcode = OP_MOVE;
1057 ins->opcode = OP_LCONV_TO_U;
1060 ins->opcode = OP_FCONV_TO_U;
1066 ins->type = STACK_I8;
1067 ins->opcode += unops_op_map [src1->type];
1069 case CEE_CONV_OVF_I8:
1070 case CEE_CONV_OVF_U8:
1071 ins->type = STACK_I8;
1072 ins->opcode += ovf3ops_op_map [src1->type];
1074 case CEE_CONV_OVF_U8_UN:
1075 case CEE_CONV_OVF_I8_UN:
1076 ins->type = STACK_I8;
1077 ins->opcode += ovf2ops_op_map [src1->type];
1080 ins->type = cfg->r4_stack_type;
1081 ins->opcode += unops_op_map [src1->type];
1084 ins->type = STACK_R8;
1085 ins->opcode += unops_op_map [src1->type];
1088 ins->type = STACK_R8;
1092 ins->type = STACK_I4;
1093 ins->opcode += ovfops_op_map [src1->type];
1096 case CEE_CONV_OVF_I:
1097 case CEE_CONV_OVF_U:
1098 ins->type = STACK_PTR;
1099 ins->opcode += ovfops_op_map [src1->type];
1102 case CEE_ADD_OVF_UN:
1104 case CEE_MUL_OVF_UN:
1106 case CEE_SUB_OVF_UN:
1107 ins->type = bin_num_table [src1->type] [src2->type];
1108 ins->opcode += ovfops_op_map [src1->type];
1109 if (ins->type == STACK_R8)
1110 ins->type = STACK_INV;
1112 case OP_LOAD_MEMBASE:
1113 ins->type = STACK_PTR;
1115 case OP_LOADI1_MEMBASE:
1116 case OP_LOADU1_MEMBASE:
1117 case OP_LOADI2_MEMBASE:
1118 case OP_LOADU2_MEMBASE:
1119 case OP_LOADI4_MEMBASE:
1120 case OP_LOADU4_MEMBASE:
1121 ins->type = STACK_PTR;
1123 case OP_LOADI8_MEMBASE:
1124 ins->type = STACK_I8;
1126 case OP_LOADR4_MEMBASE:
1127 ins->type = cfg->r4_stack_type;
1129 case OP_LOADR8_MEMBASE:
1130 ins->type = STACK_R8;
1133 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1137 if (ins->type == STACK_MP)
1138 ins->klass = mono_defaults.object_class;
1143 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1149 param_table [STACK_MAX] [STACK_MAX] = {
1154 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1159 switch (args->type) {
1169 for (i = 0; i < sig->param_count; ++i) {
1170 switch (args [i].type) {
1174 if (!sig->params [i]->byref)
1178 if (sig->params [i]->byref)
1180 switch (sig->params [i]->type) {
1181 case MONO_TYPE_CLASS:
1182 case MONO_TYPE_STRING:
1183 case MONO_TYPE_OBJECT:
1184 case MONO_TYPE_SZARRAY:
1185 case MONO_TYPE_ARRAY:
1192 if (sig->params [i]->byref)
1194 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1203 /*if (!param_table [args [i].type] [sig->params [i]->type])
1211 * When we need a pointer to the current domain many times in a method, we
1212 * call mono_domain_get() once and we store the result in a local variable.
1213 * This function returns the variable that represents the MonoDomain*.
1215 inline static MonoInst *
1216 mono_get_domainvar (MonoCompile *cfg)
1218 if (!cfg->domainvar)
1219 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 return cfg->domainvar;
1224 * The got_var contains the address of the Global Offset Table when AOT
1228 mono_get_got_var (MonoCompile *cfg)
1230 #ifdef MONO_ARCH_NEED_GOT_VAR
1231 if (!cfg->compile_aot)
1233 if (!cfg->got_var) {
1234 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1236 return cfg->got_var;
1243 mono_get_vtable_var (MonoCompile *cfg)
1245 g_assert (cfg->gshared);
1247 if (!cfg->rgctx_var) {
1248 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1249 /* force the var to be stack allocated */
1250 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1253 return cfg->rgctx_var;
1257 type_from_stack_type (MonoInst *ins) {
1258 switch (ins->type) {
1259 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1260 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1261 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1262 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1263 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1265 return &ins->klass->this_arg;
1266 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1267 case STACK_VTYPE: return &ins->klass->byval_arg;
1269 g_error ("stack type %d to monotype not handled\n", ins->type);
1274 static G_GNUC_UNUSED int
1275 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1277 t = mono_type_get_underlying_type (t);
1289 case MONO_TYPE_FNPTR:
1291 case MONO_TYPE_CLASS:
1292 case MONO_TYPE_STRING:
1293 case MONO_TYPE_OBJECT:
1294 case MONO_TYPE_SZARRAY:
1295 case MONO_TYPE_ARRAY:
1301 return cfg->r4_stack_type;
1304 case MONO_TYPE_VALUETYPE:
1305 case MONO_TYPE_TYPEDBYREF:
1307 case MONO_TYPE_GENERICINST:
1308 if (mono_type_generic_inst_is_valuetype (t))
1314 g_assert_not_reached ();
1321 array_access_to_klass (int opcode)
1325 return mono_defaults.byte_class;
1327 return mono_defaults.uint16_class;
1330 return mono_defaults.int_class;
1333 return mono_defaults.sbyte_class;
1336 return mono_defaults.int16_class;
1339 return mono_defaults.int32_class;
1341 return mono_defaults.uint32_class;
1344 return mono_defaults.int64_class;
1347 return mono_defaults.single_class;
1350 return mono_defaults.double_class;
1351 case CEE_LDELEM_REF:
1352 case CEE_STELEM_REF:
1353 return mono_defaults.object_class;
1355 g_assert_not_reached ();
1361 * We try to share variables when possible
1364 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1369 /* inlining can result in deeper stacks */
1370 if (slot >= cfg->header->max_stack)
1371 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1373 pos = ins->type - 1 + slot * STACK_MAX;
1375 switch (ins->type) {
1382 if ((vnum = cfg->intvars [pos]))
1383 return cfg->varinfo [vnum];
1384 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1385 cfg->intvars [pos] = res->inst_c0;
1388 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1394 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1397 * Don't use this if a generic_context is set, since that means AOT can't
1398 * look up the method using just the image+token.
1399 * table == 0 means this is a reference made from a wrapper.
1401 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1402 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1403 jump_info_token->image = image;
1404 jump_info_token->token = token;
1405 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1410 * This function is called to handle items that are left on the evaluation stack
1411 * at basic block boundaries. What happens is that we save the values to local variables
1412 * and we reload them later when first entering the target basic block (with the
1413 * handle_loaded_temps () function).
1414 * A single joint point will use the same variables (stored in the array bb->out_stack or
1415 * bb->in_stack, if the basic block is before or after the joint point).
1417 * This function needs to be called _before_ emitting the last instruction of
1418 * the bb (i.e. before emitting a branch).
1419 * If the stack merge fails at a join point, cfg->unverifiable is set.
1422 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1425 MonoBasicBlock *bb = cfg->cbb;
1426 MonoBasicBlock *outb;
1427 MonoInst *inst, **locals;
1432 if (cfg->verbose_level > 3)
1433 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1434 if (!bb->out_scount) {
1435 bb->out_scount = count;
1436 //printf ("bblock %d has out:", bb->block_num);
1438 for (i = 0; i < bb->out_count; ++i) {
1439 outb = bb->out_bb [i];
1440 /* exception handlers are linked, but they should not be considered for stack args */
1441 if (outb->flags & BB_EXCEPTION_HANDLER)
1443 //printf (" %d", outb->block_num);
1444 if (outb->in_stack) {
1446 bb->out_stack = outb->in_stack;
1452 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1453 for (i = 0; i < count; ++i) {
1455 * try to reuse temps already allocated for this purpouse, if they occupy the same
1456 * stack slot and if they are of the same type.
1457 * This won't cause conflicts since if 'local' is used to
1458 * store one of the values in the in_stack of a bblock, then
1459 * the same variable will be used for the same outgoing stack
1461 * This doesn't work when inlining methods, since the bblocks
1462 * in the inlined methods do not inherit their in_stack from
1463 * the bblock they are inlined to. See bug #58863 for an
1466 if (cfg->inlined_method)
1467 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1469 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1474 for (i = 0; i < bb->out_count; ++i) {
1475 outb = bb->out_bb [i];
1476 /* exception handlers are linked, but they should not be considered for stack args */
1477 if (outb->flags & BB_EXCEPTION_HANDLER)
1479 if (outb->in_scount) {
1480 if (outb->in_scount != bb->out_scount) {
1481 cfg->unverifiable = TRUE;
1484 continue; /* check they are the same locals */
1486 outb->in_scount = count;
1487 outb->in_stack = bb->out_stack;
1490 locals = bb->out_stack;
1492 for (i = 0; i < count; ++i) {
1493 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1494 inst->cil_code = sp [i]->cil_code;
1495 sp [i] = locals [i];
1496 if (cfg->verbose_level > 3)
1497 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1501 * It is possible that the out bblocks already have in_stack assigned, and
1502 * the in_stacks differ. In this case, we will store to all the different
1509 /* Find a bblock which has a different in_stack */
1511 while (bindex < bb->out_count) {
1512 outb = bb->out_bb [bindex];
1513 /* exception handlers are linked, but they should not be considered for stack args */
1514 if (outb->flags & BB_EXCEPTION_HANDLER) {
1518 if (outb->in_stack != locals) {
1519 for (i = 0; i < count; ++i) {
1520 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1521 inst->cil_code = sp [i]->cil_code;
1522 sp [i] = locals [i];
1523 if (cfg->verbose_level > 3)
1524 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1526 locals = outb->in_stack;
1536 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1538 int ibitmap_reg = alloc_preg (cfg);
1539 #ifdef COMPRESSED_INTERFACE_BITMAP
1541 MonoInst *res, *ins;
1542 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1543 MONO_ADD_INS (cfg->cbb, ins);
1545 if (cfg->compile_aot)
1546 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1548 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1549 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1550 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1552 int ibitmap_byte_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1556 if (cfg->compile_aot) {
1557 int iid_reg = alloc_preg (cfg);
1558 int shifted_iid_reg = alloc_preg (cfg);
1559 int ibitmap_byte_address_reg = alloc_preg (cfg);
1560 int masked_iid_reg = alloc_preg (cfg);
1561 int iid_one_bit_reg = alloc_preg (cfg);
1562 int iid_bit_reg = alloc_preg (cfg);
1563 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1565 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1566 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1568 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1579 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1580 * stored in "klass_reg" implements the interface "klass".
1583 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1585 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1589 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1590 * stored in "vtable_reg" implements the interface "klass".
1593 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1595 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1599 * Emit code which checks whenever the interface id of @klass is smaller than
1600 * than the value given by max_iid_reg.
1603 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1604 MonoBasicBlock *false_target)
1606 if (cfg->compile_aot) {
1607 int iid_reg = alloc_preg (cfg);
1608 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1609 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1616 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1619 /* Same as above, but obtains max_iid from a vtable */
1621 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1622 MonoBasicBlock *false_target)
1624 int max_iid_reg = alloc_preg (cfg);
1626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1627 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1630 /* Same as above, but obtains max_iid from a klass */
1632 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1633 MonoBasicBlock *false_target)
1635 int max_iid_reg = alloc_preg (cfg);
1637 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1638 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1642 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1644 int idepth_reg = alloc_preg (cfg);
1645 int stypes_reg = alloc_preg (cfg);
1646 int stype = alloc_preg (cfg);
1648 mono_class_setup_supertypes (klass);
1650 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1658 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1659 } else if (cfg->compile_aot) {
1660 int const_reg = alloc_preg (cfg);
1661 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1662 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1666 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1670 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1672 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1676 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1678 int intf_reg = alloc_preg (cfg);
1680 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1681 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1686 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1690 * Variant of the above that takes a register to the class, not the vtable.
1693 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1695 int intf_bit_reg = alloc_preg (cfg);
1697 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1698 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1703 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1707 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1710 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1711 } else if (cfg->compile_aot) {
1712 int const_reg = alloc_preg (cfg);
1713 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1714 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1718 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1722 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1724 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1728 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1730 if (cfg->compile_aot) {
1731 int const_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1733 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1735 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1737 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1741 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1744 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1747 int rank_reg = alloc_preg (cfg);
1748 int eclass_reg = alloc_preg (cfg);
1750 g_assert (!klass_inst);
1751 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1753 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1754 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1756 if (klass->cast_class == mono_defaults.object_class) {
1757 int parent_reg = alloc_preg (cfg);
1758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1759 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1760 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1761 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1762 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1763 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1764 } else if (klass->cast_class == mono_defaults.enum_class) {
1765 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1766 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1767 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1769 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1770 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1773 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1774 /* Check that the object is a vector too */
1775 int bounds_reg = alloc_preg (cfg);
1776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1778 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1781 int idepth_reg = alloc_preg (cfg);
1782 int stypes_reg = alloc_preg (cfg);
1783 int stype = alloc_preg (cfg);
1785 mono_class_setup_supertypes (klass);
1787 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1790 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1793 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1794 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1799 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1801 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1805 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1809 g_assert (val == 0);
1814 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1817 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1820 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1823 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1825 #if SIZEOF_REGISTER == 8
1827 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1833 val_reg = alloc_preg (cfg);
1835 if (SIZEOF_REGISTER == 8)
1836 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1838 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1841 /* This could be optimized further if neccesary */
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1850 #if !NO_UNALIGNED_ACCESS
1851 if (SIZEOF_REGISTER == 8) {
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1871 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1876 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1883 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1890 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1891 g_assert (size < 10000);
1894 /* This could be optimized further if neccesary */
1896 cur_reg = alloc_preg (cfg);
1897 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1905 #if !NO_UNALIGNED_ACCESS
1906 if (SIZEOF_REGISTER == 8) {
1908 cur_reg = alloc_preg (cfg);
1909 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1919 cur_reg = alloc_preg (cfg);
1920 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1921 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1927 cur_reg = alloc_preg (cfg);
1928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1945 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1949 if (cfg->compile_aot) {
1950 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1951 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1953 ins->sreg2 = c->dreg;
1954 MONO_ADD_INS (cfg->cbb, ins);
1956 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1958 ins->inst_offset = mini_get_tls_offset (tls_key);
1959 MONO_ADD_INS (cfg->cbb, ins);
1966 * Emit IR to push the current LMF onto the LMF stack.
1969 emit_push_lmf (MonoCompile *cfg)
1972 * Emit IR to push the LMF:
1973 * lmf_addr = <lmf_addr from tls>
1974 * lmf->lmf_addr = lmf_addr
1975 * lmf->prev_lmf = *lmf_addr
1978 int lmf_reg, prev_lmf_reg;
1979 MonoInst *ins, *lmf_ins;
1984 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1985 /* Load current lmf */
1986 lmf_ins = mono_get_lmf_intrinsic (cfg);
1988 MONO_ADD_INS (cfg->cbb, lmf_ins);
1989 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1990 lmf_reg = ins->dreg;
1991 /* Save previous_lmf */
1992 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1994 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1997 * Store lmf_addr in a variable, so it can be allocated to a global register.
1999 if (!cfg->lmf_addr_var)
2000 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2003 ins = mono_get_jit_tls_intrinsic (cfg);
2005 int jit_tls_dreg = ins->dreg;
2007 MONO_ADD_INS (cfg->cbb, ins);
2008 lmf_reg = alloc_preg (cfg);
2009 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2011 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2014 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2016 MONO_ADD_INS (cfg->cbb, lmf_ins);
2019 MonoInst *args [16], *jit_tls_ins, *ins;
2021 /* Inline mono_get_lmf_addr () */
2022 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2024 /* Load mono_jit_tls_id */
2025 if (cfg->compile_aot)
2026 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2028 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2029 /* call pthread_getspecific () */
2030 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2031 /* lmf_addr = &jit_tls->lmf */
2032 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2035 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2039 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2041 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2042 lmf_reg = ins->dreg;
2044 prev_lmf_reg = alloc_preg (cfg);
2045 /* Save previous_lmf */
2046 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2047 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2049 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2056 * Emit IR to pop the current LMF from the LMF stack.
2059 emit_pop_lmf (MonoCompile *cfg)
2061 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2067 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2068 lmf_reg = ins->dreg;
2070 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2071 /* Load previous_lmf */
2072 prev_lmf_reg = alloc_preg (cfg);
2073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2075 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2078 * Emit IR to pop the LMF:
2079 * *(lmf->lmf_addr) = lmf->prev_lmf
2081 /* This could be called before emit_push_lmf () */
2082 if (!cfg->lmf_addr_var)
2083 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2084 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2086 prev_lmf_reg = alloc_preg (cfg);
2087 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2088 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2093 emit_instrumentation_call (MonoCompile *cfg, void *func)
2095 MonoInst *iargs [1];
2098 * Avoid instrumenting inlined methods since it can
2099 * distort profiling results.
2101 if (cfg->method != cfg->current_method)
2104 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2105 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2106 mono_emit_jit_icall (cfg, func, iargs);
2111 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2114 type = mini_get_underlying_type (type);
2115 switch (type->type) {
2116 case MONO_TYPE_VOID:
2117 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2124 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2128 case MONO_TYPE_FNPTR:
2129 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2130 case MONO_TYPE_CLASS:
2131 case MONO_TYPE_STRING:
2132 case MONO_TYPE_OBJECT:
2133 case MONO_TYPE_SZARRAY:
2134 case MONO_TYPE_ARRAY:
2135 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2138 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2141 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2143 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2145 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2146 case MONO_TYPE_VALUETYPE:
2147 if (type->data.klass->enumtype) {
2148 type = mono_class_enum_basetype (type->data.klass);
2151 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2152 case MONO_TYPE_TYPEDBYREF:
2153 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2154 case MONO_TYPE_GENERICINST:
2155 type = &type->data.generic_class->container_class->byval_arg;
2158 case MONO_TYPE_MVAR:
2160 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2162 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2168 * target_type_is_incompatible:
2169 * @cfg: MonoCompile context
2171 * Check that the item @arg on the evaluation stack can be stored
2172 * in the target type (can be a local, or field, etc).
2173 * The cfg arg can be used to check if we need verification or just
2176 * Returns: non-0 value if arg can't be stored on a target.
2179 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2181 MonoType *simple_type;
2184 if (target->byref) {
2185 /* FIXME: check that the pointed to types match */
2186 if (arg->type == STACK_MP)
2187 return arg->klass != mono_class_from_mono_type (target);
2188 if (arg->type == STACK_PTR)
2193 simple_type = mini_get_underlying_type (target);
2194 switch (simple_type->type) {
2195 case MONO_TYPE_VOID:
2203 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2207 /* STACK_MP is needed when setting pinned locals */
2208 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_FNPTR:
2215 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2216 * in native int. (#688008).
2218 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2221 case MONO_TYPE_CLASS:
2222 case MONO_TYPE_STRING:
2223 case MONO_TYPE_OBJECT:
2224 case MONO_TYPE_SZARRAY:
2225 case MONO_TYPE_ARRAY:
2226 if (arg->type != STACK_OBJ)
2228 /* FIXME: check type compatibility */
2232 if (arg->type != STACK_I8)
2236 if (arg->type != cfg->r4_stack_type)
2240 if (arg->type != STACK_R8)
2243 case MONO_TYPE_VALUETYPE:
2244 if (arg->type != STACK_VTYPE)
2246 klass = mono_class_from_mono_type (simple_type);
2247 if (klass != arg->klass)
2250 case MONO_TYPE_TYPEDBYREF:
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 if (klass != arg->klass)
2257 case MONO_TYPE_GENERICINST:
2258 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2259 if (arg->type != STACK_VTYPE)
2261 klass = mono_class_from_mono_type (simple_type);
2262 /* The second cases is needed when doing partial sharing */
2263 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2267 if (arg->type != STACK_OBJ)
2269 /* FIXME: check type compatibility */
2273 case MONO_TYPE_MVAR:
2274 g_assert (cfg->gshared);
2275 if (mini_type_var_is_vt (simple_type)) {
2276 if (arg->type != STACK_VTYPE)
2279 if (arg->type != STACK_OBJ)
2284 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2290 * Prepare arguments for passing to a function call.
2291 * Return a non-zero value if the arguments can't be passed to the given
2293 * The type checks are not yet complete and some conversions may need
2294 * casts on 32 or 64 bit architectures.
2296 * FIXME: implement this using target_type_is_incompatible ()
2299 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2301 MonoType *simple_type;
2305 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2309 for (i = 0; i < sig->param_count; ++i) {
2310 if (sig->params [i]->byref) {
2311 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2315 simple_type = mini_get_underlying_type (sig->params [i]);
2317 switch (simple_type->type) {
2318 case MONO_TYPE_VOID:
2327 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2333 case MONO_TYPE_FNPTR:
2334 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2337 case MONO_TYPE_CLASS:
2338 case MONO_TYPE_STRING:
2339 case MONO_TYPE_OBJECT:
2340 case MONO_TYPE_SZARRAY:
2341 case MONO_TYPE_ARRAY:
2342 if (args [i]->type != STACK_OBJ)
2347 if (args [i]->type != STACK_I8)
2351 if (args [i]->type != cfg->r4_stack_type)
2355 if (args [i]->type != STACK_R8)
2358 case MONO_TYPE_VALUETYPE:
2359 if (simple_type->data.klass->enumtype) {
2360 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2363 if (args [i]->type != STACK_VTYPE)
2366 case MONO_TYPE_TYPEDBYREF:
2367 if (args [i]->type != STACK_VTYPE)
2370 case MONO_TYPE_GENERICINST:
2371 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2374 case MONO_TYPE_MVAR:
2376 if (args [i]->type != STACK_VTYPE)
2380 g_error ("unknown type 0x%02x in check_call_signature",
2388 callvirt_to_call (int opcode)
2391 case OP_CALL_MEMBASE:
2393 case OP_VOIDCALL_MEMBASE:
2395 case OP_FCALL_MEMBASE:
2397 case OP_RCALL_MEMBASE:
2399 case OP_VCALL_MEMBASE:
2401 case OP_LCALL_MEMBASE:
2404 g_assert_not_reached ();
2410 /* Either METHOD or IMT_ARG needs to be set */
2412 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2416 if (COMPILE_LLVM (cfg)) {
2417 method_reg = alloc_preg (cfg);
2420 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2421 } else if (cfg->compile_aot) {
2422 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2425 MONO_INST_NEW (cfg, ins, OP_PCONST);
2426 ins->inst_p0 = method;
2427 ins->dreg = method_reg;
2428 MONO_ADD_INS (cfg->cbb, ins);
2432 call->imt_arg_reg = method_reg;
2434 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2438 method_reg = alloc_preg (cfg);
2441 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2442 } else if (cfg->compile_aot) {
2443 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2446 MONO_INST_NEW (cfg, ins, OP_PCONST);
2447 ins->inst_p0 = method;
2448 ins->dreg = method_reg;
2449 MONO_ADD_INS (cfg->cbb, ins);
2452 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2455 static MonoJumpInfo *
2456 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2458 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2462 ji->data.target = target;
2468 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2471 return mono_class_check_context_used (klass);
2477 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2480 return mono_method_check_context_used (method);
2486 * check_method_sharing:
2488 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2491 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2493 gboolean pass_vtable = FALSE;
2494 gboolean pass_mrgctx = FALSE;
2496 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2497 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2498 gboolean sharable = FALSE;
2500 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2504 * Pass vtable iff target method might
2505 * be shared, which means that sharing
2506 * is enabled for its class and its
2507 * context is sharable (and it's not a
2510 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2514 if (mini_method_get_context (cmethod) &&
2515 mini_method_get_context (cmethod)->method_inst) {
2516 g_assert (!pass_vtable);
2518 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2521 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2526 if (out_pass_vtable)
2527 *out_pass_vtable = pass_vtable;
2528 if (out_pass_mrgctx)
2529 *out_pass_mrgctx = pass_mrgctx;
2532 inline static MonoCallInst *
2533 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2534 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2538 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2543 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2545 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2547 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2550 call->signature = sig;
2551 call->rgctx_reg = rgctx;
2552 sig_ret = mini_get_underlying_type (sig->ret);
2554 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2557 if (mini_type_is_vtype (sig_ret)) {
2558 call->vret_var = cfg->vret_addr;
2559 //g_assert_not_reached ();
2561 } else if (mini_type_is_vtype (sig_ret)) {
2562 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2565 temp->backend.is_pinvoke = sig->pinvoke;
2568 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2569 * address of return value to increase optimization opportunities.
2570 * Before vtype decomposition, the dreg of the call ins itself represents the
2571 * fact the call modifies the return value. After decomposition, the call will
2572 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2573 * will be transformed into an LDADDR.
2575 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2576 loada->dreg = alloc_preg (cfg);
2577 loada->inst_p0 = temp;
2578 /* We reference the call too since call->dreg could change during optimization */
2579 loada->inst_p1 = call;
2580 MONO_ADD_INS (cfg->cbb, loada);
2582 call->inst.dreg = temp->dreg;
2584 call->vret_var = loada;
2585 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2586 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2588 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2589 if (COMPILE_SOFT_FLOAT (cfg)) {
2591 * If the call has a float argument, we would need to do an r8->r4 conversion using
2592 * an icall, but that cannot be done during the call sequence since it would clobber
2593 * the call registers + the stack. So we do it before emitting the call.
2595 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2597 MonoInst *in = call->args [i];
2599 if (i >= sig->hasthis)
2600 t = sig->params [i - sig->hasthis];
2602 t = &mono_defaults.int_class->byval_arg;
2603 t = mono_type_get_underlying_type (t);
2605 if (!t->byref && t->type == MONO_TYPE_R4) {
2606 MonoInst *iargs [1];
2610 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2612 /* The result will be in an int vreg */
2613 call->args [i] = conv;
2619 call->need_unbox_trampoline = unbox_trampoline;
2622 if (COMPILE_LLVM (cfg))
2623 mono_llvm_emit_call (cfg, call);
2625 mono_arch_emit_call (cfg, call);
2627 mono_arch_emit_call (cfg, call);
2630 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2631 cfg->flags |= MONO_CFG_HAS_CALLS;
2637 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2639 #ifdef MONO_ARCH_RGCTX_REG
2640 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2641 cfg->uses_rgctx_reg = TRUE;
2642 call->rgctx_reg = TRUE;
2644 call->rgctx_arg_reg = rgctx_reg;
2651 inline static MonoInst*
2652 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2657 gboolean check_sp = FALSE;
2659 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2660 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2662 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2667 rgctx_reg = mono_alloc_preg (cfg);
2668 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2672 if (!cfg->stack_inbalance_var)
2673 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2675 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2676 ins->dreg = cfg->stack_inbalance_var->dreg;
2677 MONO_ADD_INS (cfg->cbb, ins);
2680 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2682 call->inst.sreg1 = addr->dreg;
2685 emit_imt_argument (cfg, call, NULL, imt_arg);
2687 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2692 sp_reg = mono_alloc_preg (cfg);
2694 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2696 MONO_ADD_INS (cfg->cbb, ins);
2698 /* Restore the stack so we don't crash when throwing the exception */
2699 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2700 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2701 MONO_ADD_INS (cfg->cbb, ins);
2703 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2704 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2708 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2710 return (MonoInst*)call;
2714 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2717 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2719 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2722 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2723 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2725 #ifndef DISABLE_REMOTING
2726 gboolean might_be_remote = FALSE;
2728 gboolean virtual = this_ins != NULL;
2729 gboolean enable_for_aot = TRUE;
2733 gboolean need_unbox_trampoline;
2736 sig = mono_method_signature (method);
2739 rgctx_reg = mono_alloc_preg (cfg);
2740 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2743 if (method->string_ctor) {
2744 /* Create the real signature */
2745 /* FIXME: Cache these */
2746 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2747 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2752 context_used = mini_method_check_context_used (cfg, method);
2754 #ifndef DISABLE_REMOTING
2755 might_be_remote = this_ins && sig->hasthis &&
2756 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2757 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2759 if (might_be_remote && context_used) {
2762 g_assert (cfg->gshared);
2764 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2766 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2770 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2772 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2774 #ifndef DISABLE_REMOTING
2775 if (might_be_remote)
2776 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2779 call->method = method;
2780 call->inst.flags |= MONO_INST_HAS_METHOD;
2781 call->inst.inst_left = this_ins;
2782 call->tail_call = tail;
2785 int vtable_reg, slot_reg, this_reg;
2788 this_reg = this_ins->dreg;
2790 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2791 MonoInst *dummy_use;
2793 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2795 /* Make a call to delegate->invoke_impl */
2796 call->inst.inst_basereg = this_reg;
2797 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2798 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2800 /* We must emit a dummy use here because the delegate trampoline will
2801 replace the 'this' argument with the delegate target making this activation
2802 no longer a root for the delegate.
2803 This is an issue for delegates that target collectible code such as dynamic
2804 methods of GC'able assemblies.
2806 For a test case look into #667921.
2808 FIXME: a dummy use is not the best way to do it as the local register allocator
2809 will put it on a caller save register and spil it around the call.
2810 Ideally, we would either put it on a callee save register or only do the store part.
2812 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2814 return (MonoInst*)call;
2817 if ((!cfg->compile_aot || enable_for_aot) &&
2818 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2819 (MONO_METHOD_IS_FINAL (method) &&
2820 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2821 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2823 * the method is not virtual, we just need to ensure this is not null
2824 * and then we can call the method directly.
2826 #ifndef DISABLE_REMOTING
2827 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2829 * The check above ensures method is not gshared, this is needed since
2830 * gshared methods can't have wrappers.
2832 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2836 if (!method->string_ctor)
2837 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2839 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2840 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2842 * the method is virtual, but we can statically dispatch since either
2843 * it's class or the method itself are sealed.
2844 * But first we need to ensure it's not a null reference.
2846 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2848 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2850 vtable_reg = alloc_preg (cfg);
2851 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2852 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2853 guint32 imt_slot = mono_method_get_imt_slot (method);
2854 emit_imt_argument (cfg, call, call->method, imt_arg);
2855 slot_reg = vtable_reg;
2856 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2858 slot_reg = vtable_reg;
2859 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2860 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2862 g_assert (mono_method_signature (method)->generic_param_count);
2863 emit_imt_argument (cfg, call, call->method, imt_arg);
2867 call->inst.sreg1 = slot_reg;
2868 call->inst.inst_offset = offset;
2869 call->virtual = TRUE;
2873 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2876 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2878 return (MonoInst*)call;
2882 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2884 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2888 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2895 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2898 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2900 return (MonoInst*)call;
2904 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2906 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2910 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2914 * mono_emit_abs_call:
2916 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2918 inline static MonoInst*
2919 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2920 MonoMethodSignature *sig, MonoInst **args)
2922 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2926 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2929 if (cfg->abs_patches == NULL)
2930 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2931 g_hash_table_insert (cfg->abs_patches, ji, ji);
2932 ins = mono_emit_native_call (cfg, ji, sig, args);
2933 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2938 direct_icalls_enabled (MonoCompile *cfg)
2940 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2942 if (cfg->compile_llvm)
2945 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2951 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
2954 * Call the jit icall without a wrapper if possible.
2955 * The wrapper is needed for the following reasons:
2956 * - to handle exceptions thrown using mono_raise_exceptions () from the
2957 * icall function. The EH code needs the lmf frame pushed by the
2958 * wrapper to be able to unwind back to managed code.
2959 * - to be able to do stack walks for asynchronously suspended
2960 * threads when debugging.
2962 if (info->no_raise && direct_icalls_enabled (cfg)) {
2966 if (!info->wrapper_method) {
2967 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2968 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2970 mono_memory_barrier ();
2974 * Inline the wrapper method, which is basically a call to the C icall, and
2975 * an exception check.
2977 costs = inline_method (cfg, info->wrapper_method, NULL,
2978 args, NULL, cfg->real_offset, TRUE);
2979 g_assert (costs > 0);
2980 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2984 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2989 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2991 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2992 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2996 * Native code might return non register sized integers
2997 * without initializing the upper bits.
2999 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3000 case OP_LOADI1_MEMBASE:
3001 widen_op = OP_ICONV_TO_I1;
3003 case OP_LOADU1_MEMBASE:
3004 widen_op = OP_ICONV_TO_U1;
3006 case OP_LOADI2_MEMBASE:
3007 widen_op = OP_ICONV_TO_I2;
3009 case OP_LOADU2_MEMBASE:
3010 widen_op = OP_ICONV_TO_U2;
3016 if (widen_op != -1) {
3017 int dreg = alloc_preg (cfg);
3020 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3021 widen->type = ins->type;
3031 get_memcpy_method (void)
3033 static MonoMethod *memcpy_method = NULL;
3034 if (!memcpy_method) {
3035 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3037 g_error ("Old corlib found. Install a new one");
3039 return memcpy_method;
3043 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3045 MonoClassField *field;
3046 gpointer iter = NULL;
3048 while ((field = mono_class_get_fields (klass, &iter))) {
3051 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3053 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3054 if (mini_type_is_reference (mono_field_get_type (field))) {
3055 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3056 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3058 MonoClass *field_class = mono_class_from_mono_type (field->type);
3059 if (field_class->has_references)
3060 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
3066 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3068 int card_table_shift_bits;
3069 gpointer card_table_mask;
3071 MonoInst *dummy_use;
3072 int nursery_shift_bits;
3073 size_t nursery_size;
3074 gboolean has_card_table_wb = FALSE;
3076 if (!cfg->gen_write_barriers)
3079 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3081 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3083 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3084 has_card_table_wb = TRUE;
3087 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3090 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3091 wbarrier->sreg1 = ptr->dreg;
3092 wbarrier->sreg2 = value->dreg;
3093 MONO_ADD_INS (cfg->cbb, wbarrier);
3094 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3095 int offset_reg = alloc_preg (cfg);
3096 int card_reg = alloc_preg (cfg);
3099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3100 if (card_table_mask)
3101 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3103 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3104 * IMM's larger than 32bits.
3106 if (cfg->compile_aot) {
3107 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3109 MONO_INST_NEW (cfg, ins, OP_PCONST);
3110 ins->inst_p0 = card_table;
3111 ins->dreg = card_reg;
3112 MONO_ADD_INS (cfg->cbb, ins);
3115 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3116 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3118 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3119 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3122 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
3126 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3128 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3129 unsigned need_wb = 0;
3134 /*types with references can't have alignment smaller than sizeof(void*) */
3135 if (align < SIZEOF_VOID_P)
3138 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3139 if (size > 32 * SIZEOF_VOID_P)
3142 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3144 /* We don't unroll more than 5 stores to avoid code bloat. */
3145 if (size > 5 * SIZEOF_VOID_P) {
3146 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3147 size += (SIZEOF_VOID_P - 1);
3148 size &= ~(SIZEOF_VOID_P - 1);
3150 EMIT_NEW_ICONST (cfg, iargs [2], size);
3151 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3152 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3156 destreg = iargs [0]->dreg;
3157 srcreg = iargs [1]->dreg;
3160 dest_ptr_reg = alloc_preg (cfg);
3161 tmp_reg = alloc_preg (cfg);
3164 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3166 while (size >= SIZEOF_VOID_P) {
3167 MonoInst *load_inst;
3168 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3169 load_inst->dreg = tmp_reg;
3170 load_inst->inst_basereg = srcreg;
3171 load_inst->inst_offset = offset;
3172 MONO_ADD_INS (cfg->cbb, load_inst);
3174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3177 emit_write_barrier (cfg, iargs [0], load_inst);
3179 offset += SIZEOF_VOID_P;
3180 size -= SIZEOF_VOID_P;
3183 /*tmp += sizeof (void*)*/
3184 if (size >= SIZEOF_VOID_P) {
3185 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3186 MONO_ADD_INS (cfg->cbb, iargs [0]);
3190 /* Those cannot be references since size < sizeof (void*) */
3192 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3193 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3199 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3200 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3206 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3207 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3216 * Emit code to copy a valuetype of type @klass whose address is stored in
3217 * @src->dreg to memory whose address is stored at @dest->dreg.
3220 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3222 MonoInst *iargs [4];
3225 MonoMethod *memcpy_method;
3226 MonoInst *size_ins = NULL;
3227 MonoInst *memcpy_ins = NULL;
3231 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3234 * This check breaks with spilled vars... need to handle it during verification anyway.
3235 * g_assert (klass && klass == src->klass && klass == dest->klass);
3238 if (mini_is_gsharedvt_klass (klass)) {
3240 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3241 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3245 n = mono_class_native_size (klass, &align);
3247 n = mono_class_value_size (klass, &align);
3249 /* if native is true there should be no references in the struct */
3250 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3251 /* Avoid barriers when storing to the stack */
3252 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3253 (dest->opcode == OP_LDADDR))) {
3259 context_used = mini_class_check_context_used (cfg, klass);
3261 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3262 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3264 } else if (context_used) {
3265 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3267 if (cfg->compile_aot) {
3268 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3270 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3271 mono_class_compute_gc_descriptor (klass);
3276 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3278 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3283 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3284 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3285 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3290 iargs [2] = size_ins;
3292 EMIT_NEW_ICONST (cfg, iargs [2], n);
3294 memcpy_method = get_memcpy_method ();
3296 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3298 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3303 get_memset_method (void)
3305 static MonoMethod *memset_method = NULL;
3306 if (!memset_method) {
3307 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3309 g_error ("Old corlib found. Install a new one");
3311 return memset_method;
3315 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3317 MonoInst *iargs [3];
3320 MonoMethod *memset_method;
3321 MonoInst *size_ins = NULL;
3322 MonoInst *bzero_ins = NULL;
3323 static MonoMethod *bzero_method;
3325 /* FIXME: Optimize this for the case when dest is an LDADDR */
3326 mono_class_init (klass);
3327 if (mini_is_gsharedvt_klass (klass)) {
3328 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3329 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3331 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3332 g_assert (bzero_method);
3334 iargs [1] = size_ins;
3335 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3339 n = mono_class_value_size (klass, &align);
3341 if (n <= sizeof (gpointer) * 8) {
3342 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3345 memset_method = get_memset_method ();
3347 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3348 EMIT_NEW_ICONST (cfg, iargs [2], n);
3349 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3356 * Emit IR to return either the this pointer for instance method,
3357 * or the mrgctx for static methods.
3360 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3362 MonoInst *this_ins = NULL;
3364 g_assert (cfg->gshared);
3366 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3367 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3368 !method->klass->valuetype)
3369 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
3371 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3372 MonoInst *mrgctx_loc, *mrgctx_var;
3374 g_assert (!this_ins);
3375 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3377 mrgctx_loc = mono_get_vtable_var (cfg);
3378 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3381 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3382 MonoInst *vtable_loc, *vtable_var;
3384 g_assert (!this_ins);
3386 vtable_loc = mono_get_vtable_var (cfg);
3387 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3389 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3390 MonoInst *mrgctx_var = vtable_var;
3393 vtable_reg = alloc_preg (cfg);
3394 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3395 vtable_var->type = STACK_PTR;
3403 vtable_reg = alloc_preg (cfg);
3404 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3409 static MonoJumpInfoRgctxEntry *
3410 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3412 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3413 res->method = method;
3414 res->in_mrgctx = in_mrgctx;
3415 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3416 res->data->type = patch_type;
3417 res->data->data.target = patch_data;
3418 res->info_type = info_type;
3426 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3429 static inline MonoInst*
3430 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3432 /* Inline version, not currently used */
3433 // FIXME: This can be called from mono_decompose_vtype_opts (), which can't create new bblocks
3435 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3437 MonoBasicBlock *is_null_bb, *end_bb;
3438 MonoInst *res, *ins, *call;
3441 slot = mini_get_rgctx_entry_slot (entry);
3443 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3444 index = MONO_RGCTX_SLOT_INDEX (slot);
3446 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3447 for (depth = 0; ; ++depth) {
3448 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3450 if (index < size - 1)
3455 NEW_BBLOCK (cfg, end_bb);
3456 NEW_BBLOCK (cfg, is_null_bb);
3459 rgctx_reg = rgctx->dreg;
3461 rgctx_reg = alloc_preg (cfg);
3463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3464 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3465 NEW_BBLOCK (cfg, is_null_bb);
3467 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3471 for (i = 0; i < depth; ++i) {
3472 int array_reg = alloc_preg (cfg);
3474 /* load ptr to next array */
3475 if (mrgctx && i == 0)
3476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3479 rgctx_reg = array_reg;
3480 /* is the ptr null? */
3481 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3482 /* if yes, jump to actual trampoline */
3483 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3487 val_reg = alloc_preg (cfg);
3488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3489 /* is the slot null? */
3490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3491 /* if yes, jump to actual trampoline */
3492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3495 res_reg = alloc_preg (cfg);
3496 MONO_INST_NEW (cfg, ins, OP_MOVE);
3497 ins->dreg = res_reg;
3498 ins->sreg1 = val_reg;
3499 MONO_ADD_INS (cfg->cbb, ins);
3501 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3504 MONO_START_BB (cfg, is_null_bb);
3506 EMIT_NEW_ICONST (cfg, args [1], index);
3508 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3510 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3511 MONO_INST_NEW (cfg, ins, OP_MOVE);
3512 ins->dreg = res_reg;
3513 ins->sreg1 = call->dreg;
3514 MONO_ADD_INS (cfg->cbb, ins);
3515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3517 MONO_START_BB (cfg, end_bb);
3521 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3526 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3527 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3529 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3530 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3532 return emit_rgctx_fetch (cfg, rgctx, entry);
3536 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3537 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3539 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3540 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3542 return emit_rgctx_fetch (cfg, rgctx, entry);
3546 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3547 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3549 MonoJumpInfoGSharedVtCall *call_info;
3550 MonoJumpInfoRgctxEntry *entry;
3553 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3554 call_info->sig = sig;
3555 call_info->method = cmethod;
3557 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3558 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3560 return emit_rgctx_fetch (cfg, rgctx, entry);
3564 * emit_get_rgctx_virt_method:
3566 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3569 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3570 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3572 MonoJumpInfoVirtMethod *info;
3573 MonoJumpInfoRgctxEntry *entry;
3576 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3577 info->klass = klass;
3578 info->method = virt_method;
3580 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3581 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3583 return emit_rgctx_fetch (cfg, rgctx, entry);
3587 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3588 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3590 MonoJumpInfoRgctxEntry *entry;
3593 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3594 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3596 return emit_rgctx_fetch (cfg, rgctx, entry);
3600 * emit_get_rgctx_method:
3602 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3603 * normal constants, else emit a load from the rgctx.
3606 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3607 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3609 if (!context_used) {
3612 switch (rgctx_type) {
3613 case MONO_RGCTX_INFO_METHOD:
3614 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3616 case MONO_RGCTX_INFO_METHOD_RGCTX:
3617 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3620 g_assert_not_reached ();
3623 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3624 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3626 return emit_rgctx_fetch (cfg, rgctx, entry);
3631 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3632 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3634 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3635 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3637 return emit_rgctx_fetch (cfg, rgctx, entry);
3641 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3643 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3644 MonoRuntimeGenericContextInfoTemplate *template;
3649 for (i = 0; i < info->num_entries; ++i) {
3650 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3652 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3656 if (info->num_entries == info->count_entries) {
3657 MonoRuntimeGenericContextInfoTemplate *new_entries;
3658 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3660 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3662 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3663 info->entries = new_entries;
3664 info->count_entries = new_count_entries;
3667 idx = info->num_entries;
3668 template = &info->entries [idx];
3669 template->info_type = rgctx_type;
3670 template->data = data;
3672 info->num_entries ++;
3678 * emit_get_gsharedvt_info:
3680 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3683 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3688 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3689 /* Load info->entries [idx] */
3690 dreg = alloc_preg (cfg);
3691 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3697 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3699 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3703 * On return the caller must check @klass for load errors.
3706 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3708 MonoInst *vtable_arg;
3710 gboolean use_op_generic_class_init = FALSE;
3712 context_used = mini_class_check_context_used (cfg, klass);
3715 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3716 klass, MONO_RGCTX_INFO_VTABLE);
3718 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3722 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3725 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3726 if (!COMPILE_LLVM (cfg))
3727 use_op_generic_class_init = TRUE;
3730 if (use_op_generic_class_init) {
3734 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3735 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3737 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3738 ins->sreg1 = vtable_arg->dreg;
3739 MONO_ADD_INS (cfg->cbb, ins);
3741 static int byte_offset = -1;
3742 static guint8 bitmask;
3743 int bits_reg, inited_reg;
3744 MonoBasicBlock *inited_bb;
3745 MonoInst *args [16];
3747 if (byte_offset < 0)
3748 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3750 bits_reg = alloc_ireg (cfg);
3751 inited_reg = alloc_ireg (cfg);
3753 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3756 NEW_BBLOCK (cfg, inited_bb);
3758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3761 args [0] = vtable_arg;
3762 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3764 MONO_START_BB (cfg, inited_bb);
3769 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3773 if (cfg->gen_seq_points && cfg->method == method) {
3774 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3776 ins->flags |= MONO_INST_NONEMPTY_STACK;
3777 MONO_ADD_INS (cfg->cbb, ins);
3782 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3784 if (mini_get_debug_options ()->better_cast_details) {
3785 int vtable_reg = alloc_preg (cfg);
3786 int klass_reg = alloc_preg (cfg);
3787 MonoBasicBlock *is_null_bb = NULL;
3789 int to_klass_reg, context_used;
3792 NEW_BBLOCK (cfg, is_null_bb);
3794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3795 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3798 tls_get = mono_get_jit_tls_intrinsic (cfg);
3800 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3804 MONO_ADD_INS (cfg->cbb, tls_get);
3805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3810 context_used = mini_class_check_context_used (cfg, klass);
3812 MonoInst *class_ins;
3814 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3815 to_klass_reg = class_ins->dreg;
3817 to_klass_reg = alloc_preg (cfg);
3818 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3823 MONO_START_BB (cfg, is_null_bb);
3828 reset_cast_details (MonoCompile *cfg)
3830 /* Reset the variables holding the cast details */
3831 if (mini_get_debug_options ()->better_cast_details) {
3832 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3834 MONO_ADD_INS (cfg->cbb, tls_get);
3835 /* It is enough to reset the from field */
3836 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3841 * On return the caller must check @array_class for load errors
3844 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3846 int vtable_reg = alloc_preg (cfg);
3849 context_used = mini_class_check_context_used (cfg, array_class);
3851 save_cast_details (cfg, array_class, obj->dreg, FALSE);
3853 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3855 if (cfg->opt & MONO_OPT_SHARED) {
3856 int class_reg = alloc_preg (cfg);
3857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3858 if (cfg->compile_aot) {
3859 int klass_reg = alloc_preg (cfg);
3860 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3861 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3863 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3865 } else if (context_used) {
3866 MonoInst *vtable_ins;
3868 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3869 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3871 if (cfg->compile_aot) {
3875 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3877 vt_reg = alloc_preg (cfg);
3878 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3879 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3882 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3884 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3888 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3890 reset_cast_details (cfg);
3894 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3895 * generic code is generated.
3898 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3900 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3903 MonoInst *rgctx, *addr;
3905 /* FIXME: What if the class is shared? We might not
3906 have to get the address of the method from the
3908 addr = emit_get_rgctx_method (cfg, context_used, method,
3909 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3911 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3913 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3915 gboolean pass_vtable, pass_mrgctx;
3916 MonoInst *rgctx_arg = NULL;
3918 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3919 g_assert (!pass_mrgctx);
3922 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3925 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3928 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3933 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3937 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3938 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3939 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3940 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3942 obj_reg = sp [0]->dreg;
3943 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3944 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3946 /* FIXME: generics */
3947 g_assert (klass->rank == 0);
3950 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3951 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3954 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3957 MonoInst *element_class;
3959 /* This assertion is from the unboxcast insn */
3960 g_assert (klass->rank == 0);
3962 element_class = emit_get_rgctx_klass (cfg, context_used,
3963 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3965 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3966 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3968 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3969 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3970 reset_cast_details (cfg);
3973 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3974 MONO_ADD_INS (cfg->cbb, add);
3975 add->type = STACK_MP;
3982 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3984 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3985 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3989 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3995 args [1] = klass_inst;
3998 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4000 NEW_BBLOCK (cfg, is_ref_bb);
4001 NEW_BBLOCK (cfg, is_nullable_bb);
4002 NEW_BBLOCK (cfg, end_bb);
4003 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4010 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4011 addr_reg = alloc_dreg (cfg, STACK_MP);
4015 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4016 MONO_ADD_INS (cfg->cbb, addr);
4018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4021 MONO_START_BB (cfg, is_ref_bb);
4023 /* Save the ref to a temporary */
4024 dreg = alloc_ireg (cfg);
4025 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4026 addr->dreg = addr_reg;
4027 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4028 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4031 MONO_START_BB (cfg, is_nullable_bb);
4034 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4035 MonoInst *unbox_call;
4036 MonoMethodSignature *unbox_sig;
4038 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4039 unbox_sig->ret = &klass->byval_arg;
4040 unbox_sig->param_count = 1;
4041 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4042 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4044 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4045 addr->dreg = addr_reg;
4048 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4051 MONO_START_BB (cfg, end_bb);
4054 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4060 * Returns NULL and set the cfg exception on error.
4063 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4065 MonoInst *iargs [2];
4071 MonoInst *iargs [2];
4072 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4074 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4076 if (cfg->opt & MONO_OPT_SHARED)
4077 rgctx_info = MONO_RGCTX_INFO_KLASS;
4079 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4080 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4082 if (cfg->opt & MONO_OPT_SHARED) {
4083 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4085 alloc_ftn = mono_object_new;
4088 alloc_ftn = mono_object_new_specific;
4091 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4092 if (known_instance_size) {
4093 int size = mono_class_instance_size (klass);
4094 if (size < sizeof (MonoObject))
4095 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4097 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4099 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4102 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4105 if (cfg->opt & MONO_OPT_SHARED) {
4106 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4107 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4109 alloc_ftn = mono_object_new;
4110 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4111 /* This happens often in argument checking code, eg. throw new FooException... */
4112 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4113 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4114 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4116 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4117 MonoMethod *managed_alloc = NULL;
4121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4122 cfg->exception_ptr = klass;
4126 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4128 if (managed_alloc) {
4129 int size = mono_class_instance_size (klass);
4130 if (size < sizeof (MonoObject))
4131 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4133 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4134 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4135 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4137 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4139 guint32 lw = vtable->klass->instance_size;
4140 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4141 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4142 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4145 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4149 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4153 * Returns NULL and set the cfg exception on error.
4156 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4158 MonoInst *alloc, *ins;
4160 if (mono_class_is_nullable (klass)) {
4161 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4164 /* FIXME: What if the class is shared? We might not
4165 have to get the method address from the RGCTX. */
4166 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4167 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4168 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4170 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4172 gboolean pass_vtable, pass_mrgctx;
4173 MonoInst *rgctx_arg = NULL;
4175 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4176 g_assert (!pass_mrgctx);
4179 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4182 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4185 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4189 if (mini_is_gsharedvt_klass (klass)) {
4190 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4191 MonoInst *res, *is_ref, *src_var, *addr;
4194 dreg = alloc_ireg (cfg);
4196 NEW_BBLOCK (cfg, is_ref_bb);
4197 NEW_BBLOCK (cfg, is_nullable_bb);
4198 NEW_BBLOCK (cfg, end_bb);
4199 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4201 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4203 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4204 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4207 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4210 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4211 ins->opcode = OP_STOREV_MEMBASE;
4213 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4214 res->type = STACK_OBJ;
4216 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4219 MONO_START_BB (cfg, is_ref_bb);
4221 /* val is a vtype, so has to load the value manually */
4222 src_var = get_vreg_to_inst (cfg, val->dreg);
4224 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4225 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4227 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4230 MONO_START_BB (cfg, is_nullable_bb);
4233 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4234 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4236 MonoMethodSignature *box_sig;
4239 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4240 * construct that method at JIT time, so have to do things by hand.
4242 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4243 box_sig->ret = &mono_defaults.object_class->byval_arg;
4244 box_sig->param_count = 1;
4245 box_sig->params [0] = &klass->byval_arg;
4246 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4247 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4248 res->type = STACK_OBJ;
4252 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4254 MONO_START_BB (cfg, end_bb);
4258 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4268 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4271 MonoGenericContainer *container;
4272 MonoGenericInst *ginst;
4274 if (klass->generic_class) {
4275 container = klass->generic_class->container_class->generic_container;
4276 ginst = klass->generic_class->context.class_inst;
4277 } else if (klass->generic_container && context_used) {
4278 container = klass->generic_container;
4279 ginst = container->context.class_inst;
4284 for (i = 0; i < container->type_argc; ++i) {
4286 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4288 type = ginst->type_argv [i];
4289 if (mini_type_is_reference (type))
4295 static GHashTable* direct_icall_type_hash;
4298 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4300 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4301 if (!direct_icalls_enabled (cfg))
4305 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4306 * Whitelist a few icalls for now.
4308 if (!direct_icall_type_hash) {
4309 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4311 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4312 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4313 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4314 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4315 mono_memory_barrier ();
4316 direct_icall_type_hash = h;
4319 if (cmethod->klass == mono_defaults.math_class)
4321 /* No locking needed */
4322 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
4327 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4330 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4332 MonoMethod *mono_castclass;
4335 mono_castclass = mono_marshal_get_castclass_with_cache ();
4337 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4338 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4339 reset_cast_details (cfg);
4345 get_castclass_cache_idx (MonoCompile *cfg)
4347 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4348 cfg->castclass_cache_index ++;
4349 return (cfg->method_index << 16) | cfg->castclass_cache_index;
4353 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4362 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4365 if (cfg->compile_aot) {
4366 idx = get_castclass_cache_idx (cfg);
4367 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4369 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4372 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4373 return emit_castclass_with_cache (cfg, klass, args);
4377 * Returns NULL and set the cfg exception on error.
4380 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4382 MonoBasicBlock *is_null_bb;
4383 int obj_reg = src->dreg;
4384 int vtable_reg = alloc_preg (cfg);
4386 MonoInst *klass_inst = NULL, *res;
4388 context_used = mini_class_check_context_used (cfg, klass);
4390 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4391 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4392 (*inline_costs) += 2;
4394 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4395 MonoMethod *mono_castclass;
4396 MonoInst *iargs [1];
4399 mono_castclass = mono_marshal_get_castclass (klass);
4402 save_cast_details (cfg, klass, src->dreg, TRUE);
4403 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4404 iargs, ip, cfg->real_offset, TRUE);
4405 reset_cast_details (cfg);
4406 CHECK_CFG_EXCEPTION;
4407 g_assert (costs > 0);
4409 cfg->real_offset += 5;
4411 (*inline_costs) += costs;
4419 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4420 MonoInst *cache_ins;
4422 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4427 /* klass - it's the second element of the cache entry*/
4428 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4431 args [2] = cache_ins;
4433 return emit_castclass_with_cache (cfg, klass, args);
4436 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4439 NEW_BBLOCK (cfg, is_null_bb);
4441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4442 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4444 save_cast_details (cfg, klass, obj_reg, FALSE);
4446 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4448 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4450 int klass_reg = alloc_preg (cfg);
4452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4454 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4455 /* the remoting code is broken, access the class for now */
4456 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4457 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4459 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4460 cfg->exception_ptr = klass;
4463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4468 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4471 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4475 MONO_START_BB (cfg, is_null_bb);
4477 reset_cast_details (cfg);
4486 * Returns NULL and set the cfg exception on error.
4489 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4492 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4493 int obj_reg = src->dreg;
4494 int vtable_reg = alloc_preg (cfg);
4495 int res_reg = alloc_ireg_ref (cfg);
4496 MonoInst *klass_inst = NULL;
4501 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4502 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4503 MonoInst *cache_ins;
4505 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4510 /* klass - it's the second element of the cache entry*/
4511 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4514 args [2] = cache_ins;
4516 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4519 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4522 NEW_BBLOCK (cfg, is_null_bb);
4523 NEW_BBLOCK (cfg, false_bb);
4524 NEW_BBLOCK (cfg, end_bb);
4526 /* Do the assignment at the beginning, so the other assignment can be if converted */
4527 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4528 ins->type = STACK_OBJ;
4531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4532 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4534 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4536 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4537 g_assert (!context_used);
4538 /* the is_null_bb target simply copies the input register to the output */
4539 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4541 int klass_reg = alloc_preg (cfg);
4544 int rank_reg = alloc_preg (cfg);
4545 int eclass_reg = alloc_preg (cfg);
4547 g_assert (!context_used);
4548 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4553 if (klass->cast_class == mono_defaults.object_class) {
4554 int parent_reg = alloc_preg (cfg);
4555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4556 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4557 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4558 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4559 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4560 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4561 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4563 } else if (klass->cast_class == mono_defaults.enum_class) {
4564 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4566 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4567 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4569 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4570 /* Check that the object is a vector too */
4571 int bounds_reg = alloc_preg (cfg);
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4577 /* the is_null_bb target simply copies the input register to the output */
4578 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4580 } else if (mono_class_is_nullable (klass)) {
4581 g_assert (!context_used);
4582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4583 /* the is_null_bb target simply copies the input register to the output */
4584 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4586 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4587 g_assert (!context_used);
4588 /* the remoting code is broken, access the class for now */
4589 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4590 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4592 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4593 cfg->exception_ptr = klass;
4596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4604 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4605 /* the is_null_bb target simply copies the input register to the output */
4606 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4611 MONO_START_BB (cfg, false_bb);
4613 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4616 MONO_START_BB (cfg, is_null_bb);
4618 MONO_START_BB (cfg, end_bb);
4624 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4626 /* This opcode takes as input an object reference and a class, and returns:
4627 0) if the object is an instance of the class,
4628 1) if the object is not instance of the class,
4629 2) if the object is a proxy whose type cannot be determined */
4632 #ifndef DISABLE_REMOTING
4633 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4635 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4637 int obj_reg = src->dreg;
4638 int dreg = alloc_ireg (cfg);
4640 #ifndef DISABLE_REMOTING
4641 int klass_reg = alloc_preg (cfg);
4644 NEW_BBLOCK (cfg, true_bb);
4645 NEW_BBLOCK (cfg, false_bb);
4646 NEW_BBLOCK (cfg, end_bb);
4647 #ifndef DISABLE_REMOTING
4648 NEW_BBLOCK (cfg, false2_bb);
4649 NEW_BBLOCK (cfg, no_proxy_bb);
4652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4655 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4656 #ifndef DISABLE_REMOTING
4657 NEW_BBLOCK (cfg, interface_fail_bb);
4660 tmp_reg = alloc_preg (cfg);
4661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4662 #ifndef DISABLE_REMOTING
4663 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4664 MONO_START_BB (cfg, interface_fail_bb);
4665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4667 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4669 tmp_reg = alloc_preg (cfg);
4670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4671 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4674 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4677 #ifndef DISABLE_REMOTING
4678 tmp_reg = alloc_preg (cfg);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4682 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4683 tmp_reg = alloc_preg (cfg);
4684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4687 tmp_reg = alloc_preg (cfg);
4688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4692 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4695 MONO_START_BB (cfg, no_proxy_bb);
4697 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4699 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4703 MONO_START_BB (cfg, false_bb);
4705 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4706 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4708 #ifndef DISABLE_REMOTING
4709 MONO_START_BB (cfg, false2_bb);
4711 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4715 MONO_START_BB (cfg, true_bb);
4717 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4719 MONO_START_BB (cfg, end_bb);
4722 MONO_INST_NEW (cfg, ins, OP_ICONST);
4724 ins->type = STACK_I4;
4730 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4732 /* This opcode takes as input an object reference and a class, and returns:
4733 0) if the object is an instance of the class,
4734 1) if the object is a proxy whose type cannot be determined
4735 an InvalidCastException exception is thrown otherwhise*/
4738 #ifndef DISABLE_REMOTING
4739 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4741 MonoBasicBlock *ok_result_bb;
4743 int obj_reg = src->dreg;
4744 int dreg = alloc_ireg (cfg);
4745 int tmp_reg = alloc_preg (cfg);
4747 #ifndef DISABLE_REMOTING
4748 int klass_reg = alloc_preg (cfg);
4749 NEW_BBLOCK (cfg, end_bb);
4752 NEW_BBLOCK (cfg, ok_result_bb);
4754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4757 save_cast_details (cfg, klass, obj_reg, FALSE);
4759 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4760 #ifndef DISABLE_REMOTING
4761 NEW_BBLOCK (cfg, interface_fail_bb);
4763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4764 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4765 MONO_START_BB (cfg, interface_fail_bb);
4766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4768 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4770 tmp_reg = alloc_preg (cfg);
4771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4773 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4775 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4776 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4778 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4779 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4783 #ifndef DISABLE_REMOTING
4784 NEW_BBLOCK (cfg, no_proxy_bb);
4786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4788 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4790 tmp_reg = alloc_preg (cfg);
4791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4794 tmp_reg = alloc_preg (cfg);
4795 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4796 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4797 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4799 NEW_BBLOCK (cfg, fail_1_bb);
4801 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4803 MONO_START_BB (cfg, fail_1_bb);
4805 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4806 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4808 MONO_START_BB (cfg, no_proxy_bb);
4810 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4812 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4816 MONO_START_BB (cfg, ok_result_bb);
4818 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4820 #ifndef DISABLE_REMOTING
4821 MONO_START_BB (cfg, end_bb);
4825 MONO_INST_NEW (cfg, ins, OP_ICONST);
4827 ins->type = STACK_I4;
4832 static G_GNUC_UNUSED MonoInst*
4833 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4835 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4836 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4839 switch (enum_type->type) {
4842 #if SIZEOF_REGISTER == 8
4854 MonoInst *load, *and, *cmp, *ceq;
4855 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4856 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4857 int dest_reg = alloc_ireg (cfg);
4859 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4860 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4861 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4862 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4864 ceq->type = STACK_I4;
4867 load = mono_decompose_opcode (cfg, load);
4868 and = mono_decompose_opcode (cfg, and);
4869 cmp = mono_decompose_opcode (cfg, cmp);
4870 ceq = mono_decompose_opcode (cfg, ceq);
4878 * Returns NULL and set the cfg exception on error.
4880 static G_GNUC_UNUSED MonoInst*
4881 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4885 gpointer trampoline;
4886 MonoInst *obj, *method_ins, *tramp_ins;
4891 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4894 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4898 obj = handle_alloc (cfg, klass, FALSE, 0);
4902 /* Inline the contents of mono_delegate_ctor */
4904 /* Set target field */
4905 /* Optimize away setting of NULL target */
4906 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4907 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4908 if (cfg->gen_write_barriers) {
4909 dreg = alloc_preg (cfg);
4910 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4911 emit_write_barrier (cfg, ptr, target);
4915 /* Set method field */
4916 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4920 * To avoid looking up the compiled code belonging to the target method
4921 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4922 * store it, and we fill it after the method has been compiled.
4924 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4925 MonoInst *code_slot_ins;
4928 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4930 domain = mono_domain_get ();
4931 mono_domain_lock (domain);
4932 if (!domain_jit_info (domain)->method_code_hash)
4933 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4934 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4936 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4937 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4939 mono_domain_unlock (domain);
4941 if (cfg->compile_aot)
4942 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4944 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4946 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4949 if (cfg->compile_aot) {
4950 MonoDelegateClassMethodPair *del_tramp;
4952 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4953 del_tramp->klass = klass;
4954 del_tramp->method = context_used ? NULL : method;
4955 del_tramp->virtual = virtual;
4956 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4959 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4961 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4962 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4965 /* Set invoke_impl field */
4967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4969 dreg = alloc_preg (cfg);
4970 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4971 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4973 dreg = alloc_preg (cfg);
4974 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4978 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
4984 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4986 MonoJitICallInfo *info;
4988 /* Need to register the icall so it gets an icall wrapper */
4989 info = mono_get_array_new_va_icall (rank);
4991 cfg->flags |= MONO_CFG_HAS_VARARGS;
4993 /* mono_array_new_va () needs a vararg calling convention */
4994 cfg->disable_llvm = TRUE;
4996 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4997 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5001 * handle_constrained_gsharedvt_call:
5003 * Handle constrained calls where the receiver is a gsharedvt type.
5004 * Return the instruction representing the call. Set the cfg exception on failure.
5007 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5008 gboolean *ref_emit_widen)
5010 MonoInst *ins = NULL;
5011 gboolean emit_widen = *ref_emit_widen;
5014 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
5015 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5016 * pack the arguments into an array, and do the rest of the work in in an icall.
5018 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5019 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5020 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5021 MonoInst *args [16];
5024 * This case handles calls to
5025 * - object:ToString()/Equals()/GetHashCode(),
5026 * - System.IComparable<T>:CompareTo()
5027 * - System.IEquatable<T>:Equals ()
5028 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
5032 if (mono_method_check_context_used (cmethod))
5033 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5035 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5036 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5038 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5039 if (fsig->hasthis && fsig->param_count) {
5040 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5041 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5042 ins->dreg = alloc_preg (cfg);
5043 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5044 MONO_ADD_INS (cfg->cbb, ins);
5047 if (mini_is_gsharedvt_type (fsig->params [0])) {
5050 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5052 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5053 addr_reg = ins->dreg;
5054 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5056 EMIT_NEW_ICONST (cfg, args [3], 0);
5057 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5060 EMIT_NEW_ICONST (cfg, args [3], 0);
5061 EMIT_NEW_ICONST (cfg, args [4], 0);
5063 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
5066 if (mini_is_gsharedvt_type (fsig->ret)) {
5067 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5068 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5072 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5073 MONO_ADD_INS (cfg->cbb, add);
5075 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5076 MONO_ADD_INS (cfg->cbb, ins);
5077 /* ins represents the call result */
5080 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5083 *ref_emit_widen = emit_widen;
5092 mono_emit_load_got_addr (MonoCompile *cfg)
5094 MonoInst *getaddr, *dummy_use;
5096 if (!cfg->got_var || cfg->got_var_allocated)
5099 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5100 getaddr->cil_code = cfg->header->code;
5101 getaddr->dreg = cfg->got_var->dreg;
5103 /* Add it to the start of the first bblock */
5104 if (cfg->bb_entry->code) {
5105 getaddr->next = cfg->bb_entry->code;
5106 cfg->bb_entry->code = getaddr;
5109 MONO_ADD_INS (cfg->bb_entry, getaddr);
5111 cfg->got_var_allocated = TRUE;
5114 * Add a dummy use to keep the got_var alive, since real uses might
5115 * only be generated by the back ends.
5116 * Add it to end_bblock, so the variable's lifetime covers the whole
5118 * It would be better to make the usage of the got var explicit in all
5119 * cases when the backend needs it (i.e. calls, throw etc.), so this
5120 * wouldn't be needed.
5122 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5123 MONO_ADD_INS (cfg->bb_exit, dummy_use);
5126 static int inline_limit;
5127 static gboolean inline_limit_inited;
5130 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5132 MonoMethodHeaderSummary header;
5134 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5135 MonoMethodSignature *sig = mono_method_signature (method);
5139 if (cfg->disable_inline)
5144 if (cfg->inline_depth > 10)
5147 #ifdef MONO_ARCH_HAVE_LMF_OPS
5148 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5149 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5150 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5155 if (!mono_method_get_header_summary (method, &header))
5158 /*runtime, icall and pinvoke are checked by summary call*/
5159 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5160 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5161 (mono_class_is_marshalbyref (method->klass)) ||
5165 /* also consider num_locals? */
5166 /* Do the size check early to avoid creating vtables */
5167 if (!inline_limit_inited) {
5168 if (g_getenv ("MONO_INLINELIMIT"))
5169 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5171 inline_limit = INLINE_LENGTH_LIMIT;
5172 inline_limit_inited = TRUE;
5174 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5178 * if we can initialize the class of the method right away, we do,
5179 * otherwise we don't allow inlining if the class needs initialization,
5180 * since it would mean inserting a call to mono_runtime_class_init()
5181 * inside the inlined code
5183 if (!(cfg->opt & MONO_OPT_SHARED)) {
5184 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5185 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5186 vtable = mono_class_vtable (cfg->domain, method->klass);
5189 if (!cfg->compile_aot)
5190 mono_runtime_class_init (vtable);
5191 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5192 if (cfg->run_cctors && method->klass->has_cctor) {
5193 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5194 if (!method->klass->runtime_info)
5195 /* No vtable created yet */
5197 vtable = mono_class_vtable (cfg->domain, method->klass);
5200 /* This makes so that inline cannot trigger */
5201 /* .cctors: too many apps depend on them */
5202 /* running with a specific order... */
5203 if (! vtable->initialized)
5205 mono_runtime_class_init (vtable);
5207 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5208 if (!method->klass->runtime_info)
5209 /* No vtable created yet */
5211 vtable = mono_class_vtable (cfg->domain, method->klass);
5214 if (!vtable->initialized)
5219 * If we're compiling for shared code
5220 * the cctor will need to be run at aot method load time, for example,
5221 * or at the end of the compilation of the inlining method.
5223 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5227 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5228 if (mono_arch_is_soft_float ()) {
5230 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5232 for (i = 0; i < sig->param_count; ++i)
5233 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5238 if (g_list_find (cfg->dont_inline, method))
5245 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5247 if (!cfg->compile_aot) {
5249 if (vtable->initialized)
5253 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5254 if (cfg->method == method)
5258 if (!mono_class_needs_cctor_run (klass, method))
5261 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5262 /* The initialization is already done before the method is called */
5269 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5273 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5276 if (mini_is_gsharedvt_variable_klass (klass)) {
5279 mono_class_init (klass);
5280 size = mono_class_array_element_size (klass);
5283 mult_reg = alloc_preg (cfg);
5284 array_reg = arr->dreg;
5285 index_reg = index->dreg;
5287 #if SIZEOF_REGISTER == 8
5288 /* The array reg is 64 bits but the index reg is only 32 */
5289 if (COMPILE_LLVM (cfg)) {
5291 index2_reg = index_reg;
5293 index2_reg = alloc_preg (cfg);
5294 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5297 if (index->type == STACK_I8) {
5298 index2_reg = alloc_preg (cfg);
5299 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5301 index2_reg = index_reg;
5306 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5308 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5309 if (size == 1 || size == 2 || size == 4 || size == 8) {
5310 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5312 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5313 ins->klass = mono_class_get_element_class (klass);
5314 ins->type = STACK_MP;
5320 add_reg = alloc_ireg_mp (cfg);
5323 MonoInst *rgctx_ins;
5326 g_assert (cfg->gshared);
5327 context_used = mini_class_check_context_used (cfg, klass);
5328 g_assert (context_used);
5329 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5330 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5334 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5335 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5336 ins->klass = mono_class_get_element_class (klass);
5337 ins->type = STACK_MP;
5338 MONO_ADD_INS (cfg->cbb, ins);
5343 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5345 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5347 int bounds_reg = alloc_preg (cfg);
5348 int add_reg = alloc_ireg_mp (cfg);
5349 int mult_reg = alloc_preg (cfg);
5350 int mult2_reg = alloc_preg (cfg);
5351 int low1_reg = alloc_preg (cfg);
5352 int low2_reg = alloc_preg (cfg);
5353 int high1_reg = alloc_preg (cfg);
5354 int high2_reg = alloc_preg (cfg);
5355 int realidx1_reg = alloc_preg (cfg);
5356 int realidx2_reg = alloc_preg (cfg);
5357 int sum_reg = alloc_preg (cfg);
5358 int index1, index2, tmpreg;
5362 mono_class_init (klass);
5363 size = mono_class_array_element_size (klass);
5365 index1 = index_ins1->dreg;
5366 index2 = index_ins2->dreg;
5368 #if SIZEOF_REGISTER == 8
5369 /* The array reg is 64 bits but the index reg is only 32 */
5370 if (COMPILE_LLVM (cfg)) {
5373 tmpreg = alloc_preg (cfg);
5374 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5376 tmpreg = alloc_preg (cfg);
5377 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5381 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5385 /* range checking */
5386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5387 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5389 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5390 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5391 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5392 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5393 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5394 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5395 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5397 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5398 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5399 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5401 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5402 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5403 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5405 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5406 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5408 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5409 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5411 ins->type = STACK_MP;
5413 MONO_ADD_INS (cfg->cbb, ins);
5420 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5424 MonoMethod *addr_method;
5426 MonoClass *eclass = cmethod->klass->element_class;
5428 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5431 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5433 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5434 /* emit_ldelema_2 depends on OP_LMUL */
5435 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5436 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5440 if (mini_is_gsharedvt_variable_klass (eclass))
5443 element_size = mono_class_array_element_size (eclass);
5444 addr_method = mono_marshal_get_array_address (rank, element_size);
5445 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5450 static MonoBreakPolicy
5451 always_insert_breakpoint (MonoMethod *method)
5453 return MONO_BREAK_POLICY_ALWAYS;
5456 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5459 * mono_set_break_policy:
5460 * policy_callback: the new callback function
5462 * Allow embedders to decide wherther to actually obey breakpoint instructions
5463 * (both break IL instructions and Debugger.Break () method calls), for example
5464 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5465 * untrusted or semi-trusted code.
5467 * @policy_callback will be called every time a break point instruction needs to
5468 * be inserted with the method argument being the method that calls Debugger.Break()
5469 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5470 * if it wants the breakpoint to not be effective in the given method.
5471 * #MONO_BREAK_POLICY_ALWAYS is the default.
5474 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5476 if (policy_callback)
5477 break_policy_func = policy_callback;
5479 break_policy_func = always_insert_breakpoint;
5483 should_insert_brekpoint (MonoMethod *method) {
5484 switch (break_policy_func (method)) {
5485 case MONO_BREAK_POLICY_ALWAYS:
5487 case MONO_BREAK_POLICY_NEVER:
5489 case MONO_BREAK_POLICY_ON_DBG:
5490 g_warning ("mdb no longer supported");
5493 g_warning ("Incorrect value returned from break policy callback");
5498 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5500 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5502 MonoInst *addr, *store, *load;
5503 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5505 /* the bounds check is already done by the callers */
5506 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5508 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5509 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5510 if (mini_type_is_reference (fsig->params [2]))
5511 emit_write_barrier (cfg, addr, load);
5513 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5514 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5521 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5523 return mini_type_is_reference (&klass->byval_arg);
5527 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5529 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5530 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5531 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5532 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5533 MonoInst *iargs [3];
5536 mono_class_setup_vtable (obj_array);
5537 g_assert (helper->slot);
5539 if (sp [0]->type != STACK_OBJ)
5541 if (sp [2]->type != STACK_OBJ)
5548 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5552 if (mini_is_gsharedvt_variable_klass (klass)) {
5555 // FIXME-VT: OP_ICONST optimization
5556 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5557 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5558 ins->opcode = OP_STOREV_MEMBASE;
5559 } else if (sp [1]->opcode == OP_ICONST) {
5560 int array_reg = sp [0]->dreg;
5561 int index_reg = sp [1]->dreg;
5562 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5565 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5566 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5568 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5569 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5570 if (generic_class_is_reference_type (cfg, klass))
5571 emit_write_barrier (cfg, addr, sp [2]);
5578 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5583 eklass = mono_class_from_mono_type (fsig->params [2]);
5585 eklass = mono_class_from_mono_type (fsig->ret);
5588 return emit_array_store (cfg, eklass, args, FALSE);
5590 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5591 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5597 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5601 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5603 //Only allow for valuetypes
5604 if (!param_klass->valuetype || !return_klass->valuetype)
5608 if (param_klass->has_references || return_klass->has_references)
5611 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5612 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5613 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5616 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5617 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5620 //And have the same size
5621 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
5627 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5629 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5630 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5632 //Valuetypes that are semantically equivalent
5633 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5636 //Arrays of valuetypes that are semantically equivalent
5637 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5644 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5646 #ifdef MONO_ARCH_SIMD_INTRINSICS
5647 MonoInst *ins = NULL;
5649 if (cfg->opt & MONO_OPT_SIMD) {
5650 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5656 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5660 emit_memory_barrier (MonoCompile *cfg, int kind)
5662 MonoInst *ins = NULL;
5663 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5664 MONO_ADD_INS (cfg->cbb, ins);
5665 ins->backend.memory_barrier_kind = kind;
5671 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5673 MonoInst *ins = NULL;
5676 /* The LLVM backend supports these intrinsics */
5677 if (cmethod->klass == mono_defaults.math_class) {
5678 if (strcmp (cmethod->name, "Sin") == 0) {
5680 } else if (strcmp (cmethod->name, "Cos") == 0) {
5682 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5684 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5688 if (opcode && fsig->param_count == 1) {
5689 MONO_INST_NEW (cfg, ins, opcode);
5690 ins->type = STACK_R8;
5691 ins->dreg = mono_alloc_freg (cfg);
5692 ins->sreg1 = args [0]->dreg;
5693 MONO_ADD_INS (cfg->cbb, ins);
5697 if (cfg->opt & MONO_OPT_CMOV) {
5698 if (strcmp (cmethod->name, "Min") == 0) {
5699 if (fsig->params [0]->type == MONO_TYPE_I4)
5701 if (fsig->params [0]->type == MONO_TYPE_U4)
5702 opcode = OP_IMIN_UN;
5703 else if (fsig->params [0]->type == MONO_TYPE_I8)
5705 else if (fsig->params [0]->type == MONO_TYPE_U8)
5706 opcode = OP_LMIN_UN;
5707 } else if (strcmp (cmethod->name, "Max") == 0) {
5708 if (fsig->params [0]->type == MONO_TYPE_I4)
5710 if (fsig->params [0]->type == MONO_TYPE_U4)
5711 opcode = OP_IMAX_UN;
5712 else if (fsig->params [0]->type == MONO_TYPE_I8)
5714 else if (fsig->params [0]->type == MONO_TYPE_U8)
5715 opcode = OP_LMAX_UN;
5719 if (opcode && fsig->param_count == 2) {
5720 MONO_INST_NEW (cfg, ins, opcode);
5721 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5722 ins->dreg = mono_alloc_ireg (cfg);
5723 ins->sreg1 = args [0]->dreg;
5724 ins->sreg2 = args [1]->dreg;
5725 MONO_ADD_INS (cfg->cbb, ins);
5733 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5735 if (cmethod->klass == mono_defaults.array_class) {
5736 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5737 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5738 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5739 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5740 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5741 return emit_array_unsafe_mov (cfg, fsig, args);
5748 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5750 MonoInst *ins = NULL;
5752 static MonoClass *runtime_helpers_class = NULL;
5753 if (! runtime_helpers_class)
5754 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5755 "System.Runtime.CompilerServices", "RuntimeHelpers");
5757 if (cmethod->klass == mono_defaults.string_class) {
5758 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5759 int dreg = alloc_ireg (cfg);
5760 int index_reg = alloc_preg (cfg);
5761 int add_reg = alloc_preg (cfg);
5763 #if SIZEOF_REGISTER == 8
5764 /* The array reg is 64 bits but the index reg is only 32 */
5765 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5767 index_reg = args [1]->dreg;
5769 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5771 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5772 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5773 add_reg = ins->dreg;
5774 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5777 int mult_reg = alloc_preg (cfg);
5778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5779 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5780 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5781 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5783 type_from_op (cfg, ins, NULL, NULL);
5785 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5786 int dreg = alloc_ireg (cfg);
5787 /* Decompose later to allow more optimizations */
5788 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5789 ins->type = STACK_I4;
5790 ins->flags |= MONO_INST_FAULT;
5791 cfg->cbb->has_array_access = TRUE;
5792 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5797 } else if (cmethod->klass == mono_defaults.object_class) {
5799 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5800 int dreg = alloc_ireg_ref (cfg);
5801 int vt_reg = alloc_preg (cfg);
5802 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5803 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5804 type_from_op (cfg, ins, NULL, NULL);
5807 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5808 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5809 int dreg = alloc_ireg (cfg);
5810 int t1 = alloc_ireg (cfg);
5812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5813 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5814 ins->type = STACK_I4;
5818 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5819 MONO_INST_NEW (cfg, ins, OP_NOP);
5820 MONO_ADD_INS (cfg->cbb, ins);
5824 } else if (cmethod->klass == mono_defaults.array_class) {
5825 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5826 return emit_array_generic_access (cfg, fsig, args, FALSE);
5827 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5828 return emit_array_generic_access (cfg, fsig, args, TRUE);
5830 #ifndef MONO_BIG_ARRAYS
5832 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5835 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5836 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5837 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5838 int dreg = alloc_ireg (cfg);
5839 int bounds_reg = alloc_ireg_mp (cfg);
5840 MonoBasicBlock *end_bb, *szarray_bb;
5841 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5843 NEW_BBLOCK (cfg, end_bb);
5844 NEW_BBLOCK (cfg, szarray_bb);
5846 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5847 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5848 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5850 /* Non-szarray case */
5852 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5853 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5855 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5856 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5858 MONO_START_BB (cfg, szarray_bb);
5861 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5862 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5864 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5865 MONO_START_BB (cfg, end_bb);
5867 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5868 ins->type = STACK_I4;
5874 if (cmethod->name [0] != 'g')
5877 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5878 int dreg = alloc_ireg (cfg);
5879 int vtable_reg = alloc_preg (cfg);
5880 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5881 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5882 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5883 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5884 type_from_op (cfg, ins, NULL, NULL);
5887 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5888 int dreg = alloc_ireg (cfg);
5890 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5891 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5892 type_from_op (cfg, ins, NULL, NULL);
5897 } else if (cmethod->klass == runtime_helpers_class) {
5899 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5900 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5904 } else if (cmethod->klass == mono_defaults.thread_class) {
5905 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5906 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5907 MONO_ADD_INS (cfg->cbb, ins);
5909 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5910 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5911 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5913 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5915 if (fsig->params [0]->type == MONO_TYPE_I1)
5916 opcode = OP_LOADI1_MEMBASE;
5917 else if (fsig->params [0]->type == MONO_TYPE_U1)
5918 opcode = OP_LOADU1_MEMBASE;
5919 else if (fsig->params [0]->type == MONO_TYPE_I2)
5920 opcode = OP_LOADI2_MEMBASE;
5921 else if (fsig->params [0]->type == MONO_TYPE_U2)
5922 opcode = OP_LOADU2_MEMBASE;
5923 else if (fsig->params [0]->type == MONO_TYPE_I4)
5924 opcode = OP_LOADI4_MEMBASE;
5925 else if (fsig->params [0]->type == MONO_TYPE_U4)
5926 opcode = OP_LOADU4_MEMBASE;
5927 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5928 opcode = OP_LOADI8_MEMBASE;
5929 else if (fsig->params [0]->type == MONO_TYPE_R4)
5930 opcode = OP_LOADR4_MEMBASE;
5931 else if (fsig->params [0]->type == MONO_TYPE_R8)
5932 opcode = OP_LOADR8_MEMBASE;
5933 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5934 opcode = OP_LOAD_MEMBASE;
5937 MONO_INST_NEW (cfg, ins, opcode);
5938 ins->inst_basereg = args [0]->dreg;
5939 ins->inst_offset = 0;
5940 MONO_ADD_INS (cfg->cbb, ins);
5942 switch (fsig->params [0]->type) {
5949 ins->dreg = mono_alloc_ireg (cfg);
5950 ins->type = STACK_I4;
5954 ins->dreg = mono_alloc_lreg (cfg);
5955 ins->type = STACK_I8;
5959 ins->dreg = mono_alloc_ireg (cfg);
5960 #if SIZEOF_REGISTER == 8
5961 ins->type = STACK_I8;
5963 ins->type = STACK_I4;
5968 ins->dreg = mono_alloc_freg (cfg);
5969 ins->type = STACK_R8;
5972 g_assert (mini_type_is_reference (fsig->params [0]));
5973 ins->dreg = mono_alloc_ireg_ref (cfg);
5974 ins->type = STACK_OBJ;
5978 if (opcode == OP_LOADI8_MEMBASE)
5979 ins = mono_decompose_opcode (cfg, ins);
5981 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
5985 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5987 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5989 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5990 opcode = OP_STOREI1_MEMBASE_REG;
5991 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5992 opcode = OP_STOREI2_MEMBASE_REG;
5993 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5994 opcode = OP_STOREI4_MEMBASE_REG;
5995 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5996 opcode = OP_STOREI8_MEMBASE_REG;
5997 else if (fsig->params [0]->type == MONO_TYPE_R4)
5998 opcode = OP_STORER4_MEMBASE_REG;
5999 else if (fsig->params [0]->type == MONO_TYPE_R8)
6000 opcode = OP_STORER8_MEMBASE_REG;
6001 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6002 opcode = OP_STORE_MEMBASE_REG;
6005 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6007 MONO_INST_NEW (cfg, ins, opcode);
6008 ins->sreg1 = args [1]->dreg;
6009 ins->inst_destbasereg = args [0]->dreg;
6010 ins->inst_offset = 0;
6011 MONO_ADD_INS (cfg->cbb, ins);
6013 if (opcode == OP_STOREI8_MEMBASE_REG)
6014 ins = mono_decompose_opcode (cfg, ins);
6019 } else if (cmethod->klass->image == mono_defaults.corlib &&
6020 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6021 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6024 #if SIZEOF_REGISTER == 8
6025 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6026 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6027 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6028 ins->dreg = mono_alloc_preg (cfg);
6029 ins->sreg1 = args [0]->dreg;
6030 ins->type = STACK_I8;
6031 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6032 MONO_ADD_INS (cfg->cbb, ins);
6036 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6038 /* 64 bit reads are already atomic */
6039 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6040 load_ins->dreg = mono_alloc_preg (cfg);
6041 load_ins->inst_basereg = args [0]->dreg;
6042 load_ins->inst_offset = 0;
6043 load_ins->type = STACK_I8;
6044 MONO_ADD_INS (cfg->cbb, load_ins);
6046 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6053 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6054 MonoInst *ins_iconst;
6057 if (fsig->params [0]->type == MONO_TYPE_I4) {
6058 opcode = OP_ATOMIC_ADD_I4;
6059 cfg->has_atomic_add_i4 = TRUE;
6061 #if SIZEOF_REGISTER == 8
6062 else if (fsig->params [0]->type == MONO_TYPE_I8)
6063 opcode = OP_ATOMIC_ADD_I8;
6066 if (!mono_arch_opcode_supported (opcode))
6068 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6069 ins_iconst->inst_c0 = 1;
6070 ins_iconst->dreg = mono_alloc_ireg (cfg);
6071 MONO_ADD_INS (cfg->cbb, ins_iconst);
6073 MONO_INST_NEW (cfg, ins, opcode);
6074 ins->dreg = mono_alloc_ireg (cfg);
6075 ins->inst_basereg = args [0]->dreg;
6076 ins->inst_offset = 0;
6077 ins->sreg2 = ins_iconst->dreg;
6078 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6079 MONO_ADD_INS (cfg->cbb, ins);
6081 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6082 MonoInst *ins_iconst;
6085 if (fsig->params [0]->type == MONO_TYPE_I4) {
6086 opcode = OP_ATOMIC_ADD_I4;
6087 cfg->has_atomic_add_i4 = TRUE;
6089 #if SIZEOF_REGISTER == 8
6090 else if (fsig->params [0]->type == MONO_TYPE_I8)
6091 opcode = OP_ATOMIC_ADD_I8;
6094 if (!mono_arch_opcode_supported (opcode))
6096 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6097 ins_iconst->inst_c0 = -1;
6098 ins_iconst->dreg = mono_alloc_ireg (cfg);
6099 MONO_ADD_INS (cfg->cbb, ins_iconst);
6101 MONO_INST_NEW (cfg, ins, opcode);
6102 ins->dreg = mono_alloc_ireg (cfg);
6103 ins->inst_basereg = args [0]->dreg;
6104 ins->inst_offset = 0;
6105 ins->sreg2 = ins_iconst->dreg;
6106 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6107 MONO_ADD_INS (cfg->cbb, ins);
6109 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6112 if (fsig->params [0]->type == MONO_TYPE_I4) {
6113 opcode = OP_ATOMIC_ADD_I4;
6114 cfg->has_atomic_add_i4 = TRUE;
6116 #if SIZEOF_REGISTER == 8
6117 else if (fsig->params [0]->type == MONO_TYPE_I8)
6118 opcode = OP_ATOMIC_ADD_I8;
6121 if (!mono_arch_opcode_supported (opcode))
6123 MONO_INST_NEW (cfg, ins, opcode);
6124 ins->dreg = mono_alloc_ireg (cfg);
6125 ins->inst_basereg = args [0]->dreg;
6126 ins->inst_offset = 0;
6127 ins->sreg2 = args [1]->dreg;
6128 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6129 MONO_ADD_INS (cfg->cbb, ins);
6132 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6133 MonoInst *f2i = NULL, *i2f;
6134 guint32 opcode, f2i_opcode, i2f_opcode;
6135 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6136 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6138 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6139 fsig->params [0]->type == MONO_TYPE_R4) {
6140 opcode = OP_ATOMIC_EXCHANGE_I4;
6141 f2i_opcode = OP_MOVE_F_TO_I4;
6142 i2f_opcode = OP_MOVE_I4_TO_F;
6143 cfg->has_atomic_exchange_i4 = TRUE;
6145 #if SIZEOF_REGISTER == 8
6147 fsig->params [0]->type == MONO_TYPE_I8 ||
6148 fsig->params [0]->type == MONO_TYPE_R8 ||
6149 fsig->params [0]->type == MONO_TYPE_I) {
6150 opcode = OP_ATOMIC_EXCHANGE_I8;
6151 f2i_opcode = OP_MOVE_F_TO_I8;
6152 i2f_opcode = OP_MOVE_I8_TO_F;
6155 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6156 opcode = OP_ATOMIC_EXCHANGE_I4;
6157 cfg->has_atomic_exchange_i4 = TRUE;
6163 if (!mono_arch_opcode_supported (opcode))
6167 /* TODO: Decompose these opcodes instead of bailing here. */
6168 if (COMPILE_SOFT_FLOAT (cfg))
6171 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6172 f2i->dreg = mono_alloc_ireg (cfg);
6173 f2i->sreg1 = args [1]->dreg;
6174 if (f2i_opcode == OP_MOVE_F_TO_I4)
6175 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6176 MONO_ADD_INS (cfg->cbb, f2i);
6179 MONO_INST_NEW (cfg, ins, opcode);
6180 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6181 ins->inst_basereg = args [0]->dreg;
6182 ins->inst_offset = 0;
6183 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6184 MONO_ADD_INS (cfg->cbb, ins);
6186 switch (fsig->params [0]->type) {
6188 ins->type = STACK_I4;
6191 ins->type = STACK_I8;
6194 #if SIZEOF_REGISTER == 8
6195 ins->type = STACK_I8;
6197 ins->type = STACK_I4;
6202 ins->type = STACK_R8;
6205 g_assert (mini_type_is_reference (fsig->params [0]));
6206 ins->type = STACK_OBJ;
6211 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6212 i2f->dreg = mono_alloc_freg (cfg);
6213 i2f->sreg1 = ins->dreg;
6214 i2f->type = STACK_R8;
6215 if (i2f_opcode == OP_MOVE_I4_TO_F)
6216 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6217 MONO_ADD_INS (cfg->cbb, i2f);
6222 if (cfg->gen_write_barriers && is_ref)
6223 emit_write_barrier (cfg, args [0], args [1]);
6225 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6226 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6227 guint32 opcode, f2i_opcode, i2f_opcode;
6228 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6229 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6231 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6232 fsig->params [1]->type == MONO_TYPE_R4) {
6233 opcode = OP_ATOMIC_CAS_I4;
6234 f2i_opcode = OP_MOVE_F_TO_I4;
6235 i2f_opcode = OP_MOVE_I4_TO_F;
6236 cfg->has_atomic_cas_i4 = TRUE;
6238 #if SIZEOF_REGISTER == 8
6240 fsig->params [1]->type == MONO_TYPE_I8 ||
6241 fsig->params [1]->type == MONO_TYPE_R8 ||
6242 fsig->params [1]->type == MONO_TYPE_I) {
6243 opcode = OP_ATOMIC_CAS_I8;
6244 f2i_opcode = OP_MOVE_F_TO_I8;
6245 i2f_opcode = OP_MOVE_I8_TO_F;
6248 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6249 opcode = OP_ATOMIC_CAS_I4;
6250 cfg->has_atomic_cas_i4 = TRUE;
6256 if (!mono_arch_opcode_supported (opcode))
6260 /* TODO: Decompose these opcodes instead of bailing here. */
6261 if (COMPILE_SOFT_FLOAT (cfg))
6264 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6265 f2i_new->dreg = mono_alloc_ireg (cfg);
6266 f2i_new->sreg1 = args [1]->dreg;
6267 if (f2i_opcode == OP_MOVE_F_TO_I4)
6268 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6269 MONO_ADD_INS (cfg->cbb, f2i_new);
6271 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6272 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6273 f2i_cmp->sreg1 = args [2]->dreg;
6274 if (f2i_opcode == OP_MOVE_F_TO_I4)
6275 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6276 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6279 MONO_INST_NEW (cfg, ins, opcode);
6280 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6281 ins->sreg1 = args [0]->dreg;
6282 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6283 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6284 MONO_ADD_INS (cfg->cbb, ins);
6286 switch (fsig->params [1]->type) {
6288 ins->type = STACK_I4;
6291 ins->type = STACK_I8;
6294 #if SIZEOF_REGISTER == 8
6295 ins->type = STACK_I8;
6297 ins->type = STACK_I4;
6301 ins->type = cfg->r4_stack_type;
6304 ins->type = STACK_R8;
6307 g_assert (mini_type_is_reference (fsig->params [1]));
6308 ins->type = STACK_OBJ;
6313 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6314 i2f->dreg = mono_alloc_freg (cfg);
6315 i2f->sreg1 = ins->dreg;
6316 i2f->type = STACK_R8;
6317 if (i2f_opcode == OP_MOVE_I4_TO_F)
6318 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6319 MONO_ADD_INS (cfg->cbb, i2f);
6324 if (cfg->gen_write_barriers && is_ref)
6325 emit_write_barrier (cfg, args [0], args [1]);
6327 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6328 fsig->params [1]->type == MONO_TYPE_I4) {
6329 MonoInst *cmp, *ceq;
6331 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6334 /* int32 r = CAS (location, value, comparand); */
6335 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6336 ins->dreg = alloc_ireg (cfg);
6337 ins->sreg1 = args [0]->dreg;
6338 ins->sreg2 = args [1]->dreg;
6339 ins->sreg3 = args [2]->dreg;
6340 ins->type = STACK_I4;
6341 MONO_ADD_INS (cfg->cbb, ins);
6343 /* bool result = r == comparand; */
6344 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6345 cmp->sreg1 = ins->dreg;
6346 cmp->sreg2 = args [2]->dreg;
6347 cmp->type = STACK_I4;
6348 MONO_ADD_INS (cfg->cbb, cmp);
6350 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6351 ceq->dreg = alloc_ireg (cfg);
6352 ceq->type = STACK_I4;
6353 MONO_ADD_INS (cfg->cbb, ceq);
6355 /* *success = result; */
6356 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6358 cfg->has_atomic_cas_i4 = TRUE;
6360 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6361 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6365 } else if (cmethod->klass->image == mono_defaults.corlib &&
6366 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6367 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6370 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6372 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6373 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6375 if (fsig->params [0]->type == MONO_TYPE_I1)
6376 opcode = OP_ATOMIC_LOAD_I1;
6377 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6378 opcode = OP_ATOMIC_LOAD_U1;
6379 else if (fsig->params [0]->type == MONO_TYPE_I2)
6380 opcode = OP_ATOMIC_LOAD_I2;
6381 else if (fsig->params [0]->type == MONO_TYPE_U2)
6382 opcode = OP_ATOMIC_LOAD_U2;
6383 else if (fsig->params [0]->type == MONO_TYPE_I4)
6384 opcode = OP_ATOMIC_LOAD_I4;
6385 else if (fsig->params [0]->type == MONO_TYPE_U4)
6386 opcode = OP_ATOMIC_LOAD_U4;
6387 else if (fsig->params [0]->type == MONO_TYPE_R4)
6388 opcode = OP_ATOMIC_LOAD_R4;
6389 else if (fsig->params [0]->type == MONO_TYPE_R8)
6390 opcode = OP_ATOMIC_LOAD_R8;
6391 #if SIZEOF_REGISTER == 8
6392 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6393 opcode = OP_ATOMIC_LOAD_I8;
6394 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6395 opcode = OP_ATOMIC_LOAD_U8;
6397 else if (fsig->params [0]->type == MONO_TYPE_I)
6398 opcode = OP_ATOMIC_LOAD_I4;
6399 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6400 opcode = OP_ATOMIC_LOAD_U4;
6404 if (!mono_arch_opcode_supported (opcode))
6407 MONO_INST_NEW (cfg, ins, opcode);
6408 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6409 ins->sreg1 = args [0]->dreg;
6410 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6411 MONO_ADD_INS (cfg->cbb, ins);
6413 switch (fsig->params [0]->type) {
6414 case MONO_TYPE_BOOLEAN:
6421 ins->type = STACK_I4;
6425 ins->type = STACK_I8;
6429 #if SIZEOF_REGISTER == 8
6430 ins->type = STACK_I8;
6432 ins->type = STACK_I4;
6436 ins->type = cfg->r4_stack_type;
6439 ins->type = STACK_R8;
6442 g_assert (mini_type_is_reference (fsig->params [0]));
6443 ins->type = STACK_OBJ;
6449 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6451 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6453 if (fsig->params [0]->type == MONO_TYPE_I1)
6454 opcode = OP_ATOMIC_STORE_I1;
6455 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6456 opcode = OP_ATOMIC_STORE_U1;
6457 else if (fsig->params [0]->type == MONO_TYPE_I2)
6458 opcode = OP_ATOMIC_STORE_I2;
6459 else if (fsig->params [0]->type == MONO_TYPE_U2)
6460 opcode = OP_ATOMIC_STORE_U2;
6461 else if (fsig->params [0]->type == MONO_TYPE_I4)
6462 opcode = OP_ATOMIC_STORE_I4;
6463 else if (fsig->params [0]->type == MONO_TYPE_U4)
6464 opcode = OP_ATOMIC_STORE_U4;
6465 else if (fsig->params [0]->type == MONO_TYPE_R4)
6466 opcode = OP_ATOMIC_STORE_R4;
6467 else if (fsig->params [0]->type == MONO_TYPE_R8)
6468 opcode = OP_ATOMIC_STORE_R8;
6469 #if SIZEOF_REGISTER == 8
6470 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6471 opcode = OP_ATOMIC_STORE_I8;
6472 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6473 opcode = OP_ATOMIC_STORE_U8;
6475 else if (fsig->params [0]->type == MONO_TYPE_I)
6476 opcode = OP_ATOMIC_STORE_I4;
6477 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6478 opcode = OP_ATOMIC_STORE_U4;
6482 if (!mono_arch_opcode_supported (opcode))
6485 MONO_INST_NEW (cfg, ins, opcode);
6486 ins->dreg = args [0]->dreg;
6487 ins->sreg1 = args [1]->dreg;
6488 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6489 MONO_ADD_INS (cfg->cbb, ins);
6491 if (cfg->gen_write_barriers && is_ref)
6492 emit_write_barrier (cfg, args [0], args [1]);
6498 } else if (cmethod->klass->image == mono_defaults.corlib &&
6499 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6500 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6501 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6502 if (should_insert_brekpoint (cfg->method)) {
6503 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6505 MONO_INST_NEW (cfg, ins, OP_NOP);
6506 MONO_ADD_INS (cfg->cbb, ins);
6510 } else if (cmethod->klass->image == mono_defaults.corlib &&
6511 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6512 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6513 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6515 EMIT_NEW_ICONST (cfg, ins, 1);
6517 EMIT_NEW_ICONST (cfg, ins, 0);
6520 } else if (cmethod->klass == mono_defaults.math_class) {
6522 * There is general branchless code for Min/Max, but it does not work for
6524 * http://everything2.com/?node_id=1051618
6526 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6527 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6528 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6529 !strcmp (cmethod->klass->name, "Selector")) ||
6530 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6531 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6532 !strcmp (cmethod->klass->name, "Selector"))
6534 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6535 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6536 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6539 MonoJumpInfoToken *ji;
6542 cfg->disable_llvm = TRUE;
6544 if (args [0]->opcode == OP_GOT_ENTRY) {
6545 pi = args [0]->inst_p1;
6546 g_assert (pi->opcode == OP_PATCH_INFO);
6547 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6550 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6551 ji = args [0]->inst_p0;
6554 NULLIFY_INS (args [0]);
6557 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6558 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6559 ins->dreg = mono_alloc_ireg (cfg);
6561 ins->inst_p0 = mono_string_to_utf8 (s);
6562 MONO_ADD_INS (cfg->cbb, ins);
6568 #ifdef MONO_ARCH_SIMD_INTRINSICS
6569 if (cfg->opt & MONO_OPT_SIMD) {
6570 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6576 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6580 if (COMPILE_LLVM (cfg)) {
6581 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6586 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6590 * This entry point could be used later for arbitrary method
6593 inline static MonoInst*
6594 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6595 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6597 if (method->klass == mono_defaults.string_class) {
6598 /* managed string allocation support */
6599 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6600 MonoInst *iargs [2];
6601 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6602 MonoMethod *managed_alloc = NULL;
6604 g_assert (vtable); /*Should not fail since it System.String*/
6605 #ifndef MONO_CROSS_COMPILE
6606 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6610 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6611 iargs [1] = args [0];
6612 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
6619 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6621 MonoInst *store, *temp;
6624 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6625 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6628 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6629 * would be different than the MonoInst's used to represent arguments, and
6630 * the ldelema implementation can't deal with that.
6631 * Solution: When ldelema is used on an inline argument, create a var for
6632 * it, emit ldelema on that var, and emit the saving code below in
6633 * inline_method () if needed.
6635 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6636 cfg->args [i] = temp;
6637 /* This uses cfg->args [i] which is set by the preceeding line */
6638 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6639 store->cil_code = sp [0]->cil_code;
6644 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6645 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6647 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6649 check_inline_called_method_name_limit (MonoMethod *called_method)
6652 static const char *limit = NULL;
6654 if (limit == NULL) {
6655 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6657 if (limit_string != NULL)
6658 limit = limit_string;
6663 if (limit [0] != '\0') {
6664 char *called_method_name = mono_method_full_name (called_method, TRUE);
6666 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6667 g_free (called_method_name);
6669 //return (strncmp_result <= 0);
6670 return (strncmp_result == 0);
6677 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6679 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6682 static const char *limit = NULL;
6684 if (limit == NULL) {
6685 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6686 if (limit_string != NULL) {
6687 limit = limit_string;
6693 if (limit [0] != '\0') {
6694 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6696 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6697 g_free (caller_method_name);
6699 //return (strncmp_result <= 0);
6700 return (strncmp_result == 0);
6708 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6710 static double r8_0 = 0.0;
6711 static float r4_0 = 0.0;
6715 rtype = mini_get_underlying_type (rtype);
6719 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6720 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6721 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6722 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6723 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6724 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6725 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6726 ins->type = STACK_R4;
6727 ins->inst_p0 = (void*)&r4_0;
6729 MONO_ADD_INS (cfg->cbb, ins);
6730 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6731 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6732 ins->type = STACK_R8;
6733 ins->inst_p0 = (void*)&r8_0;
6735 MONO_ADD_INS (cfg->cbb, ins);
6736 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6737 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6738 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6739 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6740 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6742 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6747 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6751 rtype = mini_get_underlying_type (rtype);
6755 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6756 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6757 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6758 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6759 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6760 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6761 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6762 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6763 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6764 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6765 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6766 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6767 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6768 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6770 emit_init_rvar (cfg, dreg, rtype);
6774 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6776 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6778 MonoInst *var = cfg->locals [local];
6779 if (COMPILE_SOFT_FLOAT (cfg)) {
6781 int reg = alloc_dreg (cfg, var->type);
6782 emit_init_rvar (cfg, reg, type);
6783 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6786 emit_init_rvar (cfg, var->dreg, type);
6788 emit_dummy_init_rvar (cfg, var->dreg, type);
6795 * Return the cost of inlining CMETHOD.
6798 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6799 guchar *ip, guint real_offset, gboolean inline_always)
6801 MonoInst *ins, *rvar = NULL;
6802 MonoMethodHeader *cheader;
6803 MonoBasicBlock *ebblock, *sbblock;
6805 MonoMethod *prev_inlined_method;
6806 MonoInst **prev_locals, **prev_args;
6807 MonoType **prev_arg_types;
6808 guint prev_real_offset;
6809 GHashTable *prev_cbb_hash;
6810 MonoBasicBlock **prev_cil_offset_to_bb;
6811 MonoBasicBlock *prev_cbb;
6812 unsigned char* prev_cil_start;
6813 guint32 prev_cil_offset_to_bb_len;
6814 MonoMethod *prev_current_method;
6815 MonoGenericContext *prev_generic_context;
6816 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6818 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6820 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6821 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6824 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6825 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6830 fsig = mono_method_signature (cmethod);
6832 if (cfg->verbose_level > 2)
6833 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6835 if (!cmethod->inline_info) {
6836 cfg->stat_inlineable_methods++;
6837 cmethod->inline_info = 1;
6840 /* allocate local variables */
6841 cheader = mono_method_get_header (cmethod);
6843 if (cheader == NULL || mono_loader_get_last_error ()) {
6844 MonoLoaderError *error = mono_loader_get_last_error ();
6847 mono_metadata_free_mh (cheader);
6848 if (inline_always && error)
6849 mono_cfg_set_exception (cfg, error->exception_type);
6851 mono_loader_clear_error ();
6855 /*Must verify before creating locals as it can cause the JIT to assert.*/
6856 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6857 mono_metadata_free_mh (cheader);
6861 /* allocate space to store the return value */
6862 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6863 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6866 prev_locals = cfg->locals;
6867 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6868 for (i = 0; i < cheader->num_locals; ++i)
6869 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6871 /* allocate start and end blocks */
6872 /* This is needed so if the inline is aborted, we can clean up */
6873 NEW_BBLOCK (cfg, sbblock);
6874 sbblock->real_offset = real_offset;
6876 NEW_BBLOCK (cfg, ebblock);
6877 ebblock->block_num = cfg->num_bblocks++;
6878 ebblock->real_offset = real_offset;
6880 prev_args = cfg->args;
6881 prev_arg_types = cfg->arg_types;
6882 prev_inlined_method = cfg->inlined_method;
6883 cfg->inlined_method = cmethod;
6884 cfg->ret_var_set = FALSE;
6885 cfg->inline_depth ++;
6886 prev_real_offset = cfg->real_offset;
6887 prev_cbb_hash = cfg->cbb_hash;
6888 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6889 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6890 prev_cil_start = cfg->cil_start;
6891 prev_cbb = cfg->cbb;
6892 prev_current_method = cfg->current_method;
6893 prev_generic_context = cfg->generic_context;
6894 prev_ret_var_set = cfg->ret_var_set;
6895 prev_disable_inline = cfg->disable_inline;
6897 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6900 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6902 ret_var_set = cfg->ret_var_set;
6904 cfg->inlined_method = prev_inlined_method;
6905 cfg->real_offset = prev_real_offset;
6906 cfg->cbb_hash = prev_cbb_hash;
6907 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6908 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6909 cfg->cil_start = prev_cil_start;
6910 cfg->locals = prev_locals;
6911 cfg->args = prev_args;
6912 cfg->arg_types = prev_arg_types;
6913 cfg->current_method = prev_current_method;
6914 cfg->generic_context = prev_generic_context;
6915 cfg->ret_var_set = prev_ret_var_set;
6916 cfg->disable_inline = prev_disable_inline;
6917 cfg->inline_depth --;
6919 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6920 if (cfg->verbose_level > 2)
6921 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6923 cfg->stat_inlined_methods++;
6925 /* always add some code to avoid block split failures */
6926 MONO_INST_NEW (cfg, ins, OP_NOP);
6927 MONO_ADD_INS (prev_cbb, ins);
6929 prev_cbb->next_bb = sbblock;
6930 link_bblock (cfg, prev_cbb, sbblock);
6933 * Get rid of the begin and end bblocks if possible to aid local
6936 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6938 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6939 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6941 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6942 MonoBasicBlock *prev = ebblock->in_bb [0];
6943 mono_merge_basic_blocks (cfg, prev, ebblock);
6945 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6946 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6947 cfg->cbb = prev_cbb;
6951 * Its possible that the rvar is set in some prev bblock, but not in others.
6957 for (i = 0; i < ebblock->in_count; ++i) {
6958 bb = ebblock->in_bb [i];
6960 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6963 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6973 * If the inlined method contains only a throw, then the ret var is not
6974 * set, so set it to a dummy value.
6977 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6979 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6982 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6985 if (cfg->verbose_level > 2)
6986 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6987 cfg->exception_type = MONO_EXCEPTION_NONE;
6988 mono_loader_clear_error ();
6990 /* This gets rid of the newly added bblocks */
6991 cfg->cbb = prev_cbb;
6993 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6998 * Some of these comments may well be out-of-date.
6999 * Design decisions: we do a single pass over the IL code (and we do bblock
7000 * splitting/merging in the few cases when it's required: a back jump to an IL
7001 * address that was not already seen as bblock starting point).
7002 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7003 * Complex operations are decomposed in simpler ones right away. We need to let the
7004 * arch-specific code peek and poke inside this process somehow (except when the
7005 * optimizations can take advantage of the full semantic info of coarse opcodes).
7006 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7007 * MonoInst->opcode initially is the IL opcode or some simplification of that
7008 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7009 * opcode with value bigger than OP_LAST.
7010 * At this point the IR can be handed over to an interpreter, a dumb code generator
7011 * or to the optimizing code generator that will translate it to SSA form.
7013 * Profiling directed optimizations.
7014 * We may compile by default with few or no optimizations and instrument the code
7015 * or the user may indicate what methods to optimize the most either in a config file
7016 * or through repeated runs where the compiler applies offline the optimizations to
7017 * each method and then decides if it was worth it.
7020 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7021 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7022 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7023 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7024 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7025 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7026 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7027 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7029 /* offset from br.s -> br like opcodes */
7030 #define BIG_BRANCH_OFFSET 13
7033 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7035 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7037 return b == NULL || b == bb;
7041 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7043 unsigned char *ip = start;
7044 unsigned char *target;
7047 MonoBasicBlock *bblock;
7048 const MonoOpcode *opcode;
7051 cli_addr = ip - start;
7052 i = mono_opcode_value ((const guint8 **)&ip, end);
7055 opcode = &mono_opcodes [i];
7056 switch (opcode->argument) {
7057 case MonoInlineNone:
7060 case MonoInlineString:
7061 case MonoInlineType:
7062 case MonoInlineField:
7063 case MonoInlineMethod:
7066 case MonoShortInlineR:
7073 case MonoShortInlineVar:
7074 case MonoShortInlineI:
7077 case MonoShortInlineBrTarget:
7078 target = start + cli_addr + 2 + (signed char)ip [1];
7079 GET_BBLOCK (cfg, bblock, target);
7082 GET_BBLOCK (cfg, bblock, ip);
7084 case MonoInlineBrTarget:
7085 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7086 GET_BBLOCK (cfg, bblock, target);
7089 GET_BBLOCK (cfg, bblock, ip);
7091 case MonoInlineSwitch: {
7092 guint32 n = read32 (ip + 1);
7095 cli_addr += 5 + 4 * n;
7096 target = start + cli_addr;
7097 GET_BBLOCK (cfg, bblock, target);
7099 for (j = 0; j < n; ++j) {
7100 target = start + cli_addr + (gint32)read32 (ip);
7101 GET_BBLOCK (cfg, bblock, target);
7111 g_assert_not_reached ();
7114 if (i == CEE_THROW) {
7115 unsigned char *bb_start = ip - 1;
7117 /* Find the start of the bblock containing the throw */
7119 while ((bb_start >= start) && !bblock) {
7120 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7124 bblock->out_of_line = 1;
7134 static inline MonoMethod *
7135 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7139 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7140 method = mono_method_get_wrapper_data (m, token);
7143 method = mono_class_inflate_generic_method_checked (method, context, &error);
7144 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7147 method = mono_get_method_full (m->klass->image, token, klass, context);
7153 static inline MonoMethod *
7154 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7156 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7158 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
7164 static inline MonoClass*
7165 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7170 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7171 klass = mono_method_get_wrapper_data (method, token);
7173 klass = mono_class_inflate_generic_class (klass, context);
7175 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7176 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7179 mono_class_init (klass);
7183 static inline MonoMethodSignature*
7184 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7186 MonoMethodSignature *fsig;
7188 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7191 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7193 fsig = mono_inflate_generic_signature (fsig, context, &error);
7195 g_assert (mono_error_ok (&error));
7198 fsig = mono_metadata_parse_signature (method->klass->image, token);
7204 throw_exception (void)
7206 static MonoMethod *method = NULL;
7209 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7210 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
7217 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7219 MonoMethod *thrower = throw_exception ();
7222 EMIT_NEW_PCONST (cfg, args [0], ex);
7223 mono_emit_method_call (cfg, thrower, args, NULL);
7227 * Return the original method is a wrapper is specified. We can only access
7228 * the custom attributes from the original method.
7231 get_original_method (MonoMethod *method)
7233 if (method->wrapper_type == MONO_WRAPPER_NONE)
7236 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7237 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7240 /* in other cases we need to find the original method */
7241 return mono_marshal_method_from_wrapper (method);
7245 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7247 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7248 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7250 emit_throw_exception (cfg, ex);
7254 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7256 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7257 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7259 emit_throw_exception (cfg, ex);
7263 * Check that the IL instructions at ip are the array initialization
7264 * sequence and return the pointer to the data and the size.
7267 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7270 * newarr[System.Int32]
7272 * ldtoken field valuetype ...
7273 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7275 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7277 guint32 token = read32 (ip + 7);
7278 guint32 field_token = read32 (ip + 2);
7279 guint32 field_index = field_token & 0xffffff;
7281 const char *data_ptr;
7283 MonoMethod *cmethod;
7284 MonoClass *dummy_class;
7285 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7289 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7293 *out_field_token = field_token;
7295 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
7298 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7300 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7301 case MONO_TYPE_BOOLEAN:
7305 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7306 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7307 case MONO_TYPE_CHAR:
7324 if (size > mono_type_size (field->type, &dummy_align))
7327 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7328 if (!image_is_dynamic (method->klass->image)) {
7329 field_index = read32 (ip + 2) & 0xffffff;
7330 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7331 data_ptr = mono_image_rva_map (method->klass->image, rva);
7332 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7333 /* for aot code we do the lookup on load */
7334 if (aot && data_ptr)
7335 return GUINT_TO_POINTER (rva);
7337 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7339 data_ptr = mono_field_get_data (field);
7347 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7349 char *method_fname = mono_method_full_name (method, TRUE);
7351 MonoMethodHeader *header = mono_method_get_header (method);
7353 if (header->code_size == 0)
7354 method_code = g_strdup ("method body is empty.");
7356 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7357 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7358 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7359 g_free (method_fname);
7360 g_free (method_code);
7361 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7365 set_exception_object (MonoCompile *cfg, MonoException *exception)
7367 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7368 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7369 cfg->exception_ptr = exception;
7373 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7376 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7377 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7378 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7379 /* Optimize reg-reg moves away */
7381 * Can't optimize other opcodes, since sp[0] might point to
7382 * the last ins of a decomposed opcode.
7384 sp [0]->dreg = (cfg)->locals [n]->dreg;
7386 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7391 * ldloca inhibits many optimizations so try to get rid of it in common
7394 static inline unsigned char *
7395 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7405 local = read16 (ip + 2);
7409 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7410 /* From the INITOBJ case */
7411 token = read32 (ip + 2);
7412 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7413 CHECK_TYPELOAD (klass);
7414 type = mini_get_underlying_type (&klass->byval_arg);
7415 emit_init_local (cfg, local, type, TRUE);
7423 is_exception_class (MonoClass *klass)
7426 if (klass == mono_defaults.exception_class)
7428 klass = klass->parent;
7434 * is_jit_optimizer_disabled:
7436 * Determine whenever M's assembly has a DebuggableAttribute with the
7437 * IsJITOptimizerDisabled flag set.
7440 is_jit_optimizer_disabled (MonoMethod *m)
7442 MonoAssembly *ass = m->klass->image->assembly;
7443 MonoCustomAttrInfo* attrs;
7444 static MonoClass *klass;
7446 gboolean val = FALSE;
7449 if (ass->jit_optimizer_disabled_inited)
7450 return ass->jit_optimizer_disabled;
7453 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
7456 ass->jit_optimizer_disabled = FALSE;
7457 mono_memory_barrier ();
7458 ass->jit_optimizer_disabled_inited = TRUE;
7462 attrs = mono_custom_attrs_from_assembly (ass);
7464 for (i = 0; i < attrs->num_attrs; ++i) {
7465 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7467 MonoMethodSignature *sig;
7469 if (!attr->ctor || attr->ctor->klass != klass)
7471 /* Decode the attribute. See reflection.c */
7472 p = (const char*)attr->data;
7473 g_assert (read16 (p) == 0x0001);
7476 // FIXME: Support named parameters
7477 sig = mono_method_signature (attr->ctor);
7478 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7480 /* Two boolean arguments */
7484 mono_custom_attrs_free (attrs);
7487 ass->jit_optimizer_disabled = val;
7488 mono_memory_barrier ();
7489 ass->jit_optimizer_disabled_inited = TRUE;
7495 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7497 gboolean supported_tail_call;
7500 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7501 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7503 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7506 for (i = 0; i < fsig->param_count; ++i) {
7507 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7508 /* These can point to the current method's stack */
7509 supported_tail_call = FALSE;
7511 if (fsig->hasthis && cmethod->klass->valuetype)
7512 /* this might point to the current method's stack */
7513 supported_tail_call = FALSE;
7514 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7515 supported_tail_call = FALSE;
7516 if (cfg->method->save_lmf)
7517 supported_tail_call = FALSE;
7518 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7519 supported_tail_call = FALSE;
7520 if (call_opcode != CEE_CALL)
7521 supported_tail_call = FALSE;
7523 /* Debugging support */
7525 if (supported_tail_call) {
7526 if (!mono_debug_count ())
7527 supported_tail_call = FALSE;
7531 return supported_tail_call;
7534 /* emits the code needed to access a managed tls var (like ThreadStatic)
7535 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7536 * pointer for the current thread.
7537 * Returns the MonoInst* representing the address of the tls var.
7540 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7543 int static_data_reg, array_reg, dreg;
7544 int offset2_reg, idx_reg;
7545 // inlined access to the tls data (see threads.c)
7546 static_data_reg = alloc_ireg (cfg);
7547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7548 idx_reg = alloc_ireg (cfg);
7549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
7550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7551 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7552 array_reg = alloc_ireg (cfg);
7553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7554 offset2_reg = alloc_ireg (cfg);
7555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
7556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
7557 dreg = alloc_ireg (cfg);
7558 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7565 * Handle calls made to ctors from NEWOBJ opcodes.
7568 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7569 MonoInst **sp, guint8 *ip, int *inline_costs)
7571 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7573 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7574 mono_method_is_generic_sharable (cmethod, TRUE)) {
7575 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7576 mono_class_vtable (cfg->domain, cmethod->klass);
7577 CHECK_TYPELOAD (cmethod->klass);
7579 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7580 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7583 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7584 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7586 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7588 CHECK_TYPELOAD (cmethod->klass);
7589 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7594 /* Avoid virtual calls to ctors if possible */
7595 if (mono_class_is_marshalbyref (cmethod->klass))
7596 callvirt_this_arg = sp [0];
7598 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7599 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7600 CHECK_CFG_EXCEPTION;
7601 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7602 mono_method_check_inlining (cfg, cmethod) &&
7603 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7606 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7607 cfg->real_offset += 5;
7609 *inline_costs += costs - 5;
7611 INLINE_FAILURE ("inline failure");
7612 // FIXME-VT: Clean this up
7613 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7614 GSHAREDVT_FAILURE(*ip);
7615 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7617 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7620 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7621 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7622 } else if (context_used &&
7623 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7624 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7625 MonoInst *cmethod_addr;
7627 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7629 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7630 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7632 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7634 INLINE_FAILURE ("ctor call");
7635 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7636 callvirt_this_arg, NULL, vtable_arg);
7643 * mono_method_to_ir:
7645 * Translate the .net IL into linear IR.
7648 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7649 MonoInst *return_var, MonoInst **inline_args,
7650 guint inline_offset, gboolean is_virtual_call)
7653 MonoInst *ins, **sp, **stack_start;
7654 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7655 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7656 MonoMethod *cmethod, *method_definition;
7657 MonoInst **arg_array;
7658 MonoMethodHeader *header;
7660 guint32 token, ins_flag;
7662 MonoClass *constrained_class = NULL;
7663 unsigned char *ip, *end, *target, *err_pos;
7664 MonoMethodSignature *sig;
7665 MonoGenericContext *generic_context = NULL;
7666 MonoGenericContainer *generic_container = NULL;
7667 MonoType **param_types;
7668 int i, n, start_new_bblock, dreg;
7669 int num_calls = 0, inline_costs = 0;
7670 int breakpoint_id = 0;
7672 GSList *class_inits = NULL;
7673 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7675 gboolean init_locals, seq_points, skip_dead_blocks;
7676 gboolean sym_seq_points = FALSE;
7677 MonoDebugMethodInfo *minfo;
7678 MonoBitSet *seq_point_locs = NULL;
7679 MonoBitSet *seq_point_set_locs = NULL;
7681 cfg->disable_inline = is_jit_optimizer_disabled (method);
7683 /* serialization and xdomain stuff may need access to private fields and methods */
7684 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7685 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7686 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7687 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7688 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7689 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7691 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7692 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7693 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7694 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7695 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7697 image = method->klass->image;
7698 header = mono_method_get_header (method);
7700 MonoLoaderError *error;
7702 if ((error = mono_loader_get_last_error ())) {
7703 mono_cfg_set_exception (cfg, error->exception_type);
7705 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7706 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7708 goto exception_exit;
7710 generic_container = mono_method_get_generic_container (method);
7711 sig = mono_method_signature (method);
7712 num_args = sig->hasthis + sig->param_count;
7713 ip = (unsigned char*)header->code;
7714 cfg->cil_start = ip;
7715 end = ip + header->code_size;
7716 cfg->stat_cil_code_size += header->code_size;
7718 seq_points = cfg->gen_seq_points && cfg->method == method;
7720 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7721 /* We could hit a seq point before attaching to the JIT (#8338) */
7725 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7726 minfo = mono_debug_lookup_method (method);
7728 MonoSymSeqPoint *sps;
7729 int i, n_il_offsets;
7731 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7732 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7733 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7734 sym_seq_points = TRUE;
7735 for (i = 0; i < n_il_offsets; ++i) {
7736 if (sps [i].il_offset < header->code_size)
7737 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7740 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7741 /* Methods without line number info like auto-generated property accessors */
7742 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7743 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7744 sym_seq_points = TRUE;
7749 * Methods without init_locals set could cause asserts in various passes
7750 * (#497220). To work around this, we emit dummy initialization opcodes
7751 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7752 * on some platforms.
7754 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7755 init_locals = header->init_locals;
7759 method_definition = method;
7760 while (method_definition->is_inflated) {
7761 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7762 method_definition = imethod->declaring;
7765 /* SkipVerification is not allowed if core-clr is enabled */
7766 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7768 dont_verify_stloc = TRUE;
7771 if (sig->is_inflated)
7772 generic_context = mono_method_get_context (method);
7773 else if (generic_container)
7774 generic_context = &generic_container->context;
7775 cfg->generic_context = generic_context;
7778 g_assert (!sig->has_type_parameters);
7780 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7781 g_assert (method->is_inflated);
7782 g_assert (mono_method_get_context (method)->method_inst);
7784 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7785 g_assert (sig->generic_param_count);
7787 if (cfg->method == method) {
7788 cfg->real_offset = 0;
7790 cfg->real_offset = inline_offset;
7793 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7794 cfg->cil_offset_to_bb_len = header->code_size;
7796 cfg->current_method = method;
7798 if (cfg->verbose_level > 2)
7799 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7801 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7803 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7804 for (n = 0; n < sig->param_count; ++n)
7805 param_types [n + sig->hasthis] = sig->params [n];
7806 cfg->arg_types = param_types;
7808 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7809 if (cfg->method == method) {
7811 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7812 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7815 NEW_BBLOCK (cfg, start_bblock);
7816 cfg->bb_entry = start_bblock;
7817 start_bblock->cil_code = NULL;
7818 start_bblock->cil_length = 0;
7821 NEW_BBLOCK (cfg, end_bblock);
7822 cfg->bb_exit = end_bblock;
7823 end_bblock->cil_code = NULL;
7824 end_bblock->cil_length = 0;
7825 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7826 g_assert (cfg->num_bblocks == 2);
7828 arg_array = cfg->args;
7830 if (header->num_clauses) {
7831 cfg->spvars = g_hash_table_new (NULL, NULL);
7832 cfg->exvars = g_hash_table_new (NULL, NULL);
7834 /* handle exception clauses */
7835 for (i = 0; i < header->num_clauses; ++i) {
7836 MonoBasicBlock *try_bb;
7837 MonoExceptionClause *clause = &header->clauses [i];
7838 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7839 try_bb->real_offset = clause->try_offset;
7840 try_bb->try_start = TRUE;
7841 try_bb->region = ((i + 1) << 8) | clause->flags;
7842 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7843 tblock->real_offset = clause->handler_offset;
7844 tblock->flags |= BB_EXCEPTION_HANDLER;
7847 * Linking the try block with the EH block hinders inlining as we won't be able to
7848 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7850 if (COMPILE_LLVM (cfg))
7851 link_bblock (cfg, try_bb, tblock);
7853 if (*(ip + clause->handler_offset) == CEE_POP)
7854 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7856 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7857 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7858 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7859 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7860 MONO_ADD_INS (tblock, ins);
7862 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7863 /* finally clauses already have a seq point */
7864 /* seq points for filter clauses are emitted below */
7865 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7866 MONO_ADD_INS (tblock, ins);
7869 /* todo: is a fault block unsafe to optimize? */
7870 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7871 tblock->flags |= BB_EXCEPTION_UNSAFE;
7874 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7876 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7878 /* catch and filter blocks get the exception object on the stack */
7879 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7880 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7882 /* mostly like handle_stack_args (), but just sets the input args */
7883 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7884 tblock->in_scount = 1;
7885 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7886 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7890 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7891 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7892 if (!cfg->compile_llvm) {
7893 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7894 ins->dreg = tblock->in_stack [0]->dreg;
7895 MONO_ADD_INS (tblock, ins);
7898 MonoInst *dummy_use;
7901 * Add a dummy use for the exvar so its liveness info will be
7904 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7907 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7908 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7909 MONO_ADD_INS (tblock, ins);
7912 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7913 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7914 tblock->flags |= BB_EXCEPTION_HANDLER;
7915 tblock->real_offset = clause->data.filter_offset;
7916 tblock->in_scount = 1;
7917 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7918 /* The filter block shares the exvar with the handler block */
7919 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7920 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7921 MONO_ADD_INS (tblock, ins);
7925 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7926 clause->data.catch_class &&
7928 mono_class_check_context_used (clause->data.catch_class)) {
7930 * In shared generic code with catch
7931 * clauses containing type variables
7932 * the exception handling code has to
7933 * be able to get to the rgctx.
7934 * Therefore we have to make sure that
7935 * the vtable/mrgctx argument (for
7936 * static or generic methods) or the
7937 * "this" argument (for non-static
7938 * methods) are live.
7940 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7941 mini_method_get_context (method)->method_inst ||
7942 method->klass->valuetype) {
7943 mono_get_vtable_var (cfg);
7945 MonoInst *dummy_use;
7947 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7952 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7953 cfg->cbb = start_bblock;
7954 cfg->args = arg_array;
7955 mono_save_args (cfg, sig, inline_args);
7958 /* FIRST CODE BLOCK */
7959 NEW_BBLOCK (cfg, tblock);
7960 tblock->cil_code = ip;
7964 ADD_BBLOCK (cfg, tblock);
7966 if (cfg->method == method) {
7967 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7968 if (breakpoint_id) {
7969 MONO_INST_NEW (cfg, ins, OP_BREAK);
7970 MONO_ADD_INS (cfg->cbb, ins);
7974 /* we use a separate basic block for the initialization code */
7975 NEW_BBLOCK (cfg, init_localsbb);
7976 cfg->bb_init = init_localsbb;
7977 init_localsbb->real_offset = cfg->real_offset;
7978 start_bblock->next_bb = init_localsbb;
7979 init_localsbb->next_bb = cfg->cbb;
7980 link_bblock (cfg, start_bblock, init_localsbb);
7981 link_bblock (cfg, init_localsbb, cfg->cbb);
7983 cfg->cbb = init_localsbb;
7985 if (cfg->gsharedvt && cfg->method == method) {
7986 MonoGSharedVtMethodInfo *info;
7987 MonoInst *var, *locals_var;
7990 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7991 info->method = cfg->method;
7992 info->count_entries = 16;
7993 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7994 cfg->gsharedvt_info = info;
7996 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7997 /* prevent it from being register allocated */
7998 //var->flags |= MONO_INST_VOLATILE;
7999 cfg->gsharedvt_info_var = var;
8001 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8002 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8004 /* Allocate locals */
8005 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8006 /* prevent it from being register allocated */
8007 //locals_var->flags |= MONO_INST_VOLATILE;
8008 cfg->gsharedvt_locals_var = locals_var;
8010 dreg = alloc_ireg (cfg);
8011 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8013 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8014 ins->dreg = locals_var->dreg;
8016 MONO_ADD_INS (cfg->cbb, ins);
8017 cfg->gsharedvt_locals_var_ins = ins;
8019 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8022 ins->flags |= MONO_INST_INIT;
8026 if (mono_security_core_clr_enabled ()) {
8027 /* check if this is native code, e.g. an icall or a p/invoke */
8028 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8029 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8031 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8032 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8034 /* if this ia a native call then it can only be JITted from platform code */
8035 if ((icall || pinvk) && method->klass && method->klass->image) {
8036 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8037 MonoException *ex = icall ? mono_get_exception_security () :
8038 mono_get_exception_method_access ();
8039 emit_throw_exception (cfg, ex);
8046 CHECK_CFG_EXCEPTION;
8048 if (header->code_size == 0)
8051 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8056 if (cfg->method == method)
8057 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8059 for (n = 0; n < header->num_locals; ++n) {
8060 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8065 /* We force the vtable variable here for all shared methods
8066 for the possibility that they might show up in a stack
8067 trace where their exact instantiation is needed. */
8068 if (cfg->gshared && method == cfg->method) {
8069 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8070 mini_method_get_context (method)->method_inst ||
8071 method->klass->valuetype) {
8072 mono_get_vtable_var (cfg);
8074 /* FIXME: Is there a better way to do this?
8075 We need the variable live for the duration
8076 of the whole method. */
8077 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8081 /* add a check for this != NULL to inlined methods */
8082 if (is_virtual_call) {
8085 NEW_ARGLOAD (cfg, arg_ins, 0);
8086 MONO_ADD_INS (cfg->cbb, arg_ins);
8087 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8090 skip_dead_blocks = !dont_verify;
8091 if (skip_dead_blocks) {
8092 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8097 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8098 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8101 start_new_bblock = 0;
8103 if (cfg->method == method)
8104 cfg->real_offset = ip - header->code;
8106 cfg->real_offset = inline_offset;
8111 if (start_new_bblock) {
8112 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8113 if (start_new_bblock == 2) {
8114 g_assert (ip == tblock->cil_code);
8116 GET_BBLOCK (cfg, tblock, ip);
8118 cfg->cbb->next_bb = tblock;
8120 start_new_bblock = 0;
8121 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8122 if (cfg->verbose_level > 3)
8123 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8124 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8128 g_slist_free (class_inits);
8131 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8132 link_bblock (cfg, cfg->cbb, tblock);
8133 if (sp != stack_start) {
8134 handle_stack_args (cfg, stack_start, sp - stack_start);
8136 CHECK_UNVERIFIABLE (cfg);
8138 cfg->cbb->next_bb = tblock;
8140 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8141 if (cfg->verbose_level > 3)
8142 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8143 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8146 g_slist_free (class_inits);
8151 if (skip_dead_blocks) {
8152 int ip_offset = ip - header->code;
8154 if (ip_offset == bb->end)
8158 int op_size = mono_opcode_size (ip, end);
8159 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8161 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8163 if (ip_offset + op_size == bb->end) {
8164 MONO_INST_NEW (cfg, ins, OP_NOP);
8165 MONO_ADD_INS (cfg->cbb, ins);
8166 start_new_bblock = 1;
8174 * Sequence points are points where the debugger can place a breakpoint.
8175 * Currently, we generate these automatically at points where the IL
8178 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8180 * Make methods interruptable at the beginning, and at the targets of
8181 * backward branches.
8182 * Also, do this at the start of every bblock in methods with clauses too,
8183 * to be able to handle instructions with inprecise control flow like
8185 * Backward branches are handled at the end of method-to-ir ().
8187 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8188 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8190 /* Avoid sequence points on empty IL like .volatile */
8191 // FIXME: Enable this
8192 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8193 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8194 if ((sp != stack_start) && !sym_seq_point)
8195 ins->flags |= MONO_INST_NONEMPTY_STACK;
8196 MONO_ADD_INS (cfg->cbb, ins);
8199 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8202 cfg->cbb->real_offset = cfg->real_offset;
8204 if ((cfg->method == method) && cfg->coverage_info) {
8205 guint32 cil_offset = ip - header->code;
8206 cfg->coverage_info->data [cil_offset].cil_code = ip;
8208 /* TODO: Use an increment here */
8209 #if defined(TARGET_X86)
8210 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8211 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8213 MONO_ADD_INS (cfg->cbb, ins);
8215 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8216 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8220 if (cfg->verbose_level > 3)
8221 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8225 if (seq_points && !sym_seq_points && sp != stack_start) {
8227 * The C# compiler uses these nops to notify the JIT that it should
8228 * insert seq points.
8230 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8231 MONO_ADD_INS (cfg->cbb, ins);
8233 if (cfg->keep_cil_nops)
8234 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8236 MONO_INST_NEW (cfg, ins, OP_NOP);
8238 MONO_ADD_INS (cfg->cbb, ins);
8241 if (should_insert_brekpoint (cfg->method)) {
8242 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8244 MONO_INST_NEW (cfg, ins, OP_NOP);
8247 MONO_ADD_INS (cfg->cbb, ins);
8253 CHECK_STACK_OVF (1);
8254 n = (*ip)-CEE_LDARG_0;
8256 EMIT_NEW_ARGLOAD (cfg, ins, n);
8264 CHECK_STACK_OVF (1);
8265 n = (*ip)-CEE_LDLOC_0;
8267 EMIT_NEW_LOCLOAD (cfg, ins, n);
8276 n = (*ip)-CEE_STLOC_0;
8279 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8281 emit_stloc_ir (cfg, sp, header, n);
8288 CHECK_STACK_OVF (1);
8291 EMIT_NEW_ARGLOAD (cfg, ins, n);
8297 CHECK_STACK_OVF (1);
8300 NEW_ARGLOADA (cfg, ins, n);
8301 MONO_ADD_INS (cfg->cbb, ins);
8311 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8313 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8318 CHECK_STACK_OVF (1);
8321 EMIT_NEW_LOCLOAD (cfg, ins, n);
8325 case CEE_LDLOCA_S: {
8326 unsigned char *tmp_ip;
8328 CHECK_STACK_OVF (1);
8329 CHECK_LOCAL (ip [1]);
8331 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8337 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8346 CHECK_LOCAL (ip [1]);
8347 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8349 emit_stloc_ir (cfg, sp, header, ip [1]);
8354 CHECK_STACK_OVF (1);
8355 EMIT_NEW_PCONST (cfg, ins, NULL);
8356 ins->type = STACK_OBJ;
8361 CHECK_STACK_OVF (1);
8362 EMIT_NEW_ICONST (cfg, ins, -1);
8375 CHECK_STACK_OVF (1);
8376 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8382 CHECK_STACK_OVF (1);
8384 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8390 CHECK_STACK_OVF (1);
8391 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8397 CHECK_STACK_OVF (1);
8398 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8399 ins->type = STACK_I8;
8400 ins->dreg = alloc_dreg (cfg, STACK_I8);
8402 ins->inst_l = (gint64)read64 (ip);
8403 MONO_ADD_INS (cfg->cbb, ins);
8409 gboolean use_aotconst = FALSE;
8411 #ifdef TARGET_POWERPC
8412 /* FIXME: Clean this up */
8413 if (cfg->compile_aot)
8414 use_aotconst = TRUE;
8417 /* FIXME: we should really allocate this only late in the compilation process */
8418 f = mono_domain_alloc (cfg->domain, sizeof (float));
8420 CHECK_STACK_OVF (1);
8426 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8428 dreg = alloc_freg (cfg);
8429 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8430 ins->type = cfg->r4_stack_type;
8432 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8433 ins->type = cfg->r4_stack_type;
8434 ins->dreg = alloc_dreg (cfg, STACK_R8);
8436 MONO_ADD_INS (cfg->cbb, ins);
8446 gboolean use_aotconst = FALSE;
8448 #ifdef TARGET_POWERPC
8449 /* FIXME: Clean this up */
8450 if (cfg->compile_aot)
8451 use_aotconst = TRUE;
8454 /* FIXME: we should really allocate this only late in the compilation process */
8455 d = mono_domain_alloc (cfg->domain, sizeof (double));
8457 CHECK_STACK_OVF (1);
8463 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8465 dreg = alloc_freg (cfg);
8466 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8467 ins->type = STACK_R8;
8469 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8470 ins->type = STACK_R8;
8471 ins->dreg = alloc_dreg (cfg, STACK_R8);
8473 MONO_ADD_INS (cfg->cbb, ins);
8482 MonoInst *temp, *store;
8484 CHECK_STACK_OVF (1);
8488 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8489 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8491 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8494 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8507 if (sp [0]->type == STACK_R8)
8508 /* we need to pop the value from the x86 FP stack */
8509 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8515 INLINE_FAILURE ("jmp");
8516 GSHAREDVT_FAILURE (*ip);
8519 if (stack_start != sp)
8521 token = read32 (ip + 1);
8522 /* FIXME: check the signature matches */
8523 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8525 if (!cmethod || mono_loader_get_last_error ())
8528 if (cfg->gshared && mono_method_check_context_used (cmethod))
8529 GENERIC_SHARING_FAILURE (CEE_JMP);
8531 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8533 if (ARCH_HAVE_OP_TAIL_CALL) {
8534 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8537 /* Handle tail calls similarly to calls */
8538 n = fsig->param_count + fsig->hasthis;
8542 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8543 call->method = cmethod;
8544 call->tail_call = TRUE;
8545 call->signature = mono_method_signature (cmethod);
8546 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8547 call->inst.inst_p0 = cmethod;
8548 for (i = 0; i < n; ++i)
8549 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8551 mono_arch_emit_call (cfg, call);
8552 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8553 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8555 for (i = 0; i < num_args; ++i)
8556 /* Prevent arguments from being optimized away */
8557 arg_array [i]->flags |= MONO_INST_VOLATILE;
8559 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8560 ins = (MonoInst*)call;
8561 ins->inst_p0 = cmethod;
8562 MONO_ADD_INS (cfg->cbb, ins);
8566 start_new_bblock = 1;
8571 MonoMethodSignature *fsig;
8574 token = read32 (ip + 1);
8578 //GSHAREDVT_FAILURE (*ip);
8583 fsig = mini_get_signature (method, token, generic_context);
8585 if (method->dynamic && fsig->pinvoke) {
8589 * This is a call through a function pointer using a pinvoke
8590 * signature. Have to create a wrapper and call that instead.
8591 * FIXME: This is very slow, need to create a wrapper at JIT time
8592 * instead based on the signature.
8594 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8595 EMIT_NEW_PCONST (cfg, args [1], fsig);
8597 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8600 n = fsig->param_count + fsig->hasthis;
8604 //g_assert (!virtual || fsig->hasthis);
8608 inline_costs += 10 * num_calls++;
8611 * Making generic calls out of gsharedvt methods.
8612 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8613 * patching gshared method addresses into a gsharedvt method.
8615 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8617 * We pass the address to the gsharedvt trampoline in the rgctx reg
8619 MonoInst *callee = addr;
8621 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8623 GSHAREDVT_FAILURE (*ip);
8625 addr = emit_get_rgctx_sig (cfg, context_used,
8626 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8627 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8631 /* Prevent inlining of methods with indirect calls */
8632 INLINE_FAILURE ("indirect call");
8634 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8639 * Instead of emitting an indirect call, emit a direct call
8640 * with the contents of the aotconst as the patch info.
8642 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8643 info_type = addr->inst_c1;
8644 info_data = addr->inst_p0;
8646 info_type = addr->inst_right->inst_c1;
8647 info_data = addr->inst_right->inst_left;
8650 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8651 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8656 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8660 /* End of call, INS should contain the result of the call, if any */
8662 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8664 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8667 CHECK_CFG_EXCEPTION;
8671 constrained_class = NULL;
8675 case CEE_CALLVIRT: {
8676 MonoInst *addr = NULL;
8677 MonoMethodSignature *fsig = NULL;
8679 int virtual = *ip == CEE_CALLVIRT;
8680 gboolean pass_imt_from_rgctx = FALSE;
8681 MonoInst *imt_arg = NULL;
8682 MonoInst *keep_this_alive = NULL;
8683 gboolean pass_vtable = FALSE;
8684 gboolean pass_mrgctx = FALSE;
8685 MonoInst *vtable_arg = NULL;
8686 gboolean check_this = FALSE;
8687 gboolean supported_tail_call = FALSE;
8688 gboolean tail_call = FALSE;
8689 gboolean need_seq_point = FALSE;
8690 guint32 call_opcode = *ip;
8691 gboolean emit_widen = TRUE;
8692 gboolean push_res = TRUE;
8693 gboolean skip_ret = FALSE;
8694 gboolean delegate_invoke = FALSE;
8695 gboolean direct_icall = FALSE;
8696 gboolean constrained_partial_call = FALSE;
8697 MonoMethod *cil_method;
8700 token = read32 (ip + 1);
8704 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8705 cil_method = cmethod;
8707 if (constrained_class) {
8708 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8709 if (!mini_is_gsharedvt_klass (constrained_class)) {
8710 g_assert (!cmethod->klass->valuetype);
8711 if (!mini_type_is_reference (&constrained_class->byval_arg))
8712 constrained_partial_call = TRUE;
8716 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8717 if (cfg->verbose_level > 2)
8718 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8719 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8720 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8722 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8726 if (cfg->verbose_level > 2)
8727 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8729 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8731 * This is needed since get_method_constrained can't find
8732 * the method in klass representing a type var.
8733 * The type var is guaranteed to be a reference type in this
8736 if (!mini_is_gsharedvt_klass (constrained_class))
8737 g_assert (!cmethod->klass->valuetype);
8739 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8745 if (!cmethod || mono_loader_get_last_error ())
8747 if (!dont_verify && !cfg->skip_visibility) {
8748 MonoMethod *target_method = cil_method;
8749 if (method->is_inflated) {
8750 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8752 if (!mono_method_can_access_method (method_definition, target_method) &&
8753 !mono_method_can_access_method (method, cil_method))
8754 METHOD_ACCESS_FAILURE (method, cil_method);
8757 if (mono_security_core_clr_enabled ())
8758 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8760 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8761 /* MS.NET seems to silently convert this to a callvirt */
8766 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8767 * converts to a callvirt.
8769 * tests/bug-515884.il is an example of this behavior
8771 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8772 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8773 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8777 if (!cmethod->klass->inited)
8778 if (!mono_class_init (cmethod->klass))
8779 TYPE_LOAD_ERROR (cmethod->klass);
8781 fsig = mono_method_signature (cmethod);
8784 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8785 mini_class_is_system_array (cmethod->klass)) {
8786 array_rank = cmethod->klass->rank;
8787 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8788 direct_icall = TRUE;
8789 } else if (fsig->pinvoke) {
8790 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8791 fsig = mono_method_signature (wrapper);
8792 } else if (constrained_class) {
8794 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8798 /* See code below */
8799 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8800 MonoBasicBlock *tbb;
8802 GET_BBLOCK (cfg, tbb, ip + 5);
8803 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8805 * We want to extend the try block to cover the call, but we can't do it if the
8806 * call is made directly since its followed by an exception check.
8808 direct_icall = FALSE;
8812 mono_save_token_info (cfg, image, token, cil_method);
8814 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8815 need_seq_point = TRUE;
8817 /* Don't support calls made using type arguments for now */
8819 if (cfg->gsharedvt) {
8820 if (mini_is_gsharedvt_signature (fsig))
8821 GSHAREDVT_FAILURE (*ip);
8825 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8826 g_assert_not_reached ();
8828 n = fsig->param_count + fsig->hasthis;
8830 if (!cfg->gshared && cmethod->klass->generic_container)
8834 g_assert (!mono_method_check_context_used (cmethod));
8838 //g_assert (!virtual || fsig->hasthis);
8842 if (constrained_class) {
8843 if (mini_is_gsharedvt_klass (constrained_class)) {
8844 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8845 /* The 'Own method' case below */
8846 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8847 /* 'The type parameter is instantiated as a reference type' case below. */
8849 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8850 CHECK_CFG_EXCEPTION;
8857 * We have the `constrained.' prefix opcode.
8859 if (constrained_partial_call) {
8860 gboolean need_box = TRUE;
8863 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8864 * called method is not known at compile time either. The called method could end up being
8865 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8866 * to box the receiver.
8867 * A simple solution would be to box always and make a normal virtual call, but that would
8868 * be bad performance wise.
8870 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8872 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
8877 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8878 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8879 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8880 ins->klass = constrained_class;
8881 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8882 CHECK_CFG_EXCEPTION;
8883 } else if (need_box) {
8885 MonoBasicBlock *is_ref_bb, *end_bb;
8886 MonoInst *nonbox_call;
8889 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8891 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8892 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8894 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8896 NEW_BBLOCK (cfg, is_ref_bb);
8897 NEW_BBLOCK (cfg, end_bb);
8899 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8901 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8904 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8906 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8909 MONO_START_BB (cfg, is_ref_bb);
8910 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8911 ins->klass = constrained_class;
8912 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8913 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8917 MONO_START_BB (cfg, end_bb);
8920 nonbox_call->dreg = ins->dreg;
8923 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8924 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8925 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8928 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8930 * The type parameter is instantiated as a valuetype,
8931 * but that type doesn't override the method we're
8932 * calling, so we need to box `this'.
8934 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8935 ins->klass = constrained_class;
8936 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8937 CHECK_CFG_EXCEPTION;
8938 } else if (!constrained_class->valuetype) {
8939 int dreg = alloc_ireg_ref (cfg);
8942 * The type parameter is instantiated as a reference
8943 * type. We have a managed pointer on the stack, so
8944 * we need to dereference it here.
8946 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8947 ins->type = STACK_OBJ;
8950 if (cmethod->klass->valuetype) {
8953 /* Interface method */
8956 mono_class_setup_vtable (constrained_class);
8957 CHECK_TYPELOAD (constrained_class);
8958 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8960 TYPE_LOAD_ERROR (constrained_class);
8961 slot = mono_method_get_vtable_slot (cmethod);
8963 TYPE_LOAD_ERROR (cmethod->klass);
8964 cmethod = constrained_class->vtable [ioffset + slot];
8966 if (cmethod->klass == mono_defaults.enum_class) {
8967 /* Enum implements some interfaces, so treat this as the first case */
8968 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8969 ins->klass = constrained_class;
8970 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8971 CHECK_CFG_EXCEPTION;
8976 constrained_class = NULL;
8979 if (check_call_signature (cfg, fsig, sp))
8982 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8983 delegate_invoke = TRUE;
8985 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8986 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8987 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8995 * If the callee is a shared method, then its static cctor
8996 * might not get called after the call was patched.
8998 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8999 emit_class_init (cfg, cmethod->klass);
9000 CHECK_TYPELOAD (cmethod->klass);
9003 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9006 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9008 context_used = mini_method_check_context_used (cfg, cmethod);
9010 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9011 /* Generic method interface
9012 calls are resolved via a
9013 helper function and don't
9015 if (!cmethod_context || !cmethod_context->method_inst)
9016 pass_imt_from_rgctx = TRUE;
9020 * If a shared method calls another
9021 * shared method then the caller must
9022 * have a generic sharing context
9023 * because the magic trampoline
9024 * requires it. FIXME: We shouldn't
9025 * have to force the vtable/mrgctx
9026 * variable here. Instead there
9027 * should be a flag in the cfg to
9028 * request a generic sharing context.
9031 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9032 mono_get_vtable_var (cfg);
9037 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9039 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9041 CHECK_TYPELOAD (cmethod->klass);
9042 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9047 g_assert (!vtable_arg);
9049 if (!cfg->compile_aot) {
9051 * emit_get_rgctx_method () calls mono_class_vtable () so check
9052 * for type load errors before.
9054 mono_class_setup_vtable (cmethod->klass);
9055 CHECK_TYPELOAD (cmethod->klass);
9058 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9060 /* !marshalbyref is needed to properly handle generic methods + remoting */
9061 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9062 MONO_METHOD_IS_FINAL (cmethod)) &&
9063 !mono_class_is_marshalbyref (cmethod->klass)) {
9070 if (pass_imt_from_rgctx) {
9071 g_assert (!pass_vtable);
9073 imt_arg = emit_get_rgctx_method (cfg, context_used,
9074 cmethod, MONO_RGCTX_INFO_METHOD);
9078 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9080 /* Calling virtual generic methods */
9081 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9082 !(MONO_METHOD_IS_FINAL (cmethod) &&
9083 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9084 fsig->generic_param_count &&
9085 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))) {
9086 MonoInst *this_temp, *this_arg_temp, *store;
9087 MonoInst *iargs [4];
9088 gboolean use_imt = FALSE;
9090 g_assert (fsig->is_inflated);
9092 /* Prevent inlining of methods that contain indirect calls */
9093 INLINE_FAILURE ("virtual generic call");
9095 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9096 GSHAREDVT_FAILURE (*ip);
9098 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9099 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9104 g_assert (!imt_arg);
9106 g_assert (cmethod->is_inflated);
9107 imt_arg = emit_get_rgctx_method (cfg, context_used,
9108 cmethod, MONO_RGCTX_INFO_METHOD);
9109 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9111 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9112 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9113 MONO_ADD_INS (cfg->cbb, store);
9115 /* FIXME: This should be a managed pointer */
9116 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9118 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9119 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9120 cmethod, MONO_RGCTX_INFO_METHOD);
9121 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9122 addr = mono_emit_jit_icall (cfg,
9123 mono_helper_compile_generic_method, iargs);
9125 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9127 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9134 * Implement a workaround for the inherent races involved in locking:
9140 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9141 * try block, the Exit () won't be executed, see:
9142 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9143 * To work around this, we extend such try blocks to include the last x bytes
9144 * of the Monitor.Enter () call.
9146 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9147 MonoBasicBlock *tbb;
9149 GET_BBLOCK (cfg, tbb, ip + 5);
9151 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9152 * from Monitor.Enter like ArgumentNullException.
9154 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9155 /* Mark this bblock as needing to be extended */
9156 tbb->extend_try_block = TRUE;
9160 /* Conversion to a JIT intrinsic */
9161 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9162 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9163 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9170 if ((cfg->opt & MONO_OPT_INLINE) &&
9171 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9172 mono_method_check_inlining (cfg, cmethod)) {
9174 gboolean always = FALSE;
9176 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9177 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9178 /* Prevent inlining of methods that call wrappers */
9179 INLINE_FAILURE ("wrapper call");
9180 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9184 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9186 cfg->real_offset += 5;
9188 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9189 /* *sp is already set by inline_method */
9194 inline_costs += costs;
9200 /* Tail recursion elimination */
9201 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9202 gboolean has_vtargs = FALSE;
9205 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9206 INLINE_FAILURE ("tail call");
9208 /* keep it simple */
9209 for (i = fsig->param_count - 1; i >= 0; i--) {
9210 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9215 for (i = 0; i < n; ++i)
9216 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9217 MONO_INST_NEW (cfg, ins, OP_BR);
9218 MONO_ADD_INS (cfg->cbb, ins);
9219 tblock = start_bblock->out_bb [0];
9220 link_bblock (cfg, cfg->cbb, tblock);
9221 ins->inst_target_bb = tblock;
9222 start_new_bblock = 1;
9224 /* skip the CEE_RET, too */
9225 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9232 inline_costs += 10 * num_calls++;
9235 * Making generic calls out of gsharedvt methods.
9236 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9237 * patching gshared method addresses into a gsharedvt method.
9239 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9240 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9241 MonoRgctxInfoType info_type;
9244 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9245 //GSHAREDVT_FAILURE (*ip);
9246 // disable for possible remoting calls
9247 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9248 GSHAREDVT_FAILURE (*ip);
9249 if (fsig->generic_param_count) {
9250 /* virtual generic call */
9251 g_assert (!imt_arg);
9252 /* Same as the virtual generic case above */
9253 imt_arg = emit_get_rgctx_method (cfg, context_used,
9254 cmethod, MONO_RGCTX_INFO_METHOD);
9255 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9257 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9258 /* This can happen when we call a fully instantiated iface method */
9259 imt_arg = emit_get_rgctx_method (cfg, context_used,
9260 cmethod, MONO_RGCTX_INFO_METHOD);
9265 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9266 keep_this_alive = sp [0];
9268 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9269 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9271 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9272 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9274 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9278 /* Generic sharing */
9281 * Use this if the callee is gsharedvt sharable too, since
9282 * at runtime we might find an instantiation so the call cannot
9283 * be patched (the 'no_patch' code path in mini-trampolines.c).
9285 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9286 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9287 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9288 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9289 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9290 INLINE_FAILURE ("gshared");
9292 g_assert (cfg->gshared && cmethod);
9296 * We are compiling a call to a
9297 * generic method from shared code,
9298 * which means that we have to look up
9299 * the method in the rgctx and do an
9303 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9305 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9306 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9310 /* Direct calls to icalls */
9312 MonoMethod *wrapper;
9315 /* Inline the wrapper */
9316 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9318 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9319 g_assert (costs > 0);
9320 cfg->real_offset += 5;
9322 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9323 /* *sp is already set by inline_method */
9328 inline_costs += costs;
9337 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9338 MonoInst *val = sp [fsig->param_count];
9340 if (val->type == STACK_OBJ) {
9341 MonoInst *iargs [2];
9346 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9349 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9350 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9351 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9352 emit_write_barrier (cfg, addr, val);
9353 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9354 GSHAREDVT_FAILURE (*ip);
9355 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9356 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9358 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9359 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9360 if (!cmethod->klass->element_class->valuetype && !readonly)
9361 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9362 CHECK_TYPELOAD (cmethod->klass);
9365 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9368 g_assert_not_reached ();
9375 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9379 /* Tail prefix / tail call optimization */
9381 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9382 /* FIXME: runtime generic context pointer for jumps? */
9383 /* FIXME: handle this for generic sharing eventually */
9384 if ((ins_flag & MONO_INST_TAILCALL) &&
9385 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9386 supported_tail_call = TRUE;
9388 if (supported_tail_call) {
9391 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9392 INLINE_FAILURE ("tail call");
9394 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9396 if (ARCH_HAVE_OP_TAIL_CALL) {
9397 /* Handle tail calls similarly to normal calls */
9400 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9402 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9403 call->tail_call = TRUE;
9404 call->method = cmethod;
9405 call->signature = mono_method_signature (cmethod);
9408 * We implement tail calls by storing the actual arguments into the
9409 * argument variables, then emitting a CEE_JMP.
9411 for (i = 0; i < n; ++i) {
9412 /* Prevent argument from being register allocated */
9413 arg_array [i]->flags |= MONO_INST_VOLATILE;
9414 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9416 ins = (MonoInst*)call;
9417 ins->inst_p0 = cmethod;
9418 ins->inst_p1 = arg_array [0];
9419 MONO_ADD_INS (cfg->cbb, ins);
9420 link_bblock (cfg, cfg->cbb, end_bblock);
9421 start_new_bblock = 1;
9423 // FIXME: Eliminate unreachable epilogs
9426 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9427 * only reachable from this call.
9429 GET_BBLOCK (cfg, tblock, ip + 5);
9430 if (tblock == cfg->cbb || tblock->in_count == 0)
9439 * Synchronized wrappers.
9440 * Its hard to determine where to replace a method with its synchronized
9441 * wrapper without causing an infinite recursion. The current solution is
9442 * to add the synchronized wrapper in the trampolines, and to
9443 * change the called method to a dummy wrapper, and resolve that wrapper
9444 * to the real method in mono_jit_compile_method ().
9446 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9447 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9448 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9449 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9453 INLINE_FAILURE ("call");
9454 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9455 imt_arg, vtable_arg);
9458 link_bblock (cfg, cfg->cbb, end_bblock);
9459 start_new_bblock = 1;
9461 // FIXME: Eliminate unreachable epilogs
9464 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9465 * only reachable from this call.
9467 GET_BBLOCK (cfg, tblock, ip + 5);
9468 if (tblock == cfg->cbb || tblock->in_count == 0)
9475 /* End of call, INS should contain the result of the call, if any */
9477 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9480 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9485 if (keep_this_alive) {
9486 MonoInst *dummy_use;
9488 /* See mono_emit_method_call_full () */
9489 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9492 CHECK_CFG_EXCEPTION;
9496 g_assert (*ip == CEE_RET);
9500 constrained_class = NULL;
9502 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9506 if (cfg->method != method) {
9507 /* return from inlined method */
9509 * If in_count == 0, that means the ret is unreachable due to
9510 * being preceeded by a throw. In that case, inline_method () will
9511 * handle setting the return value
9512 * (test case: test_0_inline_throw ()).
9514 if (return_var && cfg->cbb->in_count) {
9515 MonoType *ret_type = mono_method_signature (method)->ret;
9521 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9524 //g_assert (returnvar != -1);
9525 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9526 cfg->ret_var_set = TRUE;
9529 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9531 if (cfg->lmf_var && cfg->cbb->in_count)
9535 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9537 if (seq_points && !sym_seq_points) {
9539 * Place a seq point here too even through the IL stack is not
9540 * empty, so a step over on
9543 * will work correctly.
9545 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9546 MONO_ADD_INS (cfg->cbb, ins);
9549 g_assert (!return_var);
9553 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9556 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9559 if (!cfg->vret_addr) {
9562 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9564 EMIT_NEW_RETLOADA (cfg, ret_addr);
9566 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9567 ins->klass = mono_class_from_mono_type (ret_type);
9570 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9571 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9572 MonoInst *iargs [1];
9576 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9577 mono_arch_emit_setret (cfg, method, conv);
9579 mono_arch_emit_setret (cfg, method, *sp);
9582 mono_arch_emit_setret (cfg, method, *sp);
9587 if (sp != stack_start)
9589 MONO_INST_NEW (cfg, ins, OP_BR);
9591 ins->inst_target_bb = end_bblock;
9592 MONO_ADD_INS (cfg->cbb, ins);
9593 link_bblock (cfg, cfg->cbb, end_bblock);
9594 start_new_bblock = 1;
9598 MONO_INST_NEW (cfg, ins, OP_BR);
9600 target = ip + 1 + (signed char)(*ip);
9602 GET_BBLOCK (cfg, tblock, target);
9603 link_bblock (cfg, cfg->cbb, tblock);
9604 ins->inst_target_bb = tblock;
9605 if (sp != stack_start) {
9606 handle_stack_args (cfg, stack_start, sp - stack_start);
9608 CHECK_UNVERIFIABLE (cfg);
9610 MONO_ADD_INS (cfg->cbb, ins);
9611 start_new_bblock = 1;
9612 inline_costs += BRANCH_COST;
9626 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9628 target = ip + 1 + *(signed char*)ip;
9634 inline_costs += BRANCH_COST;
9638 MONO_INST_NEW (cfg, ins, OP_BR);
9641 target = ip + 4 + (gint32)read32(ip);
9643 GET_BBLOCK (cfg, tblock, target);
9644 link_bblock (cfg, cfg->cbb, tblock);
9645 ins->inst_target_bb = tblock;
9646 if (sp != stack_start) {
9647 handle_stack_args (cfg, stack_start, sp - stack_start);
9649 CHECK_UNVERIFIABLE (cfg);
9652 MONO_ADD_INS (cfg->cbb, ins);
9654 start_new_bblock = 1;
9655 inline_costs += BRANCH_COST;
9662 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9663 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9664 guint32 opsize = is_short ? 1 : 4;
9666 CHECK_OPSIZE (opsize);
9668 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9671 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9676 GET_BBLOCK (cfg, tblock, target);
9677 link_bblock (cfg, cfg->cbb, tblock);
9678 GET_BBLOCK (cfg, tblock, ip);
9679 link_bblock (cfg, cfg->cbb, tblock);
9681 if (sp != stack_start) {
9682 handle_stack_args (cfg, stack_start, sp - stack_start);
9683 CHECK_UNVERIFIABLE (cfg);
9686 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9687 cmp->sreg1 = sp [0]->dreg;
9688 type_from_op (cfg, cmp, sp [0], NULL);
9691 #if SIZEOF_REGISTER == 4
9692 if (cmp->opcode == OP_LCOMPARE_IMM) {
9693 /* Convert it to OP_LCOMPARE */
9694 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9695 ins->type = STACK_I8;
9696 ins->dreg = alloc_dreg (cfg, STACK_I8);
9698 MONO_ADD_INS (cfg->cbb, ins);
9699 cmp->opcode = OP_LCOMPARE;
9700 cmp->sreg2 = ins->dreg;
9703 MONO_ADD_INS (cfg->cbb, cmp);
9705 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9706 type_from_op (cfg, ins, sp [0], NULL);
9707 MONO_ADD_INS (cfg->cbb, ins);
9708 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9709 GET_BBLOCK (cfg, tblock, target);
9710 ins->inst_true_bb = tblock;
9711 GET_BBLOCK (cfg, tblock, ip);
9712 ins->inst_false_bb = tblock;
9713 start_new_bblock = 2;
9716 inline_costs += BRANCH_COST;
9731 MONO_INST_NEW (cfg, ins, *ip);
9733 target = ip + 4 + (gint32)read32(ip);
9739 inline_costs += BRANCH_COST;
9743 MonoBasicBlock **targets;
9744 MonoBasicBlock *default_bblock;
9745 MonoJumpInfoBBTable *table;
9746 int offset_reg = alloc_preg (cfg);
9747 int target_reg = alloc_preg (cfg);
9748 int table_reg = alloc_preg (cfg);
9749 int sum_reg = alloc_preg (cfg);
9750 gboolean use_op_switch;
9754 n = read32 (ip + 1);
9757 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9761 CHECK_OPSIZE (n * sizeof (guint32));
9762 target = ip + n * sizeof (guint32);
9764 GET_BBLOCK (cfg, default_bblock, target);
9765 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9767 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9768 for (i = 0; i < n; ++i) {
9769 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9770 targets [i] = tblock;
9771 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9775 if (sp != stack_start) {
9777 * Link the current bb with the targets as well, so handle_stack_args
9778 * will set their in_stack correctly.
9780 link_bblock (cfg, cfg->cbb, default_bblock);
9781 for (i = 0; i < n; ++i)
9782 link_bblock (cfg, cfg->cbb, targets [i]);
9784 handle_stack_args (cfg, stack_start, sp - stack_start);
9786 CHECK_UNVERIFIABLE (cfg);
9789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9792 for (i = 0; i < n; ++i)
9793 link_bblock (cfg, cfg->cbb, targets [i]);
9795 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9796 table->table = targets;
9797 table->table_size = n;
9799 use_op_switch = FALSE;
9801 /* ARM implements SWITCH statements differently */
9802 /* FIXME: Make it use the generic implementation */
9803 if (!cfg->compile_aot)
9804 use_op_switch = TRUE;
9807 if (COMPILE_LLVM (cfg))
9808 use_op_switch = TRUE;
9810 cfg->cbb->has_jump_table = 1;
9812 if (use_op_switch) {
9813 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9814 ins->sreg1 = src1->dreg;
9815 ins->inst_p0 = table;
9816 ins->inst_many_bb = targets;
9817 ins->klass = GUINT_TO_POINTER (n);
9818 MONO_ADD_INS (cfg->cbb, ins);
9820 if (sizeof (gpointer) == 8)
9821 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9823 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9825 #if SIZEOF_REGISTER == 8
9826 /* The upper word might not be zero, and we add it to a 64 bit address later */
9827 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9830 if (cfg->compile_aot) {
9831 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9833 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9834 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9835 ins->inst_p0 = table;
9836 ins->dreg = table_reg;
9837 MONO_ADD_INS (cfg->cbb, ins);
9840 /* FIXME: Use load_memindex */
9841 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9842 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9843 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9845 start_new_bblock = 1;
9846 inline_costs += (BRANCH_COST * 2);
9866 dreg = alloc_freg (cfg);
9869 dreg = alloc_lreg (cfg);
9872 dreg = alloc_ireg_ref (cfg);
9875 dreg = alloc_preg (cfg);
9878 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9879 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9880 if (*ip == CEE_LDIND_R4)
9881 ins->type = cfg->r4_stack_type;
9882 ins->flags |= ins_flag;
9883 MONO_ADD_INS (cfg->cbb, ins);
9885 if (ins_flag & MONO_INST_VOLATILE) {
9886 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9887 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9903 if (ins_flag & MONO_INST_VOLATILE) {
9904 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9905 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9908 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9909 ins->flags |= ins_flag;
9912 MONO_ADD_INS (cfg->cbb, ins);
9914 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9915 emit_write_barrier (cfg, sp [0], sp [1]);
9924 MONO_INST_NEW (cfg, ins, (*ip));
9926 ins->sreg1 = sp [0]->dreg;
9927 ins->sreg2 = sp [1]->dreg;
9928 type_from_op (cfg, ins, sp [0], sp [1]);
9930 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9932 /* Use the immediate opcodes if possible */
9933 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9934 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9935 if (imm_opcode != -1) {
9936 ins->opcode = imm_opcode;
9937 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9940 NULLIFY_INS (sp [1]);
9944 MONO_ADD_INS ((cfg)->cbb, (ins));
9946 *sp++ = mono_decompose_opcode (cfg, ins);
9963 MONO_INST_NEW (cfg, ins, (*ip));
9965 ins->sreg1 = sp [0]->dreg;
9966 ins->sreg2 = sp [1]->dreg;
9967 type_from_op (cfg, ins, sp [0], sp [1]);
9969 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9970 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9972 /* FIXME: Pass opcode to is_inst_imm */
9974 /* Use the immediate opcodes if possible */
9975 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9978 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9979 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9980 /* Keep emulated opcodes which are optimized away later */
9981 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9982 imm_opcode = mono_op_to_op_imm (ins->opcode);
9985 if (imm_opcode != -1) {
9986 ins->opcode = imm_opcode;
9987 if (sp [1]->opcode == OP_I8CONST) {
9988 #if SIZEOF_REGISTER == 8
9989 ins->inst_imm = sp [1]->inst_l;
9991 ins->inst_ls_word = sp [1]->inst_ls_word;
9992 ins->inst_ms_word = sp [1]->inst_ms_word;
9996 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9999 /* Might be followed by an instruction added by add_widen_op */
10000 if (sp [1]->next == NULL)
10001 NULLIFY_INS (sp [1]);
10004 MONO_ADD_INS ((cfg)->cbb, (ins));
10006 *sp++ = mono_decompose_opcode (cfg, ins);
10019 case CEE_CONV_OVF_I8:
10020 case CEE_CONV_OVF_U8:
10021 case CEE_CONV_R_UN:
10024 /* Special case this earlier so we have long constants in the IR */
10025 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10026 int data = sp [-1]->inst_c0;
10027 sp [-1]->opcode = OP_I8CONST;
10028 sp [-1]->type = STACK_I8;
10029 #if SIZEOF_REGISTER == 8
10030 if ((*ip) == CEE_CONV_U8)
10031 sp [-1]->inst_c0 = (guint32)data;
10033 sp [-1]->inst_c0 = data;
10035 sp [-1]->inst_ls_word = data;
10036 if ((*ip) == CEE_CONV_U8)
10037 sp [-1]->inst_ms_word = 0;
10039 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10041 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10048 case CEE_CONV_OVF_I4:
10049 case CEE_CONV_OVF_I1:
10050 case CEE_CONV_OVF_I2:
10051 case CEE_CONV_OVF_I:
10052 case CEE_CONV_OVF_U:
10055 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10056 ADD_UNOP (CEE_CONV_OVF_I8);
10063 case CEE_CONV_OVF_U1:
10064 case CEE_CONV_OVF_U2:
10065 case CEE_CONV_OVF_U4:
10068 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10069 ADD_UNOP (CEE_CONV_OVF_U8);
10076 case CEE_CONV_OVF_I1_UN:
10077 case CEE_CONV_OVF_I2_UN:
10078 case CEE_CONV_OVF_I4_UN:
10079 case CEE_CONV_OVF_I8_UN:
10080 case CEE_CONV_OVF_U1_UN:
10081 case CEE_CONV_OVF_U2_UN:
10082 case CEE_CONV_OVF_U4_UN:
10083 case CEE_CONV_OVF_U8_UN:
10084 case CEE_CONV_OVF_I_UN:
10085 case CEE_CONV_OVF_U_UN:
10092 CHECK_CFG_EXCEPTION;
10096 case CEE_ADD_OVF_UN:
10098 case CEE_MUL_OVF_UN:
10100 case CEE_SUB_OVF_UN:
10106 GSHAREDVT_FAILURE (*ip);
10109 token = read32 (ip + 1);
10110 klass = mini_get_class (method, token, generic_context);
10111 CHECK_TYPELOAD (klass);
10113 if (generic_class_is_reference_type (cfg, klass)) {
10114 MonoInst *store, *load;
10115 int dreg = alloc_ireg_ref (cfg);
10117 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10118 load->flags |= ins_flag;
10119 MONO_ADD_INS (cfg->cbb, load);
10121 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10122 store->flags |= ins_flag;
10123 MONO_ADD_INS (cfg->cbb, store);
10125 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10126 emit_write_barrier (cfg, sp [0], sp [1]);
10128 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10134 int loc_index = -1;
10140 token = read32 (ip + 1);
10141 klass = mini_get_class (method, token, generic_context);
10142 CHECK_TYPELOAD (klass);
10144 /* Optimize the common ldobj+stloc combination */
10147 loc_index = ip [6];
10154 loc_index = ip [5] - CEE_STLOC_0;
10161 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10162 CHECK_LOCAL (loc_index);
10164 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10165 ins->dreg = cfg->locals [loc_index]->dreg;
10166 ins->flags |= ins_flag;
10169 if (ins_flag & MONO_INST_VOLATILE) {
10170 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10171 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10177 /* Optimize the ldobj+stobj combination */
10178 /* The reference case ends up being a load+store anyway */
10179 /* Skip this if the operation is volatile. */
10180 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10185 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10192 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10193 ins->flags |= ins_flag;
10196 if (ins_flag & MONO_INST_VOLATILE) {
10197 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10198 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10207 CHECK_STACK_OVF (1);
10209 n = read32 (ip + 1);
10211 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10212 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10213 ins->type = STACK_OBJ;
10216 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10217 MonoInst *iargs [1];
10218 char *str = mono_method_get_wrapper_data (method, n);
10220 if (cfg->compile_aot)
10221 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10223 EMIT_NEW_PCONST (cfg, iargs [0], str);
10224 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10226 if (cfg->opt & MONO_OPT_SHARED) {
10227 MonoInst *iargs [3];
10229 if (cfg->compile_aot) {
10230 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10232 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10233 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10234 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10235 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10236 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10238 if (cfg->cbb->out_of_line) {
10239 MonoInst *iargs [2];
10241 if (image == mono_defaults.corlib) {
10243 * Avoid relocations in AOT and save some space by using a
10244 * version of helper_ldstr specialized to mscorlib.
10246 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10247 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10249 /* Avoid creating the string object */
10250 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10251 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10252 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10256 if (cfg->compile_aot) {
10257 NEW_LDSTRCONST (cfg, ins, image, n);
10259 MONO_ADD_INS (cfg->cbb, ins);
10262 NEW_PCONST (cfg, ins, NULL);
10263 ins->type = STACK_OBJ;
10264 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10266 OUT_OF_MEMORY_FAILURE;
10269 MONO_ADD_INS (cfg->cbb, ins);
10278 MonoInst *iargs [2];
10279 MonoMethodSignature *fsig;
10282 MonoInst *vtable_arg = NULL;
10285 token = read32 (ip + 1);
10286 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10287 if (!cmethod || mono_loader_get_last_error ())
10289 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10292 mono_save_token_info (cfg, image, token, cmethod);
10294 if (!mono_class_init (cmethod->klass))
10295 TYPE_LOAD_ERROR (cmethod->klass);
10297 context_used = mini_method_check_context_used (cfg, cmethod);
10299 if (mono_security_core_clr_enabled ())
10300 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10302 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10303 emit_class_init (cfg, cmethod->klass);
10304 CHECK_TYPELOAD (cmethod->klass);
10308 if (cfg->gsharedvt) {
10309 if (mini_is_gsharedvt_variable_signature (sig))
10310 GSHAREDVT_FAILURE (*ip);
10314 n = fsig->param_count;
10318 * Generate smaller code for the common newobj <exception> instruction in
10319 * argument checking code.
10321 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10322 is_exception_class (cmethod->klass) && n <= 2 &&
10323 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10324 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10325 MonoInst *iargs [3];
10329 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10332 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10335 iargs [1] = sp [0];
10336 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10339 iargs [1] = sp [0];
10340 iargs [2] = sp [1];
10341 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10344 g_assert_not_reached ();
10352 /* move the args to allow room for 'this' in the first position */
10358 /* check_call_signature () requires sp[0] to be set */
10359 this_ins.type = STACK_OBJ;
10360 sp [0] = &this_ins;
10361 if (check_call_signature (cfg, fsig, sp))
10366 if (mini_class_is_system_array (cmethod->klass)) {
10367 *sp = emit_get_rgctx_method (cfg, context_used,
10368 cmethod, MONO_RGCTX_INFO_METHOD);
10370 /* Avoid varargs in the common case */
10371 if (fsig->param_count == 1)
10372 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10373 else if (fsig->param_count == 2)
10374 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10375 else if (fsig->param_count == 3)
10376 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10377 else if (fsig->param_count == 4)
10378 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10380 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10381 } else if (cmethod->string_ctor) {
10382 g_assert (!context_used);
10383 g_assert (!vtable_arg);
10384 /* we simply pass a null pointer */
10385 EMIT_NEW_PCONST (cfg, *sp, NULL);
10386 /* now call the string ctor */
10387 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10389 if (cmethod->klass->valuetype) {
10390 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10391 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10392 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10397 * The code generated by mini_emit_virtual_call () expects
10398 * iargs [0] to be a boxed instance, but luckily the vcall
10399 * will be transformed into a normal call there.
10401 } else if (context_used) {
10402 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10405 MonoVTable *vtable = NULL;
10407 if (!cfg->compile_aot)
10408 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10409 CHECK_TYPELOAD (cmethod->klass);
10412 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10413 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10414 * As a workaround, we call class cctors before allocating objects.
10416 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10417 emit_class_init (cfg, cmethod->klass);
10418 if (cfg->verbose_level > 2)
10419 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10420 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10423 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10426 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10429 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10431 /* Now call the actual ctor */
10432 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10433 CHECK_CFG_EXCEPTION;
10436 if (alloc == NULL) {
10438 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10439 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10447 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10448 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10451 case CEE_CASTCLASS:
10455 token = read32 (ip + 1);
10456 klass = mini_get_class (method, token, generic_context);
10457 CHECK_TYPELOAD (klass);
10458 if (sp [0]->type != STACK_OBJ)
10461 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10462 CHECK_CFG_EXCEPTION;
10471 token = read32 (ip + 1);
10472 klass = mini_get_class (method, token, generic_context);
10473 CHECK_TYPELOAD (klass);
10474 if (sp [0]->type != STACK_OBJ)
10477 context_used = mini_class_check_context_used (cfg, klass);
10479 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10480 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10481 MonoInst *args [3];
10488 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10491 if (cfg->compile_aot) {
10492 idx = get_castclass_cache_idx (cfg);
10493 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10495 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10498 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10501 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10502 MonoMethod *mono_isinst;
10503 MonoInst *iargs [1];
10506 mono_isinst = mono_marshal_get_isinst (klass);
10507 iargs [0] = sp [0];
10509 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10510 iargs, ip, cfg->real_offset, TRUE);
10511 CHECK_CFG_EXCEPTION;
10512 g_assert (costs > 0);
10515 cfg->real_offset += 5;
10519 inline_costs += costs;
10522 ins = handle_isinst (cfg, klass, *sp, context_used);
10523 CHECK_CFG_EXCEPTION;
10529 case CEE_UNBOX_ANY: {
10530 MonoInst *res, *addr;
10535 token = read32 (ip + 1);
10536 klass = mini_get_class (method, token, generic_context);
10537 CHECK_TYPELOAD (klass);
10539 mono_save_token_info (cfg, image, token, klass);
10541 context_used = mini_class_check_context_used (cfg, klass);
10543 if (mini_is_gsharedvt_klass (klass)) {
10544 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10546 } else if (generic_class_is_reference_type (cfg, klass)) {
10547 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10548 CHECK_CFG_EXCEPTION;
10549 } else if (mono_class_is_nullable (klass)) {
10550 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10552 addr = handle_unbox (cfg, klass, sp, context_used);
10554 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10565 MonoClass *enum_class;
10566 MonoMethod *has_flag;
10572 token = read32 (ip + 1);
10573 klass = mini_get_class (method, token, generic_context);
10574 CHECK_TYPELOAD (klass);
10576 mono_save_token_info (cfg, image, token, klass);
10578 context_used = mini_class_check_context_used (cfg, klass);
10580 if (generic_class_is_reference_type (cfg, klass)) {
10586 if (klass == mono_defaults.void_class)
10588 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10590 /* frequent check in generic code: box (struct), brtrue */
10595 * <push int/long ptr>
10598 * constrained. MyFlags
10599 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10601 * If we find this sequence and the operand types on box and constrained
10602 * are equal, we can emit a specialized instruction sequence instead of
10603 * the very slow HasFlag () call.
10605 if ((cfg->opt & MONO_OPT_INTRINS) &&
10606 /* Cheap checks first. */
10607 ip + 5 + 6 + 5 < end &&
10608 ip [5] == CEE_PREFIX1 &&
10609 ip [6] == CEE_CONSTRAINED_ &&
10610 ip [11] == CEE_CALLVIRT &&
10611 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10612 mono_class_is_enum (klass) &&
10613 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10614 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10615 has_flag->klass == mono_defaults.enum_class &&
10616 !strcmp (has_flag->name, "HasFlag") &&
10617 has_flag->signature->hasthis &&
10618 has_flag->signature->param_count == 1) {
10619 CHECK_TYPELOAD (enum_class);
10621 if (enum_class == klass) {
10622 MonoInst *enum_this, *enum_flag;
10627 enum_this = sp [0];
10628 enum_flag = sp [1];
10630 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10635 // FIXME: LLVM can't handle the inconsistent bb linking
10636 if (!mono_class_is_nullable (klass) &&
10637 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10638 (ip [5] == CEE_BRTRUE ||
10639 ip [5] == CEE_BRTRUE_S ||
10640 ip [5] == CEE_BRFALSE ||
10641 ip [5] == CEE_BRFALSE_S)) {
10642 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10644 MonoBasicBlock *true_bb, *false_bb;
10648 if (cfg->verbose_level > 3) {
10649 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10650 printf ("<box+brtrue opt>\n");
10655 case CEE_BRFALSE_S:
10658 target = ip + 1 + (signed char)(*ip);
10665 target = ip + 4 + (gint)(read32 (ip));
10669 g_assert_not_reached ();
10673 * We need to link both bblocks, since it is needed for handling stack
10674 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10675 * Branching to only one of them would lead to inconsistencies, so
10676 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10678 GET_BBLOCK (cfg, true_bb, target);
10679 GET_BBLOCK (cfg, false_bb, ip);
10681 mono_link_bblock (cfg, cfg->cbb, true_bb);
10682 mono_link_bblock (cfg, cfg->cbb, false_bb);
10684 if (sp != stack_start) {
10685 handle_stack_args (cfg, stack_start, sp - stack_start);
10687 CHECK_UNVERIFIABLE (cfg);
10690 if (COMPILE_LLVM (cfg)) {
10691 dreg = alloc_ireg (cfg);
10692 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10695 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10697 /* The JIT can't eliminate the iconst+compare */
10698 MONO_INST_NEW (cfg, ins, OP_BR);
10699 ins->inst_target_bb = is_true ? true_bb : false_bb;
10700 MONO_ADD_INS (cfg->cbb, ins);
10703 start_new_bblock = 1;
10707 *sp++ = handle_box (cfg, val, klass, context_used);
10709 CHECK_CFG_EXCEPTION;
10718 token = read32 (ip + 1);
10719 klass = mini_get_class (method, token, generic_context);
10720 CHECK_TYPELOAD (klass);
10722 mono_save_token_info (cfg, image, token, klass);
10724 context_used = mini_class_check_context_used (cfg, klass);
10726 if (mono_class_is_nullable (klass)) {
10729 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10730 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10734 ins = handle_unbox (cfg, klass, sp, context_used);
10747 MonoClassField *field;
10748 #ifndef DISABLE_REMOTING
10752 gboolean is_instance;
10754 gpointer addr = NULL;
10755 gboolean is_special_static;
10757 MonoInst *store_val = NULL;
10758 MonoInst *thread_ins;
10761 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10763 if (op == CEE_STFLD) {
10766 store_val = sp [1];
10771 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10773 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10776 if (op == CEE_STSFLD) {
10779 store_val = sp [0];
10784 token = read32 (ip + 1);
10785 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10786 field = mono_method_get_wrapper_data (method, token);
10787 klass = field->parent;
10790 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10793 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10794 FIELD_ACCESS_FAILURE (method, field);
10795 mono_class_init (klass);
10797 /* if the class is Critical then transparent code cannot access it's fields */
10798 if (!is_instance && mono_security_core_clr_enabled ())
10799 ensure_method_is_allowed_to_access_field (cfg, method, field);
10801 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10802 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10803 if (mono_security_core_clr_enabled ())
10804 ensure_method_is_allowed_to_access_field (cfg, method, field);
10808 * LDFLD etc. is usable on static fields as well, so convert those cases to
10811 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10823 g_assert_not_reached ();
10825 is_instance = FALSE;
10828 context_used = mini_class_check_context_used (cfg, klass);
10830 /* INSTANCE CASE */
10832 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10833 if (op == CEE_STFLD) {
10834 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10836 #ifndef DISABLE_REMOTING
10837 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10838 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10839 MonoInst *iargs [5];
10841 GSHAREDVT_FAILURE (op);
10843 iargs [0] = sp [0];
10844 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10845 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10846 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10848 iargs [4] = sp [1];
10850 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10851 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10852 iargs, ip, cfg->real_offset, TRUE);
10853 CHECK_CFG_EXCEPTION;
10854 g_assert (costs > 0);
10856 cfg->real_offset += 5;
10858 inline_costs += costs;
10860 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10867 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10869 if (mini_is_gsharedvt_klass (klass)) {
10870 MonoInst *offset_ins;
10872 context_used = mini_class_check_context_used (cfg, klass);
10874 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10875 dreg = alloc_ireg_mp (cfg);
10876 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10877 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10878 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10880 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10882 if (sp [0]->opcode != OP_LDADDR)
10883 store->flags |= MONO_INST_FAULT;
10885 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10886 /* insert call to write barrier */
10890 dreg = alloc_ireg_mp (cfg);
10891 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10892 emit_write_barrier (cfg, ptr, sp [1]);
10895 store->flags |= ins_flag;
10902 #ifndef DISABLE_REMOTING
10903 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10904 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10905 MonoInst *iargs [4];
10907 GSHAREDVT_FAILURE (op);
10909 iargs [0] = sp [0];
10910 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10911 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10912 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10913 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10914 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10915 iargs, ip, cfg->real_offset, TRUE);
10916 CHECK_CFG_EXCEPTION;
10917 g_assert (costs > 0);
10919 cfg->real_offset += 5;
10923 inline_costs += costs;
10925 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10931 if (sp [0]->type == STACK_VTYPE) {
10934 /* Have to compute the address of the variable */
10936 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10938 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10940 g_assert (var->klass == klass);
10942 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10946 if (op == CEE_LDFLDA) {
10947 if (sp [0]->type == STACK_OBJ) {
10948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10949 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10952 dreg = alloc_ireg_mp (cfg);
10954 if (mini_is_gsharedvt_klass (klass)) {
10955 MonoInst *offset_ins;
10957 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10958 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10960 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10962 ins->klass = mono_class_from_mono_type (field->type);
10963 ins->type = STACK_MP;
10968 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10970 if (mini_is_gsharedvt_klass (klass)) {
10971 MonoInst *offset_ins;
10973 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10974 dreg = alloc_ireg_mp (cfg);
10975 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10976 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10978 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10980 load->flags |= ins_flag;
10981 if (sp [0]->opcode != OP_LDADDR)
10982 load->flags |= MONO_INST_FAULT;
10994 context_used = mini_class_check_context_used (cfg, klass);
10996 ftype = mono_field_get_type (field);
10998 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11001 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11002 * to be called here.
11004 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11005 mono_class_vtable (cfg->domain, klass);
11006 CHECK_TYPELOAD (klass);
11008 mono_domain_lock (cfg->domain);
11009 if (cfg->domain->special_static_fields)
11010 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11011 mono_domain_unlock (cfg->domain);
11013 is_special_static = mono_class_field_is_special_static (field);
11015 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11016 thread_ins = mono_get_thread_intrinsic (cfg);
11020 /* Generate IR to compute the field address */
11021 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11023 * Fast access to TLS data
11024 * Inline version of get_thread_static_data () in
11028 int idx, static_data_reg, array_reg, dreg;
11030 GSHAREDVT_FAILURE (op);
11032 MONO_ADD_INS (cfg->cbb, thread_ins);
11033 static_data_reg = alloc_ireg (cfg);
11034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11036 if (cfg->compile_aot) {
11037 int offset_reg, offset2_reg, idx_reg;
11039 /* For TLS variables, this will return the TLS offset */
11040 EMIT_NEW_SFLDACONST (cfg, ins, field);
11041 offset_reg = ins->dreg;
11042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11043 idx_reg = alloc_ireg (cfg);
11044 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11046 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11047 array_reg = alloc_ireg (cfg);
11048 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11049 offset2_reg = alloc_ireg (cfg);
11050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11051 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11052 dreg = alloc_ireg (cfg);
11053 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11055 offset = (gsize)addr & 0x7fffffff;
11056 idx = offset & 0x3f;
11058 array_reg = alloc_ireg (cfg);
11059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11060 dreg = alloc_ireg (cfg);
11061 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11063 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11064 (cfg->compile_aot && is_special_static) ||
11065 (context_used && is_special_static)) {
11066 MonoInst *iargs [2];
11068 g_assert (field->parent);
11069 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11070 if (context_used) {
11071 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11072 field, MONO_RGCTX_INFO_CLASS_FIELD);
11074 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11076 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11077 } else if (context_used) {
11078 MonoInst *static_data;
11081 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11082 method->klass->name_space, method->klass->name, method->name,
11083 depth, field->offset);
11086 if (mono_class_needs_cctor_run (klass, method))
11087 emit_class_init (cfg, klass);
11090 * The pointer we're computing here is
11092 * super_info.static_data + field->offset
11094 static_data = emit_get_rgctx_klass (cfg, context_used,
11095 klass, MONO_RGCTX_INFO_STATIC_DATA);
11097 if (mini_is_gsharedvt_klass (klass)) {
11098 MonoInst *offset_ins;
11100 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11101 dreg = alloc_ireg_mp (cfg);
11102 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11103 } else if (field->offset == 0) {
11106 int addr_reg = mono_alloc_preg (cfg);
11107 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11109 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11110 MonoInst *iargs [2];
11112 g_assert (field->parent);
11113 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11114 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11115 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11117 MonoVTable *vtable = NULL;
11119 if (!cfg->compile_aot)
11120 vtable = mono_class_vtable (cfg->domain, klass);
11121 CHECK_TYPELOAD (klass);
11124 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11125 if (!(g_slist_find (class_inits, klass))) {
11126 emit_class_init (cfg, klass);
11127 if (cfg->verbose_level > 2)
11128 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11129 class_inits = g_slist_prepend (class_inits, klass);
11132 if (cfg->run_cctors) {
11134 /* This makes so that inline cannot trigger */
11135 /* .cctors: too many apps depend on them */
11136 /* running with a specific order... */
11138 if (! vtable->initialized)
11139 INLINE_FAILURE ("class init");
11140 ex = mono_runtime_class_init_full (vtable, FALSE);
11142 set_exception_object (cfg, ex);
11143 goto exception_exit;
11147 if (cfg->compile_aot)
11148 EMIT_NEW_SFLDACONST (cfg, ins, field);
11151 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11153 EMIT_NEW_PCONST (cfg, ins, addr);
11156 MonoInst *iargs [1];
11157 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11158 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11162 /* Generate IR to do the actual load/store operation */
11164 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11165 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11166 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11169 if (op == CEE_LDSFLDA) {
11170 ins->klass = mono_class_from_mono_type (ftype);
11171 ins->type = STACK_PTR;
11173 } else if (op == CEE_STSFLD) {
11176 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11177 store->flags |= ins_flag;
11179 gboolean is_const = FALSE;
11180 MonoVTable *vtable = NULL;
11181 gpointer addr = NULL;
11183 if (!context_used) {
11184 vtable = mono_class_vtable (cfg->domain, klass);
11185 CHECK_TYPELOAD (klass);
11187 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11188 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11189 int ro_type = ftype->type;
11191 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11192 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11193 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11196 GSHAREDVT_FAILURE (op);
11198 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11201 case MONO_TYPE_BOOLEAN:
11203 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11207 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11210 case MONO_TYPE_CHAR:
11212 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11216 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11221 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11225 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11230 case MONO_TYPE_PTR:
11231 case MONO_TYPE_FNPTR:
11232 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11233 type_to_eval_stack_type ((cfg), field->type, *sp);
11236 case MONO_TYPE_STRING:
11237 case MONO_TYPE_OBJECT:
11238 case MONO_TYPE_CLASS:
11239 case MONO_TYPE_SZARRAY:
11240 case MONO_TYPE_ARRAY:
11241 if (!mono_gc_is_moving ()) {
11242 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11243 type_to_eval_stack_type ((cfg), field->type, *sp);
11251 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11256 case MONO_TYPE_VALUETYPE:
11266 CHECK_STACK_OVF (1);
11268 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11269 load->flags |= ins_flag;
11275 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11276 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11277 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11288 token = read32 (ip + 1);
11289 klass = mini_get_class (method, token, generic_context);
11290 CHECK_TYPELOAD (klass);
11291 if (ins_flag & MONO_INST_VOLATILE) {
11292 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11293 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11295 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11296 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11297 ins->flags |= ins_flag;
11298 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11299 generic_class_is_reference_type (cfg, klass)) {
11300 /* insert call to write barrier */
11301 emit_write_barrier (cfg, sp [0], sp [1]);
11313 const char *data_ptr;
11315 guint32 field_token;
11321 token = read32 (ip + 1);
11323 klass = mini_get_class (method, token, generic_context);
11324 CHECK_TYPELOAD (klass);
11326 context_used = mini_class_check_context_used (cfg, klass);
11328 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11329 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11330 ins->sreg1 = sp [0]->dreg;
11331 ins->type = STACK_I4;
11332 ins->dreg = alloc_ireg (cfg);
11333 MONO_ADD_INS (cfg->cbb, ins);
11334 *sp = mono_decompose_opcode (cfg, ins);
11337 if (context_used) {
11338 MonoInst *args [3];
11339 MonoClass *array_class = mono_array_class_get (klass, 1);
11340 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11342 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11345 args [0] = emit_get_rgctx_klass (cfg, context_used,
11346 array_class, MONO_RGCTX_INFO_VTABLE);
11351 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11353 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11355 if (cfg->opt & MONO_OPT_SHARED) {
11356 /* Decompose now to avoid problems with references to the domainvar */
11357 MonoInst *iargs [3];
11359 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11360 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11361 iargs [2] = sp [0];
11363 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11365 /* Decompose later since it is needed by abcrem */
11366 MonoClass *array_type = mono_array_class_get (klass, 1);
11367 mono_class_vtable (cfg->domain, array_type);
11368 CHECK_TYPELOAD (array_type);
11370 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11371 ins->dreg = alloc_ireg_ref (cfg);
11372 ins->sreg1 = sp [0]->dreg;
11373 ins->inst_newa_class = klass;
11374 ins->type = STACK_OBJ;
11375 ins->klass = array_type;
11376 MONO_ADD_INS (cfg->cbb, ins);
11377 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11378 cfg->cbb->has_array_access = TRUE;
11380 /* Needed so mono_emit_load_get_addr () gets called */
11381 mono_get_got_var (cfg);
11391 * we inline/optimize the initialization sequence if possible.
11392 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11393 * for small sizes open code the memcpy
11394 * ensure the rva field is big enough
11396 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11397 MonoMethod *memcpy_method = get_memcpy_method ();
11398 MonoInst *iargs [3];
11399 int add_reg = alloc_ireg_mp (cfg);
11401 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11402 if (cfg->compile_aot) {
11403 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11405 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11407 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11408 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11417 if (sp [0]->type != STACK_OBJ)
11420 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11421 ins->dreg = alloc_preg (cfg);
11422 ins->sreg1 = sp [0]->dreg;
11423 ins->type = STACK_I4;
11424 /* This flag will be inherited by the decomposition */
11425 ins->flags |= MONO_INST_FAULT;
11426 MONO_ADD_INS (cfg->cbb, ins);
11427 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11428 cfg->cbb->has_array_access = TRUE;
11436 if (sp [0]->type != STACK_OBJ)
11439 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11441 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11442 CHECK_TYPELOAD (klass);
11443 /* we need to make sure that this array is exactly the type it needs
11444 * to be for correctness. the wrappers are lax with their usage
11445 * so we need to ignore them here
11447 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11448 MonoClass *array_class = mono_array_class_get (klass, 1);
11449 mini_emit_check_array_type (cfg, sp [0], array_class);
11450 CHECK_TYPELOAD (array_class);
11454 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11459 case CEE_LDELEM_I1:
11460 case CEE_LDELEM_U1:
11461 case CEE_LDELEM_I2:
11462 case CEE_LDELEM_U2:
11463 case CEE_LDELEM_I4:
11464 case CEE_LDELEM_U4:
11465 case CEE_LDELEM_I8:
11467 case CEE_LDELEM_R4:
11468 case CEE_LDELEM_R8:
11469 case CEE_LDELEM_REF: {
11475 if (*ip == CEE_LDELEM) {
11477 token = read32 (ip + 1);
11478 klass = mini_get_class (method, token, generic_context);
11479 CHECK_TYPELOAD (klass);
11480 mono_class_init (klass);
11483 klass = array_access_to_klass (*ip);
11485 if (sp [0]->type != STACK_OBJ)
11488 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11490 if (mini_is_gsharedvt_variable_klass (klass)) {
11491 // FIXME-VT: OP_ICONST optimization
11492 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11493 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11494 ins->opcode = OP_LOADV_MEMBASE;
11495 } else if (sp [1]->opcode == OP_ICONST) {
11496 int array_reg = sp [0]->dreg;
11497 int index_reg = sp [1]->dreg;
11498 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11500 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11501 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11503 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11504 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11507 if (*ip == CEE_LDELEM)
11514 case CEE_STELEM_I1:
11515 case CEE_STELEM_I2:
11516 case CEE_STELEM_I4:
11517 case CEE_STELEM_I8:
11518 case CEE_STELEM_R4:
11519 case CEE_STELEM_R8:
11520 case CEE_STELEM_REF:
11525 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11527 if (*ip == CEE_STELEM) {
11529 token = read32 (ip + 1);
11530 klass = mini_get_class (method, token, generic_context);
11531 CHECK_TYPELOAD (klass);
11532 mono_class_init (klass);
11535 klass = array_access_to_klass (*ip);
11537 if (sp [0]->type != STACK_OBJ)
11540 emit_array_store (cfg, klass, sp, TRUE);
11542 if (*ip == CEE_STELEM)
11549 case CEE_CKFINITE: {
11553 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11554 ins->sreg1 = sp [0]->dreg;
11555 ins->dreg = alloc_freg (cfg);
11556 ins->type = STACK_R8;
11557 MONO_ADD_INS (cfg->cbb, ins);
11559 *sp++ = mono_decompose_opcode (cfg, ins);
11564 case CEE_REFANYVAL: {
11565 MonoInst *src_var, *src;
11567 int klass_reg = alloc_preg (cfg);
11568 int dreg = alloc_preg (cfg);
11570 GSHAREDVT_FAILURE (*ip);
11573 MONO_INST_NEW (cfg, ins, *ip);
11576 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11577 CHECK_TYPELOAD (klass);
11579 context_used = mini_class_check_context_used (cfg, klass);
11582 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11584 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11585 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11588 if (context_used) {
11589 MonoInst *klass_ins;
11591 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11592 klass, MONO_RGCTX_INFO_KLASS);
11595 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11596 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11598 mini_emit_class_check (cfg, klass_reg, klass);
11600 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11601 ins->type = STACK_MP;
11602 ins->klass = klass;
11607 case CEE_MKREFANY: {
11608 MonoInst *loc, *addr;
11610 GSHAREDVT_FAILURE (*ip);
11613 MONO_INST_NEW (cfg, ins, *ip);
11616 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11617 CHECK_TYPELOAD (klass);
11619 context_used = mini_class_check_context_used (cfg, klass);
11621 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11622 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11624 if (context_used) {
11625 MonoInst *const_ins;
11626 int type_reg = alloc_preg (cfg);
11628 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11631 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11632 } else if (cfg->compile_aot) {
11633 int const_reg = alloc_preg (cfg);
11634 int type_reg = alloc_preg (cfg);
11636 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11637 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11646 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11647 ins->type = STACK_VTYPE;
11648 ins->klass = mono_defaults.typed_reference_class;
11653 case CEE_LDTOKEN: {
11655 MonoClass *handle_class;
11657 CHECK_STACK_OVF (1);
11660 n = read32 (ip + 1);
11662 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11663 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11664 handle = mono_method_get_wrapper_data (method, n);
11665 handle_class = mono_method_get_wrapper_data (method, n + 1);
11666 if (handle_class == mono_defaults.typehandle_class)
11667 handle = &((MonoClass*)handle)->byval_arg;
11670 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11675 mono_class_init (handle_class);
11676 if (cfg->gshared) {
11677 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11678 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11679 /* This case handles ldtoken
11680 of an open type, like for
11683 } else if (handle_class == mono_defaults.typehandle_class) {
11684 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11685 } else if (handle_class == mono_defaults.fieldhandle_class)
11686 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11687 else if (handle_class == mono_defaults.methodhandle_class)
11688 context_used = mini_method_check_context_used (cfg, handle);
11690 g_assert_not_reached ();
11693 if ((cfg->opt & MONO_OPT_SHARED) &&
11694 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11695 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11696 MonoInst *addr, *vtvar, *iargs [3];
11697 int method_context_used;
11699 method_context_used = mini_method_check_context_used (cfg, method);
11701 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11703 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11704 EMIT_NEW_ICONST (cfg, iargs [1], n);
11705 if (method_context_used) {
11706 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11707 method, MONO_RGCTX_INFO_METHOD);
11708 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11710 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11711 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11713 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11715 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11717 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11719 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11720 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11721 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11722 (cmethod->klass == mono_defaults.systemtype_class) &&
11723 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11724 MonoClass *tclass = mono_class_from_mono_type (handle);
11726 mono_class_init (tclass);
11727 if (context_used) {
11728 ins = emit_get_rgctx_klass (cfg, context_used,
11729 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11730 } else if (cfg->compile_aot) {
11731 if (method->wrapper_type) {
11732 mono_error_init (&error); //got to do it since there are multiple conditionals below
11733 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11734 /* Special case for static synchronized wrappers */
11735 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11737 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11738 /* FIXME: n is not a normal token */
11740 EMIT_NEW_PCONST (cfg, ins, NULL);
11743 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11746 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11748 ins->type = STACK_OBJ;
11749 ins->klass = cmethod->klass;
11752 MonoInst *addr, *vtvar;
11754 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11756 if (context_used) {
11757 if (handle_class == mono_defaults.typehandle_class) {
11758 ins = emit_get_rgctx_klass (cfg, context_used,
11759 mono_class_from_mono_type (handle),
11760 MONO_RGCTX_INFO_TYPE);
11761 } else if (handle_class == mono_defaults.methodhandle_class) {
11762 ins = emit_get_rgctx_method (cfg, context_used,
11763 handle, MONO_RGCTX_INFO_METHOD);
11764 } else if (handle_class == mono_defaults.fieldhandle_class) {
11765 ins = emit_get_rgctx_field (cfg, context_used,
11766 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11768 g_assert_not_reached ();
11770 } else if (cfg->compile_aot) {
11771 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11773 EMIT_NEW_PCONST (cfg, ins, handle);
11775 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11776 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11777 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11787 MONO_INST_NEW (cfg, ins, OP_THROW);
11789 ins->sreg1 = sp [0]->dreg;
11791 cfg->cbb->out_of_line = TRUE;
11792 MONO_ADD_INS (cfg->cbb, ins);
11793 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11794 MONO_ADD_INS (cfg->cbb, ins);
11797 link_bblock (cfg, cfg->cbb, end_bblock);
11798 start_new_bblock = 1;
11800 case CEE_ENDFINALLY:
11801 /* mono_save_seq_point_info () depends on this */
11802 if (sp != stack_start)
11803 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11804 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11805 MONO_ADD_INS (cfg->cbb, ins);
11807 start_new_bblock = 1;
11810 * Control will leave the method so empty the stack, otherwise
11811 * the next basic block will start with a nonempty stack.
11813 while (sp != stack_start) {
11818 case CEE_LEAVE_S: {
11821 if (*ip == CEE_LEAVE) {
11823 target = ip + 5 + (gint32)read32(ip + 1);
11826 target = ip + 2 + (signed char)(ip [1]);
11829 /* empty the stack */
11830 while (sp != stack_start) {
11835 * If this leave statement is in a catch block, check for a
11836 * pending exception, and rethrow it if necessary.
11837 * We avoid doing this in runtime invoke wrappers, since those are called
11838 * by native code which excepts the wrapper to catch all exceptions.
11840 for (i = 0; i < header->num_clauses; ++i) {
11841 MonoExceptionClause *clause = &header->clauses [i];
11844 * Use <= in the final comparison to handle clauses with multiple
11845 * leave statements, like in bug #78024.
11846 * The ordering of the exception clauses guarantees that we find the
11847 * innermost clause.
11849 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11851 MonoBasicBlock *dont_throw;
11856 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11859 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11861 NEW_BBLOCK (cfg, dont_throw);
11864 * Currently, we always rethrow the abort exception, despite the
11865 * fact that this is not correct. See thread6.cs for an example.
11866 * But propagating the abort exception is more important than
11867 * getting the sematics right.
11869 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11870 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11871 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11873 MONO_START_BB (cfg, dont_throw);
11877 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11879 MonoExceptionClause *clause;
11881 for (tmp = handlers; tmp; tmp = tmp->next) {
11882 clause = tmp->data;
11883 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11885 link_bblock (cfg, cfg->cbb, tblock);
11886 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11887 ins->inst_target_bb = tblock;
11888 ins->inst_eh_block = clause;
11889 MONO_ADD_INS (cfg->cbb, ins);
11890 cfg->cbb->has_call_handler = 1;
11891 if (COMPILE_LLVM (cfg)) {
11892 MonoBasicBlock *target_bb;
11895 * Link the finally bblock with the target, since it will
11896 * conceptually branch there.
11897 * FIXME: Have to link the bblock containing the endfinally.
11899 GET_BBLOCK (cfg, target_bb, target);
11900 link_bblock (cfg, tblock, target_bb);
11903 g_list_free (handlers);
11906 MONO_INST_NEW (cfg, ins, OP_BR);
11907 MONO_ADD_INS (cfg->cbb, ins);
11908 GET_BBLOCK (cfg, tblock, target);
11909 link_bblock (cfg, cfg->cbb, tblock);
11910 ins->inst_target_bb = tblock;
11911 start_new_bblock = 1;
11913 if (*ip == CEE_LEAVE)
11922 * Mono specific opcodes
11924 case MONO_CUSTOM_PREFIX: {
11926 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11930 case CEE_MONO_ICALL: {
11932 MonoJitICallInfo *info;
11934 token = read32 (ip + 2);
11935 func = mono_method_get_wrapper_data (method, token);
11936 info = mono_find_jit_icall_by_addr (func);
11938 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11941 CHECK_STACK (info->sig->param_count);
11942 sp -= info->sig->param_count;
11944 ins = mono_emit_jit_icall (cfg, info->func, sp);
11945 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11949 inline_costs += 10 * num_calls++;
11953 case CEE_MONO_LDPTR_CARD_TABLE: {
11955 gpointer card_mask;
11956 CHECK_STACK_OVF (1);
11958 if (cfg->compile_aot)
11959 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11961 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
11965 inline_costs += 10 * num_calls++;
11968 case CEE_MONO_LDPTR_NURSERY_START: {
11971 CHECK_STACK_OVF (1);
11973 if (cfg->compile_aot)
11974 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11976 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
11980 inline_costs += 10 * num_calls++;
11983 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11984 CHECK_STACK_OVF (1);
11986 if (cfg->compile_aot)
11987 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11989 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
11993 inline_costs += 10 * num_calls++;
11996 case CEE_MONO_LDPTR: {
11999 CHECK_STACK_OVF (1);
12001 token = read32 (ip + 2);
12003 ptr = mono_method_get_wrapper_data (method, token);
12004 EMIT_NEW_PCONST (cfg, ins, ptr);
12007 inline_costs += 10 * num_calls++;
12008 /* Can't embed random pointers into AOT code */
12012 case CEE_MONO_JIT_ICALL_ADDR: {
12013 MonoJitICallInfo *callinfo;
12016 CHECK_STACK_OVF (1);
12018 token = read32 (ip + 2);
12020 ptr = mono_method_get_wrapper_data (method, token);
12021 callinfo = mono_find_jit_icall_by_addr (ptr);
12022 g_assert (callinfo);
12023 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12026 inline_costs += 10 * num_calls++;
12029 case CEE_MONO_ICALL_ADDR: {
12030 MonoMethod *cmethod;
12033 CHECK_STACK_OVF (1);
12035 token = read32 (ip + 2);
12037 cmethod = mono_method_get_wrapper_data (method, token);
12039 if (cfg->compile_aot) {
12040 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12042 ptr = mono_lookup_internal_call (cmethod);
12044 EMIT_NEW_PCONST (cfg, ins, ptr);
12050 case CEE_MONO_VTADDR: {
12051 MonoInst *src_var, *src;
12057 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12058 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12063 case CEE_MONO_NEWOBJ: {
12064 MonoInst *iargs [2];
12066 CHECK_STACK_OVF (1);
12068 token = read32 (ip + 2);
12069 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12070 mono_class_init (klass);
12071 NEW_DOMAINCONST (cfg, iargs [0]);
12072 MONO_ADD_INS (cfg->cbb, iargs [0]);
12073 NEW_CLASSCONST (cfg, iargs [1], klass);
12074 MONO_ADD_INS (cfg->cbb, iargs [1]);
12075 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12077 inline_costs += 10 * num_calls++;
12080 case CEE_MONO_OBJADDR:
12083 MONO_INST_NEW (cfg, ins, OP_MOVE);
12084 ins->dreg = alloc_ireg_mp (cfg);
12085 ins->sreg1 = sp [0]->dreg;
12086 ins->type = STACK_MP;
12087 MONO_ADD_INS (cfg->cbb, ins);
12091 case CEE_MONO_LDNATIVEOBJ:
12093 * Similar to LDOBJ, but instead load the unmanaged
12094 * representation of the vtype to the stack.
12099 token = read32 (ip + 2);
12100 klass = mono_method_get_wrapper_data (method, token);
12101 g_assert (klass->valuetype);
12102 mono_class_init (klass);
12105 MonoInst *src, *dest, *temp;
12108 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12109 temp->backend.is_pinvoke = 1;
12110 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12111 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12113 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12114 dest->type = STACK_VTYPE;
12115 dest->klass = klass;
12121 case CEE_MONO_RETOBJ: {
12123 * Same as RET, but return the native representation of a vtype
12126 g_assert (cfg->ret);
12127 g_assert (mono_method_signature (method)->pinvoke);
12132 token = read32 (ip + 2);
12133 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12135 if (!cfg->vret_addr) {
12136 g_assert (cfg->ret_var_is_local);
12138 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12140 EMIT_NEW_RETLOADA (cfg, ins);
12142 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12144 if (sp != stack_start)
12147 MONO_INST_NEW (cfg, ins, OP_BR);
12148 ins->inst_target_bb = end_bblock;
12149 MONO_ADD_INS (cfg->cbb, ins);
12150 link_bblock (cfg, cfg->cbb, end_bblock);
12151 start_new_bblock = 1;
12155 case CEE_MONO_CISINST:
12156 case CEE_MONO_CCASTCLASS: {
12161 token = read32 (ip + 2);
12162 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12163 if (ip [1] == CEE_MONO_CISINST)
12164 ins = handle_cisinst (cfg, klass, sp [0]);
12166 ins = handle_ccastclass (cfg, klass, sp [0]);
12171 case CEE_MONO_SAVE_LMF:
12172 case CEE_MONO_RESTORE_LMF:
12173 #ifdef MONO_ARCH_HAVE_LMF_OPS
12174 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12175 MONO_ADD_INS (cfg->cbb, ins);
12176 cfg->need_lmf_area = TRUE;
12180 case CEE_MONO_CLASSCONST:
12181 CHECK_STACK_OVF (1);
12183 token = read32 (ip + 2);
12184 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12187 inline_costs += 10 * num_calls++;
12189 case CEE_MONO_NOT_TAKEN:
12190 cfg->cbb->out_of_line = TRUE;
12193 case CEE_MONO_TLS: {
12196 CHECK_STACK_OVF (1);
12198 key = (gint32)read32 (ip + 2);
12199 g_assert (key < TLS_KEY_NUM);
12201 ins = mono_create_tls_get (cfg, key);
12203 if (cfg->compile_aot) {
12205 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12206 ins->dreg = alloc_preg (cfg);
12207 ins->type = STACK_PTR;
12209 g_assert_not_reached ();
12212 ins->type = STACK_PTR;
12213 MONO_ADD_INS (cfg->cbb, ins);
12218 case CEE_MONO_DYN_CALL: {
12219 MonoCallInst *call;
12221 /* It would be easier to call a trampoline, but that would put an
12222 * extra frame on the stack, confusing exception handling. So
12223 * implement it inline using an opcode for now.
12226 if (!cfg->dyn_call_var) {
12227 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12228 /* prevent it from being register allocated */
12229 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12232 /* Has to use a call inst since it local regalloc expects it */
12233 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12234 ins = (MonoInst*)call;
12236 ins->sreg1 = sp [0]->dreg;
12237 ins->sreg2 = sp [1]->dreg;
12238 MONO_ADD_INS (cfg->cbb, ins);
12240 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12243 inline_costs += 10 * num_calls++;
12247 case CEE_MONO_MEMORY_BARRIER: {
12249 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12253 case CEE_MONO_JIT_ATTACH: {
12254 MonoInst *args [16], *domain_ins;
12255 MonoInst *ad_ins, *jit_tls_ins;
12256 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12258 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12260 EMIT_NEW_PCONST (cfg, ins, NULL);
12261 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12263 ad_ins = mono_get_domain_intrinsic (cfg);
12264 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12266 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12267 NEW_BBLOCK (cfg, next_bb);
12268 NEW_BBLOCK (cfg, call_bb);
12270 if (cfg->compile_aot) {
12271 /* AOT code is only used in the root domain */
12272 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12274 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12276 MONO_ADD_INS (cfg->cbb, ad_ins);
12277 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12280 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12285 MONO_START_BB (cfg, call_bb);
12288 if (cfg->compile_aot) {
12289 /* AOT code is only used in the root domain */
12290 EMIT_NEW_PCONST (cfg, args [0], NULL);
12292 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12294 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12295 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12298 MONO_START_BB (cfg, next_bb);
12302 case CEE_MONO_JIT_DETACH: {
12303 MonoInst *args [16];
12305 /* Restore the original domain */
12306 dreg = alloc_ireg (cfg);
12307 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12308 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12313 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12319 case CEE_PREFIX1: {
12322 case CEE_ARGLIST: {
12323 /* somewhat similar to LDTOKEN */
12324 MonoInst *addr, *vtvar;
12325 CHECK_STACK_OVF (1);
12326 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12328 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12329 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12331 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12332 ins->type = STACK_VTYPE;
12333 ins->klass = mono_defaults.argumenthandle_class;
12343 MonoInst *cmp, *arg1, *arg2;
12351 * The following transforms:
12352 * CEE_CEQ into OP_CEQ
12353 * CEE_CGT into OP_CGT
12354 * CEE_CGT_UN into OP_CGT_UN
12355 * CEE_CLT into OP_CLT
12356 * CEE_CLT_UN into OP_CLT_UN
12358 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12360 MONO_INST_NEW (cfg, ins, cmp->opcode);
12361 cmp->sreg1 = arg1->dreg;
12362 cmp->sreg2 = arg2->dreg;
12363 type_from_op (cfg, cmp, arg1, arg2);
12365 add_widen_op (cfg, cmp, &arg1, &arg2);
12366 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12367 cmp->opcode = OP_LCOMPARE;
12368 else if (arg1->type == STACK_R4)
12369 cmp->opcode = OP_RCOMPARE;
12370 else if (arg1->type == STACK_R8)
12371 cmp->opcode = OP_FCOMPARE;
12373 cmp->opcode = OP_ICOMPARE;
12374 MONO_ADD_INS (cfg->cbb, cmp);
12375 ins->type = STACK_I4;
12376 ins->dreg = alloc_dreg (cfg, ins->type);
12377 type_from_op (cfg, ins, arg1, arg2);
12379 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12381 * The backends expect the fceq opcodes to do the
12384 ins->sreg1 = cmp->sreg1;
12385 ins->sreg2 = cmp->sreg2;
12388 MONO_ADD_INS (cfg->cbb, ins);
12394 MonoInst *argconst;
12395 MonoMethod *cil_method;
12397 CHECK_STACK_OVF (1);
12399 n = read32 (ip + 2);
12400 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12401 if (!cmethod || mono_loader_get_last_error ())
12403 mono_class_init (cmethod->klass);
12405 mono_save_token_info (cfg, image, n, cmethod);
12407 context_used = mini_method_check_context_used (cfg, cmethod);
12409 cil_method = cmethod;
12410 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12411 METHOD_ACCESS_FAILURE (method, cil_method);
12413 if (mono_security_core_clr_enabled ())
12414 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12417 * Optimize the common case of ldftn+delegate creation
12419 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12420 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12421 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12422 MonoInst *target_ins, *handle_ins;
12423 MonoMethod *invoke;
12424 int invoke_context_used;
12426 invoke = mono_get_delegate_invoke (ctor_method->klass);
12427 if (!invoke || !mono_method_signature (invoke))
12430 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12432 target_ins = sp [-1];
12434 if (mono_security_core_clr_enabled ())
12435 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12437 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12438 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12439 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12441 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12445 /* FIXME: SGEN support */
12446 if (invoke_context_used == 0) {
12448 if (cfg->verbose_level > 3)
12449 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12450 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12453 CHECK_CFG_EXCEPTION;
12463 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12464 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12468 inline_costs += 10 * num_calls++;
12471 case CEE_LDVIRTFTN: {
12472 MonoInst *args [2];
12476 n = read32 (ip + 2);
12477 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12478 if (!cmethod || mono_loader_get_last_error ())
12480 mono_class_init (cmethod->klass);
12482 context_used = mini_method_check_context_used (cfg, cmethod);
12484 if (mono_security_core_clr_enabled ())
12485 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12488 * Optimize the common case of ldvirtftn+delegate creation
12490 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12491 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12492 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12493 MonoInst *target_ins, *handle_ins;
12494 MonoMethod *invoke;
12495 int invoke_context_used;
12496 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12498 invoke = mono_get_delegate_invoke (ctor_method->klass);
12499 if (!invoke || !mono_method_signature (invoke))
12502 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12504 target_ins = sp [-1];
12506 if (mono_security_core_clr_enabled ())
12507 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12509 /* FIXME: SGEN support */
12510 if (invoke_context_used == 0) {
12512 if (cfg->verbose_level > 3)
12513 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12514 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12517 CHECK_CFG_EXCEPTION;
12530 args [1] = emit_get_rgctx_method (cfg, context_used,
12531 cmethod, MONO_RGCTX_INFO_METHOD);
12534 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12536 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12539 inline_costs += 10 * num_calls++;
12543 CHECK_STACK_OVF (1);
12545 n = read16 (ip + 2);
12547 EMIT_NEW_ARGLOAD (cfg, ins, n);
12552 CHECK_STACK_OVF (1);
12554 n = read16 (ip + 2);
12556 NEW_ARGLOADA (cfg, ins, n);
12557 MONO_ADD_INS (cfg->cbb, ins);
12565 n = read16 (ip + 2);
12567 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12569 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12573 CHECK_STACK_OVF (1);
12575 n = read16 (ip + 2);
12577 EMIT_NEW_LOCLOAD (cfg, ins, n);
12582 unsigned char *tmp_ip;
12583 CHECK_STACK_OVF (1);
12585 n = read16 (ip + 2);
12588 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12594 EMIT_NEW_LOCLOADA (cfg, ins, n);
12603 n = read16 (ip + 2);
12605 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12607 emit_stloc_ir (cfg, sp, header, n);
12614 if (sp != stack_start)
12616 if (cfg->method != method)
12618 * Inlining this into a loop in a parent could lead to
12619 * stack overflows which is different behavior than the
12620 * non-inlined case, thus disable inlining in this case.
12622 INLINE_FAILURE("localloc");
12624 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12625 ins->dreg = alloc_preg (cfg);
12626 ins->sreg1 = sp [0]->dreg;
12627 ins->type = STACK_PTR;
12628 MONO_ADD_INS (cfg->cbb, ins);
12630 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12632 ins->flags |= MONO_INST_INIT;
12637 case CEE_ENDFILTER: {
12638 MonoExceptionClause *clause, *nearest;
12643 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12645 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12646 ins->sreg1 = (*sp)->dreg;
12647 MONO_ADD_INS (cfg->cbb, ins);
12648 start_new_bblock = 1;
12652 for (cc = 0; cc < header->num_clauses; ++cc) {
12653 clause = &header->clauses [cc];
12654 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12655 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12656 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12659 g_assert (nearest);
12660 if ((ip - header->code) != nearest->handler_offset)
12665 case CEE_UNALIGNED_:
12666 ins_flag |= MONO_INST_UNALIGNED;
12667 /* FIXME: record alignment? we can assume 1 for now */
12671 case CEE_VOLATILE_:
12672 ins_flag |= MONO_INST_VOLATILE;
12676 ins_flag |= MONO_INST_TAILCALL;
12677 cfg->flags |= MONO_CFG_HAS_TAIL;
12678 /* Can't inline tail calls at this time */
12679 inline_costs += 100000;
12686 token = read32 (ip + 2);
12687 klass = mini_get_class (method, token, generic_context);
12688 CHECK_TYPELOAD (klass);
12689 if (generic_class_is_reference_type (cfg, klass))
12690 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12692 mini_emit_initobj (cfg, *sp, NULL, klass);
12696 case CEE_CONSTRAINED_:
12698 token = read32 (ip + 2);
12699 constrained_class = mini_get_class (method, token, generic_context);
12700 CHECK_TYPELOAD (constrained_class);
12704 case CEE_INITBLK: {
12705 MonoInst *iargs [3];
12709 /* Skip optimized paths for volatile operations. */
12710 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12711 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12712 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12713 /* emit_memset only works when val == 0 */
12714 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12717 iargs [0] = sp [0];
12718 iargs [1] = sp [1];
12719 iargs [2] = sp [2];
12720 if (ip [1] == CEE_CPBLK) {
12722 * FIXME: It's unclear whether we should be emitting both the acquire
12723 * and release barriers for cpblk. It is technically both a load and
12724 * store operation, so it seems like that's the sensible thing to do.
12726 * FIXME: We emit full barriers on both sides of the operation for
12727 * simplicity. We should have a separate atomic memcpy method instead.
12729 MonoMethod *memcpy_method = get_memcpy_method ();
12731 if (ins_flag & MONO_INST_VOLATILE)
12732 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12734 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12735 call->flags |= ins_flag;
12737 if (ins_flag & MONO_INST_VOLATILE)
12738 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12740 MonoMethod *memset_method = get_memset_method ();
12741 if (ins_flag & MONO_INST_VOLATILE) {
12742 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12743 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12745 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12746 call->flags |= ins_flag;
12757 ins_flag |= MONO_INST_NOTYPECHECK;
12759 ins_flag |= MONO_INST_NORANGECHECK;
12760 /* we ignore the no-nullcheck for now since we
12761 * really do it explicitly only when doing callvirt->call
12765 case CEE_RETHROW: {
12767 int handler_offset = -1;
12769 for (i = 0; i < header->num_clauses; ++i) {
12770 MonoExceptionClause *clause = &header->clauses [i];
12771 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12772 handler_offset = clause->handler_offset;
12777 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12779 if (handler_offset == -1)
12782 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12783 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12784 ins->sreg1 = load->dreg;
12785 MONO_ADD_INS (cfg->cbb, ins);
12787 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12788 MONO_ADD_INS (cfg->cbb, ins);
12791 link_bblock (cfg, cfg->cbb, end_bblock);
12792 start_new_bblock = 1;
12800 CHECK_STACK_OVF (1);
12802 token = read32 (ip + 2);
12803 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12804 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12807 val = mono_type_size (type, &ialign);
12809 MonoClass *klass = mini_get_class (method, token, generic_context);
12810 CHECK_TYPELOAD (klass);
12812 val = mono_type_size (&klass->byval_arg, &ialign);
12814 if (mini_is_gsharedvt_klass (klass))
12815 GSHAREDVT_FAILURE (*ip);
12817 EMIT_NEW_ICONST (cfg, ins, val);
12822 case CEE_REFANYTYPE: {
12823 MonoInst *src_var, *src;
12825 GSHAREDVT_FAILURE (*ip);
12831 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12833 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12834 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12835 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12840 case CEE_READONLY_:
12853 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12863 g_warning ("opcode 0x%02x not handled", *ip);
12867 if (start_new_bblock != 1)
12870 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12871 if (cfg->cbb->next_bb) {
12872 /* This could already be set because of inlining, #693905 */
12873 MonoBasicBlock *bb = cfg->cbb;
12875 while (bb->next_bb)
12877 bb->next_bb = end_bblock;
12879 cfg->cbb->next_bb = end_bblock;
12882 if (cfg->method == method && cfg->domainvar) {
12884 MonoInst *get_domain;
12886 cfg->cbb = init_localsbb;
12888 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12889 MONO_ADD_INS (cfg->cbb, get_domain);
12891 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12893 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12894 MONO_ADD_INS (cfg->cbb, store);
12897 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12898 if (cfg->compile_aot)
12899 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12900 mono_get_got_var (cfg);
12903 if (cfg->method == method && cfg->got_var)
12904 mono_emit_load_got_addr (cfg);
12906 if (init_localsbb) {
12907 cfg->cbb = init_localsbb;
12909 for (i = 0; i < header->num_locals; ++i) {
12910 emit_init_local (cfg, i, header->locals [i], init_locals);
12914 if (cfg->init_ref_vars && cfg->method == method) {
12915 /* Emit initialization for ref vars */
12916 // FIXME: Avoid duplication initialization for IL locals.
12917 for (i = 0; i < cfg->num_varinfo; ++i) {
12918 MonoInst *ins = cfg->varinfo [i];
12920 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12921 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12925 if (cfg->lmf_var && cfg->method == method) {
12926 cfg->cbb = init_localsbb;
12927 emit_push_lmf (cfg);
12930 cfg->cbb = init_localsbb;
12931 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12934 MonoBasicBlock *bb;
12937 * Make seq points at backward branch targets interruptable.
12939 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12940 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12941 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12944 /* Add a sequence point for method entry/exit events */
12945 if (seq_points && cfg->gen_sdb_seq_points) {
12946 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12947 MONO_ADD_INS (init_localsbb, ins);
12948 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12949 MONO_ADD_INS (cfg->bb_exit, ins);
12953 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12954 * the code they refer to was dead (#11880).
12956 if (sym_seq_points) {
12957 for (i = 0; i < header->code_size; ++i) {
12958 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12961 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12962 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12969 if (cfg->method == method) {
12970 MonoBasicBlock *bb;
12971 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12972 bb->region = mono_find_block_region (cfg, bb->real_offset);
12974 mono_create_spvar_for_region (cfg, bb->region);
12975 if (cfg->verbose_level > 2)
12976 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12980 if (inline_costs < 0) {
12983 /* Method is too large */
12984 mname = mono_method_full_name (method, TRUE);
12985 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12986 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12990 if ((cfg->verbose_level > 2) && (cfg->method == method))
12991 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12996 g_assert (!mono_error_ok (&cfg->error));
13000 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13004 set_exception_type_from_invalid_il (cfg, method, ip);
13008 g_slist_free (class_inits);
13009 mono_basic_block_free (original_bb);
13010 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13011 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13012 if (cfg->exception_type)
13015 return inline_costs;
13019 store_membase_reg_to_store_membase_imm (int opcode)
13022 case OP_STORE_MEMBASE_REG:
13023 return OP_STORE_MEMBASE_IMM;
13024 case OP_STOREI1_MEMBASE_REG:
13025 return OP_STOREI1_MEMBASE_IMM;
13026 case OP_STOREI2_MEMBASE_REG:
13027 return OP_STOREI2_MEMBASE_IMM;
13028 case OP_STOREI4_MEMBASE_REG:
13029 return OP_STOREI4_MEMBASE_IMM;
13030 case OP_STOREI8_MEMBASE_REG:
13031 return OP_STOREI8_MEMBASE_IMM;
13033 g_assert_not_reached ();
13040 mono_op_to_op_imm (int opcode)
13044 return OP_IADD_IMM;
13046 return OP_ISUB_IMM;
13048 return OP_IDIV_IMM;
13050 return OP_IDIV_UN_IMM;
13052 return OP_IREM_IMM;
13054 return OP_IREM_UN_IMM;
13056 return OP_IMUL_IMM;
13058 return OP_IAND_IMM;
13062 return OP_IXOR_IMM;
13064 return OP_ISHL_IMM;
13066 return OP_ISHR_IMM;
13068 return OP_ISHR_UN_IMM;
13071 return OP_LADD_IMM;
13073 return OP_LSUB_IMM;
13075 return OP_LAND_IMM;
13079 return OP_LXOR_IMM;
13081 return OP_LSHL_IMM;
13083 return OP_LSHR_IMM;
13085 return OP_LSHR_UN_IMM;
13086 #if SIZEOF_REGISTER == 8
13088 return OP_LREM_IMM;
13092 return OP_COMPARE_IMM;
13094 return OP_ICOMPARE_IMM;
13096 return OP_LCOMPARE_IMM;
13098 case OP_STORE_MEMBASE_REG:
13099 return OP_STORE_MEMBASE_IMM;
13100 case OP_STOREI1_MEMBASE_REG:
13101 return OP_STOREI1_MEMBASE_IMM;
13102 case OP_STOREI2_MEMBASE_REG:
13103 return OP_STOREI2_MEMBASE_IMM;
13104 case OP_STOREI4_MEMBASE_REG:
13105 return OP_STOREI4_MEMBASE_IMM;
13107 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13109 return OP_X86_PUSH_IMM;
13110 case OP_X86_COMPARE_MEMBASE_REG:
13111 return OP_X86_COMPARE_MEMBASE_IMM;
13113 #if defined(TARGET_AMD64)
13114 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13115 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13117 case OP_VOIDCALL_REG:
13118 return OP_VOIDCALL;
13126 return OP_LOCALLOC_IMM;
13133 ldind_to_load_membase (int opcode)
13137 return OP_LOADI1_MEMBASE;
13139 return OP_LOADU1_MEMBASE;
13141 return OP_LOADI2_MEMBASE;
13143 return OP_LOADU2_MEMBASE;
13145 return OP_LOADI4_MEMBASE;
13147 return OP_LOADU4_MEMBASE;
13149 return OP_LOAD_MEMBASE;
13150 case CEE_LDIND_REF:
13151 return OP_LOAD_MEMBASE;
13153 return OP_LOADI8_MEMBASE;
13155 return OP_LOADR4_MEMBASE;
13157 return OP_LOADR8_MEMBASE;
13159 g_assert_not_reached ();
13166 stind_to_store_membase (int opcode)
13170 return OP_STOREI1_MEMBASE_REG;
13172 return OP_STOREI2_MEMBASE_REG;
13174 return OP_STOREI4_MEMBASE_REG;
13176 case CEE_STIND_REF:
13177 return OP_STORE_MEMBASE_REG;
13179 return OP_STOREI8_MEMBASE_REG;
13181 return OP_STORER4_MEMBASE_REG;
13183 return OP_STORER8_MEMBASE_REG;
13185 g_assert_not_reached ();
13192 mono_load_membase_to_load_mem (int opcode)
13194 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13195 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13197 case OP_LOAD_MEMBASE:
13198 return OP_LOAD_MEM;
13199 case OP_LOADU1_MEMBASE:
13200 return OP_LOADU1_MEM;
13201 case OP_LOADU2_MEMBASE:
13202 return OP_LOADU2_MEM;
13203 case OP_LOADI4_MEMBASE:
13204 return OP_LOADI4_MEM;
13205 case OP_LOADU4_MEMBASE:
13206 return OP_LOADU4_MEM;
13207 #if SIZEOF_REGISTER == 8
13208 case OP_LOADI8_MEMBASE:
13209 return OP_LOADI8_MEM;
13218 op_to_op_dest_membase (int store_opcode, int opcode)
13220 #if defined(TARGET_X86)
13221 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13226 return OP_X86_ADD_MEMBASE_REG;
13228 return OP_X86_SUB_MEMBASE_REG;
13230 return OP_X86_AND_MEMBASE_REG;
13232 return OP_X86_OR_MEMBASE_REG;
13234 return OP_X86_XOR_MEMBASE_REG;
13237 return OP_X86_ADD_MEMBASE_IMM;
13240 return OP_X86_SUB_MEMBASE_IMM;
13243 return OP_X86_AND_MEMBASE_IMM;
13246 return OP_X86_OR_MEMBASE_IMM;
13249 return OP_X86_XOR_MEMBASE_IMM;
13255 #if defined(TARGET_AMD64)
13256 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13261 return OP_X86_ADD_MEMBASE_REG;
13263 return OP_X86_SUB_MEMBASE_REG;
13265 return OP_X86_AND_MEMBASE_REG;
13267 return OP_X86_OR_MEMBASE_REG;
13269 return OP_X86_XOR_MEMBASE_REG;
13271 return OP_X86_ADD_MEMBASE_IMM;
13273 return OP_X86_SUB_MEMBASE_IMM;
13275 return OP_X86_AND_MEMBASE_IMM;
13277 return OP_X86_OR_MEMBASE_IMM;
13279 return OP_X86_XOR_MEMBASE_IMM;
13281 return OP_AMD64_ADD_MEMBASE_REG;
13283 return OP_AMD64_SUB_MEMBASE_REG;
13285 return OP_AMD64_AND_MEMBASE_REG;
13287 return OP_AMD64_OR_MEMBASE_REG;
13289 return OP_AMD64_XOR_MEMBASE_REG;
13292 return OP_AMD64_ADD_MEMBASE_IMM;
13295 return OP_AMD64_SUB_MEMBASE_IMM;
13298 return OP_AMD64_AND_MEMBASE_IMM;
13301 return OP_AMD64_OR_MEMBASE_IMM;
13304 return OP_AMD64_XOR_MEMBASE_IMM;
13314 op_to_op_store_membase (int store_opcode, int opcode)
13316 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13319 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13320 return OP_X86_SETEQ_MEMBASE;
13322 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13323 return OP_X86_SETNE_MEMBASE;
13331 op_to_op_src1_membase (int load_opcode, int opcode)
13334 /* FIXME: This has sign extension issues */
13336 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13337 return OP_X86_COMPARE_MEMBASE8_IMM;
13340 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13345 return OP_X86_PUSH_MEMBASE;
13346 case OP_COMPARE_IMM:
13347 case OP_ICOMPARE_IMM:
13348 return OP_X86_COMPARE_MEMBASE_IMM;
13351 return OP_X86_COMPARE_MEMBASE_REG;
13355 #ifdef TARGET_AMD64
13356 /* FIXME: This has sign extension issues */
13358 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13359 return OP_X86_COMPARE_MEMBASE8_IMM;
13364 #ifdef __mono_ilp32__
13365 if (load_opcode == OP_LOADI8_MEMBASE)
13367 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13369 return OP_X86_PUSH_MEMBASE;
13371 /* FIXME: This only works for 32 bit immediates
13372 case OP_COMPARE_IMM:
13373 case OP_LCOMPARE_IMM:
13374 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13375 return OP_AMD64_COMPARE_MEMBASE_IMM;
13377 case OP_ICOMPARE_IMM:
13378 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13379 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13383 #ifdef __mono_ilp32__
13384 if (load_opcode == OP_LOAD_MEMBASE)
13385 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13386 if (load_opcode == OP_LOADI8_MEMBASE)
13388 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13390 return OP_AMD64_COMPARE_MEMBASE_REG;
13393 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13394 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13403 op_to_op_src2_membase (int load_opcode, int opcode)
13406 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13412 return OP_X86_COMPARE_REG_MEMBASE;
13414 return OP_X86_ADD_REG_MEMBASE;
13416 return OP_X86_SUB_REG_MEMBASE;
13418 return OP_X86_AND_REG_MEMBASE;
13420 return OP_X86_OR_REG_MEMBASE;
13422 return OP_X86_XOR_REG_MEMBASE;
13426 #ifdef TARGET_AMD64
13427 #ifdef __mono_ilp32__
13428 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13430 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13434 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13436 return OP_X86_ADD_REG_MEMBASE;
13438 return OP_X86_SUB_REG_MEMBASE;
13440 return OP_X86_AND_REG_MEMBASE;
13442 return OP_X86_OR_REG_MEMBASE;
13444 return OP_X86_XOR_REG_MEMBASE;
13446 #ifdef __mono_ilp32__
13447 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13449 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13454 return OP_AMD64_COMPARE_REG_MEMBASE;
13456 return OP_AMD64_ADD_REG_MEMBASE;
13458 return OP_AMD64_SUB_REG_MEMBASE;
13460 return OP_AMD64_AND_REG_MEMBASE;
13462 return OP_AMD64_OR_REG_MEMBASE;
13464 return OP_AMD64_XOR_REG_MEMBASE;
13473 mono_op_to_op_imm_noemul (int opcode)
13476 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13482 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13489 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13494 return mono_op_to_op_imm (opcode);
13499 * mono_handle_global_vregs:
13501 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13505 mono_handle_global_vregs (MonoCompile *cfg)
13507 gint32 *vreg_to_bb;
13508 MonoBasicBlock *bb;
13511 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13513 #ifdef MONO_ARCH_SIMD_INTRINSICS
13514 if (cfg->uses_simd_intrinsics)
13515 mono_simd_simplify_indirection (cfg);
13518 /* Find local vregs used in more than one bb */
13519 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13520 MonoInst *ins = bb->code;
13521 int block_num = bb->block_num;
13523 if (cfg->verbose_level > 2)
13524 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13527 for (; ins; ins = ins->next) {
13528 const char *spec = INS_INFO (ins->opcode);
13529 int regtype = 0, regindex;
13532 if (G_UNLIKELY (cfg->verbose_level > 2))
13533 mono_print_ins (ins);
13535 g_assert (ins->opcode >= MONO_CEE_LAST);
13537 for (regindex = 0; regindex < 4; regindex ++) {
13540 if (regindex == 0) {
13541 regtype = spec [MONO_INST_DEST];
13542 if (regtype == ' ')
13545 } else if (regindex == 1) {
13546 regtype = spec [MONO_INST_SRC1];
13547 if (regtype == ' ')
13550 } else if (regindex == 2) {
13551 regtype = spec [MONO_INST_SRC2];
13552 if (regtype == ' ')
13555 } else if (regindex == 3) {
13556 regtype = spec [MONO_INST_SRC3];
13557 if (regtype == ' ')
13562 #if SIZEOF_REGISTER == 4
13563 /* In the LLVM case, the long opcodes are not decomposed */
13564 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13566 * Since some instructions reference the original long vreg,
13567 * and some reference the two component vregs, it is quite hard
13568 * to determine when it needs to be global. So be conservative.
13570 if (!get_vreg_to_inst (cfg, vreg)) {
13571 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13573 if (cfg->verbose_level > 2)
13574 printf ("LONG VREG R%d made global.\n", vreg);
13578 * Make the component vregs volatile since the optimizations can
13579 * get confused otherwise.
13581 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13582 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13586 g_assert (vreg != -1);
13588 prev_bb = vreg_to_bb [vreg];
13589 if (prev_bb == 0) {
13590 /* 0 is a valid block num */
13591 vreg_to_bb [vreg] = block_num + 1;
13592 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13593 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13596 if (!get_vreg_to_inst (cfg, vreg)) {
13597 if (G_UNLIKELY (cfg->verbose_level > 2))
13598 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13602 if (vreg_is_ref (cfg, vreg))
13603 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13605 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13608 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13611 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13614 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13617 g_assert_not_reached ();
13621 /* Flag as having been used in more than one bb */
13622 vreg_to_bb [vreg] = -1;
13628 /* If a variable is used in only one bblock, convert it into a local vreg */
13629 for (i = 0; i < cfg->num_varinfo; i++) {
13630 MonoInst *var = cfg->varinfo [i];
13631 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13633 switch (var->type) {
13639 #if SIZEOF_REGISTER == 8
13642 #if !defined(TARGET_X86)
13643 /* Enabling this screws up the fp stack on x86 */
13646 if (mono_arch_is_soft_float ())
13649 /* Arguments are implicitly global */
13650 /* Putting R4 vars into registers doesn't work currently */
13651 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13652 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13654 * Make that the variable's liveness interval doesn't contain a call, since
13655 * that would cause the lvreg to be spilled, making the whole optimization
13658 /* This is too slow for JIT compilation */
13660 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13662 int def_index, call_index, ins_index;
13663 gboolean spilled = FALSE;
13668 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13669 const char *spec = INS_INFO (ins->opcode);
13671 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13672 def_index = ins_index;
13674 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13675 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13676 if (call_index > def_index) {
13682 if (MONO_IS_CALL (ins))
13683 call_index = ins_index;
13693 if (G_UNLIKELY (cfg->verbose_level > 2))
13694 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13695 var->flags |= MONO_INST_IS_DEAD;
13696 cfg->vreg_to_inst [var->dreg] = NULL;
13703 * Compress the varinfo and vars tables so the liveness computation is faster and
13704 * takes up less space.
13707 for (i = 0; i < cfg->num_varinfo; ++i) {
13708 MonoInst *var = cfg->varinfo [i];
13709 if (pos < i && cfg->locals_start == i)
13710 cfg->locals_start = pos;
13711 if (!(var->flags & MONO_INST_IS_DEAD)) {
13713 cfg->varinfo [pos] = cfg->varinfo [i];
13714 cfg->varinfo [pos]->inst_c0 = pos;
13715 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13716 cfg->vars [pos].idx = pos;
13717 #if SIZEOF_REGISTER == 4
13718 if (cfg->varinfo [pos]->type == STACK_I8) {
13719 /* Modify the two component vars too */
13722 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13723 var1->inst_c0 = pos;
13724 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13725 var1->inst_c0 = pos;
13732 cfg->num_varinfo = pos;
13733 if (cfg->locals_start > cfg->num_varinfo)
13734 cfg->locals_start = cfg->num_varinfo;
13738 * mono_spill_global_vars:
13740 * Generate spill code for variables which are not allocated to registers,
13741 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13742 * code is generated which could be optimized by the local optimization passes.
13745 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13747 MonoBasicBlock *bb;
13749 int orig_next_vreg;
13750 guint32 *vreg_to_lvreg;
13752 guint32 i, lvregs_len;
13753 gboolean dest_has_lvreg = FALSE;
13754 guint32 stacktypes [128];
13755 MonoInst **live_range_start, **live_range_end;
13756 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13757 int *gsharedvt_vreg_to_idx = NULL;
13759 *need_local_opts = FALSE;
13761 memset (spec2, 0, sizeof (spec2));
13763 /* FIXME: Move this function to mini.c */
13764 stacktypes ['i'] = STACK_PTR;
13765 stacktypes ['l'] = STACK_I8;
13766 stacktypes ['f'] = STACK_R8;
13767 #ifdef MONO_ARCH_SIMD_INTRINSICS
13768 stacktypes ['x'] = STACK_VTYPE;
13771 #if SIZEOF_REGISTER == 4
13772 /* Create MonoInsts for longs */
13773 for (i = 0; i < cfg->num_varinfo; i++) {
13774 MonoInst *ins = cfg->varinfo [i];
13776 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13777 switch (ins->type) {
13782 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13785 g_assert (ins->opcode == OP_REGOFFSET);
13787 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13789 tree->opcode = OP_REGOFFSET;
13790 tree->inst_basereg = ins->inst_basereg;
13791 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13793 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13795 tree->opcode = OP_REGOFFSET;
13796 tree->inst_basereg = ins->inst_basereg;
13797 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13807 if (cfg->compute_gc_maps) {
13808 /* registers need liveness info even for !non refs */
13809 for (i = 0; i < cfg->num_varinfo; i++) {
13810 MonoInst *ins = cfg->varinfo [i];
13812 if (ins->opcode == OP_REGVAR)
13813 ins->flags |= MONO_INST_GC_TRACK;
13817 if (cfg->gsharedvt) {
13818 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13820 for (i = 0; i < cfg->num_varinfo; ++i) {
13821 MonoInst *ins = cfg->varinfo [i];
13824 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13825 if (i >= cfg->locals_start) {
13827 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13828 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13829 ins->opcode = OP_GSHAREDVT_LOCAL;
13830 ins->inst_imm = idx;
13833 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13834 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13840 /* FIXME: widening and truncation */
13843 * As an optimization, when a variable allocated to the stack is first loaded into
13844 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13845 * the variable again.
13847 orig_next_vreg = cfg->next_vreg;
13848 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13849 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13853 * These arrays contain the first and last instructions accessing a given
13855 * Since we emit bblocks in the same order we process them here, and we
13856 * don't split live ranges, these will precisely describe the live range of
13857 * the variable, i.e. the instruction range where a valid value can be found
13858 * in the variables location.
13859 * The live range is computed using the liveness info computed by the liveness pass.
13860 * We can't use vmv->range, since that is an abstract live range, and we need
13861 * one which is instruction precise.
13862 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13864 /* FIXME: Only do this if debugging info is requested */
13865 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13866 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13867 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13868 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13870 /* Add spill loads/stores */
13871 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13874 if (cfg->verbose_level > 2)
13875 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13877 /* Clear vreg_to_lvreg array */
13878 for (i = 0; i < lvregs_len; i++)
13879 vreg_to_lvreg [lvregs [i]] = 0;
13883 MONO_BB_FOR_EACH_INS (bb, ins) {
13884 const char *spec = INS_INFO (ins->opcode);
13885 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13886 gboolean store, no_lvreg;
13887 int sregs [MONO_MAX_SRC_REGS];
13889 if (G_UNLIKELY (cfg->verbose_level > 2))
13890 mono_print_ins (ins);
13892 if (ins->opcode == OP_NOP)
13896 * We handle LDADDR here as well, since it can only be decomposed
13897 * when variable addresses are known.
13899 if (ins->opcode == OP_LDADDR) {
13900 MonoInst *var = ins->inst_p0;
13902 if (var->opcode == OP_VTARG_ADDR) {
13903 /* Happens on SPARC/S390 where vtypes are passed by reference */
13904 MonoInst *vtaddr = var->inst_left;
13905 if (vtaddr->opcode == OP_REGVAR) {
13906 ins->opcode = OP_MOVE;
13907 ins->sreg1 = vtaddr->dreg;
13909 else if (var->inst_left->opcode == OP_REGOFFSET) {
13910 ins->opcode = OP_LOAD_MEMBASE;
13911 ins->inst_basereg = vtaddr->inst_basereg;
13912 ins->inst_offset = vtaddr->inst_offset;
13915 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13916 /* gsharedvt arg passed by ref */
13917 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13919 ins->opcode = OP_LOAD_MEMBASE;
13920 ins->inst_basereg = var->inst_basereg;
13921 ins->inst_offset = var->inst_offset;
13922 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13923 MonoInst *load, *load2, *load3;
13924 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13925 int reg1, reg2, reg3;
13926 MonoInst *info_var = cfg->gsharedvt_info_var;
13927 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13931 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13934 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13936 g_assert (info_var);
13937 g_assert (locals_var);
13939 /* Mark the instruction used to compute the locals var as used */
13940 cfg->gsharedvt_locals_var_ins = NULL;
13942 /* Load the offset */
13943 if (info_var->opcode == OP_REGOFFSET) {
13944 reg1 = alloc_ireg (cfg);
13945 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13946 } else if (info_var->opcode == OP_REGVAR) {
13948 reg1 = info_var->dreg;
13950 g_assert_not_reached ();
13952 reg2 = alloc_ireg (cfg);
13953 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13954 /* Load the locals area address */
13955 reg3 = alloc_ireg (cfg);
13956 if (locals_var->opcode == OP_REGOFFSET) {
13957 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13958 } else if (locals_var->opcode == OP_REGVAR) {
13959 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13961 g_assert_not_reached ();
13963 /* Compute the address */
13964 ins->opcode = OP_PADD;
13968 mono_bblock_insert_before_ins (bb, ins, load3);
13969 mono_bblock_insert_before_ins (bb, load3, load2);
13971 mono_bblock_insert_before_ins (bb, load2, load);
13973 g_assert (var->opcode == OP_REGOFFSET);
13975 ins->opcode = OP_ADD_IMM;
13976 ins->sreg1 = var->inst_basereg;
13977 ins->inst_imm = var->inst_offset;
13980 *need_local_opts = TRUE;
13981 spec = INS_INFO (ins->opcode);
13984 if (ins->opcode < MONO_CEE_LAST) {
13985 mono_print_ins (ins);
13986 g_assert_not_reached ();
13990 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13994 if (MONO_IS_STORE_MEMBASE (ins)) {
13995 tmp_reg = ins->dreg;
13996 ins->dreg = ins->sreg2;
13997 ins->sreg2 = tmp_reg;
14000 spec2 [MONO_INST_DEST] = ' ';
14001 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14002 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14003 spec2 [MONO_INST_SRC3] = ' ';
14005 } else if (MONO_IS_STORE_MEMINDEX (ins))
14006 g_assert_not_reached ();
14011 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14012 printf ("\t %.3s %d", spec, ins->dreg);
14013 num_sregs = mono_inst_get_src_registers (ins, sregs);
14014 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14015 printf (" %d", sregs [srcindex]);
14022 regtype = spec [MONO_INST_DEST];
14023 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14026 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14027 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14028 MonoInst *store_ins;
14030 MonoInst *def_ins = ins;
14031 int dreg = ins->dreg; /* The original vreg */
14033 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14035 if (var->opcode == OP_REGVAR) {
14036 ins->dreg = var->dreg;
14037 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14039 * Instead of emitting a load+store, use a _membase opcode.
14041 g_assert (var->opcode == OP_REGOFFSET);
14042 if (ins->opcode == OP_MOVE) {
14046 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14047 ins->inst_basereg = var->inst_basereg;
14048 ins->inst_offset = var->inst_offset;
14051 spec = INS_INFO (ins->opcode);
14055 g_assert (var->opcode == OP_REGOFFSET);
14057 prev_dreg = ins->dreg;
14059 /* Invalidate any previous lvreg for this vreg */
14060 vreg_to_lvreg [ins->dreg] = 0;
14064 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14066 store_opcode = OP_STOREI8_MEMBASE_REG;
14069 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14071 #if SIZEOF_REGISTER != 8
14072 if (regtype == 'l') {
14073 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14074 mono_bblock_insert_after_ins (bb, ins, store_ins);
14075 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14076 mono_bblock_insert_after_ins (bb, ins, store_ins);
14077 def_ins = store_ins;
14082 g_assert (store_opcode != OP_STOREV_MEMBASE);
14084 /* Try to fuse the store into the instruction itself */
14085 /* FIXME: Add more instructions */
14086 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14087 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14088 ins->inst_imm = ins->inst_c0;
14089 ins->inst_destbasereg = var->inst_basereg;
14090 ins->inst_offset = var->inst_offset;
14091 spec = INS_INFO (ins->opcode);
14092 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14093 ins->opcode = store_opcode;
14094 ins->inst_destbasereg = var->inst_basereg;
14095 ins->inst_offset = var->inst_offset;
14099 tmp_reg = ins->dreg;
14100 ins->dreg = ins->sreg2;
14101 ins->sreg2 = tmp_reg;
14104 spec2 [MONO_INST_DEST] = ' ';
14105 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14106 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14107 spec2 [MONO_INST_SRC3] = ' ';
14109 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14110 // FIXME: The backends expect the base reg to be in inst_basereg
14111 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14113 ins->inst_basereg = var->inst_basereg;
14114 ins->inst_offset = var->inst_offset;
14115 spec = INS_INFO (ins->opcode);
14117 /* printf ("INS: "); mono_print_ins (ins); */
14118 /* Create a store instruction */
14119 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14121 /* Insert it after the instruction */
14122 mono_bblock_insert_after_ins (bb, ins, store_ins);
14124 def_ins = store_ins;
14127 * We can't assign ins->dreg to var->dreg here, since the
14128 * sregs could use it. So set a flag, and do it after
14131 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14132 dest_has_lvreg = TRUE;
14137 if (def_ins && !live_range_start [dreg]) {
14138 live_range_start [dreg] = def_ins;
14139 live_range_start_bb [dreg] = bb;
14142 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14145 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14146 tmp->inst_c1 = dreg;
14147 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14154 num_sregs = mono_inst_get_src_registers (ins, sregs);
14155 for (srcindex = 0; srcindex < 3; ++srcindex) {
14156 regtype = spec [MONO_INST_SRC1 + srcindex];
14157 sreg = sregs [srcindex];
14159 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14160 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14161 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14162 MonoInst *use_ins = ins;
14163 MonoInst *load_ins;
14164 guint32 load_opcode;
14166 if (var->opcode == OP_REGVAR) {
14167 sregs [srcindex] = var->dreg;
14168 //mono_inst_set_src_registers (ins, sregs);
14169 live_range_end [sreg] = use_ins;
14170 live_range_end_bb [sreg] = bb;
14172 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14175 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14176 /* var->dreg is a hreg */
14177 tmp->inst_c1 = sreg;
14178 mono_bblock_insert_after_ins (bb, ins, tmp);
14184 g_assert (var->opcode == OP_REGOFFSET);
14186 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14188 g_assert (load_opcode != OP_LOADV_MEMBASE);
14190 if (vreg_to_lvreg [sreg]) {
14191 g_assert (vreg_to_lvreg [sreg] != -1);
14193 /* The variable is already loaded to an lvreg */
14194 if (G_UNLIKELY (cfg->verbose_level > 2))
14195 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14196 sregs [srcindex] = vreg_to_lvreg [sreg];
14197 //mono_inst_set_src_registers (ins, sregs);
14201 /* Try to fuse the load into the instruction */
14202 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14203 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14204 sregs [0] = var->inst_basereg;
14205 //mono_inst_set_src_registers (ins, sregs);
14206 ins->inst_offset = var->inst_offset;
14207 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14208 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14209 sregs [1] = var->inst_basereg;
14210 //mono_inst_set_src_registers (ins, sregs);
14211 ins->inst_offset = var->inst_offset;
14213 if (MONO_IS_REAL_MOVE (ins)) {
14214 ins->opcode = OP_NOP;
14217 //printf ("%d ", srcindex); mono_print_ins (ins);
14219 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14221 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14222 if (var->dreg == prev_dreg) {
14224 * sreg refers to the value loaded by the load
14225 * emitted below, but we need to use ins->dreg
14226 * since it refers to the store emitted earlier.
14230 g_assert (sreg != -1);
14231 vreg_to_lvreg [var->dreg] = sreg;
14232 g_assert (lvregs_len < 1024);
14233 lvregs [lvregs_len ++] = var->dreg;
14237 sregs [srcindex] = sreg;
14238 //mono_inst_set_src_registers (ins, sregs);
14240 #if SIZEOF_REGISTER != 8
14241 if (regtype == 'l') {
14242 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14243 mono_bblock_insert_before_ins (bb, ins, load_ins);
14244 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14245 mono_bblock_insert_before_ins (bb, ins, load_ins);
14246 use_ins = load_ins;
14251 #if SIZEOF_REGISTER == 4
14252 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14254 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14255 mono_bblock_insert_before_ins (bb, ins, load_ins);
14256 use_ins = load_ins;
14260 if (var->dreg < orig_next_vreg) {
14261 live_range_end [var->dreg] = use_ins;
14262 live_range_end_bb [var->dreg] = bb;
14265 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14268 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14269 tmp->inst_c1 = var->dreg;
14270 mono_bblock_insert_after_ins (bb, ins, tmp);
14274 mono_inst_set_src_registers (ins, sregs);
14276 if (dest_has_lvreg) {
14277 g_assert (ins->dreg != -1);
14278 vreg_to_lvreg [prev_dreg] = ins->dreg;
14279 g_assert (lvregs_len < 1024);
14280 lvregs [lvregs_len ++] = prev_dreg;
14281 dest_has_lvreg = FALSE;
14285 tmp_reg = ins->dreg;
14286 ins->dreg = ins->sreg2;
14287 ins->sreg2 = tmp_reg;
14290 if (MONO_IS_CALL (ins)) {
14291 /* Clear vreg_to_lvreg array */
14292 for (i = 0; i < lvregs_len; i++)
14293 vreg_to_lvreg [lvregs [i]] = 0;
14295 } else if (ins->opcode == OP_NOP) {
14297 MONO_INST_NULLIFY_SREGS (ins);
14300 if (cfg->verbose_level > 2)
14301 mono_print_ins_index (1, ins);
14304 /* Extend the live range based on the liveness info */
14305 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14306 for (i = 0; i < cfg->num_varinfo; i ++) {
14307 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14309 if (vreg_is_volatile (cfg, vi->vreg))
14310 /* The liveness info is incomplete */
14313 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14314 /* Live from at least the first ins of this bb */
14315 live_range_start [vi->vreg] = bb->code;
14316 live_range_start_bb [vi->vreg] = bb;
14319 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14320 /* Live at least until the last ins of this bb */
14321 live_range_end [vi->vreg] = bb->last_ins;
14322 live_range_end_bb [vi->vreg] = bb;
14328 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14330 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14331 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14333 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14334 for (i = 0; i < cfg->num_varinfo; ++i) {
14335 int vreg = MONO_VARINFO (cfg, i)->vreg;
14338 if (live_range_start [vreg]) {
14339 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14341 ins->inst_c1 = vreg;
14342 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14344 if (live_range_end [vreg]) {
14345 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14347 ins->inst_c1 = vreg;
14348 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14349 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14351 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14357 if (cfg->gsharedvt_locals_var_ins) {
14358 /* Nullify if unused */
14359 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14360 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14363 g_free (live_range_start);
14364 g_free (live_range_end);
14365 g_free (live_range_start_bb);
14366 g_free (live_range_end_bb);
14371 * - use 'iadd' instead of 'int_add'
14372 * - handling ovf opcodes: decompose in method_to_ir.
14373 * - unify iregs/fregs
14374 * -> partly done, the missing parts are:
14375 * - a more complete unification would involve unifying the hregs as well, so
14376 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14377 * would no longer map to the machine hregs, so the code generators would need to
14378 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14379 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14380 * fp/non-fp branches speeds it up by about 15%.
14381 * - use sext/zext opcodes instead of shifts
14383 * - get rid of TEMPLOADs if possible and use vregs instead
14384 * - clean up usage of OP_P/OP_ opcodes
14385 * - cleanup usage of DUMMY_USE
14386 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14388 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14389 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14390 * - make sure handle_stack_args () is called before the branch is emitted
14391 * - when the new IR is done, get rid of all unused stuff
14392 * - COMPARE/BEQ as separate instructions or unify them ?
14393 * - keeping them separate allows specialized compare instructions like
14394 * compare_imm, compare_membase
14395 * - most back ends unify fp compare+branch, fp compare+ceq
14396 * - integrate mono_save_args into inline_method
14397 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14398 * - handle long shift opts on 32 bit platforms somehow: they require
14399 * 3 sregs (2 for arg1 and 1 for arg2)
14400 * - make byref a 'normal' type.
14401 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14402 * variable if needed.
14403 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14404 * like inline_method.
14405 * - remove inlining restrictions
14406 * - fix LNEG and enable cfold of INEG
14407 * - generalize x86 optimizations like ldelema as a peephole optimization
14408 * - add store_mem_imm for amd64
14409 * - optimize the loading of the interruption flag in the managed->native wrappers
14410 * - avoid special handling of OP_NOP in passes
14411 * - move code inserting instructions into one function/macro.
14412 * - try a coalescing phase after liveness analysis
14413 * - add float -> vreg conversion + local optimizations on !x86
14414 * - figure out how to handle decomposed branches during optimizations, ie.
14415 * compare+branch, op_jump_table+op_br etc.
14416 * - promote RuntimeXHandles to vregs
14417 * - vtype cleanups:
14418 * - add a NEW_VARLOADA_VREG macro
14419 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14420 * accessing vtype fields.
14421 * - get rid of I8CONST on 64 bit platforms
14422 * - dealing with the increase in code size due to branches created during opcode
14424 * - use extended basic blocks
14425 * - all parts of the JIT
14426 * - handle_global_vregs () && local regalloc
14427 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14428 * - sources of increase in code size:
14431 * - isinst and castclass
14432 * - lvregs not allocated to global registers even if used multiple times
14433 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14435 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14436 * - add all micro optimizations from the old JIT
14437 * - put tree optimizations into the deadce pass
14438 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14439 * specific function.
14440 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14441 * fcompare + branchCC.
14442 * - create a helper function for allocating a stack slot, taking into account
14443 * MONO_CFG_HAS_SPILLUP.
14445 * - merge the ia64 switch changes.
14446 * - optimize mono_regstate2_alloc_int/float.
14447 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14448 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14449 * parts of the tree could be separated by other instructions, killing the tree
14450 * arguments, or stores killing loads etc. Also, should we fold loads into other
14451 * instructions if the result of the load is used multiple times ?
14452 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14453 * - LAST MERGE: 108395.
14454 * - when returning vtypes in registers, generate IR and append it to the end of the
14455 * last bb instead of doing it in the epilog.
14456 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14464 - When to decompose opcodes:
14465 - earlier: this makes some optimizations hard to implement, since the low level IR
14466 no longer contains the neccessary information. But it is easier to do.
14467 - later: harder to implement, enables more optimizations.
14468 - Branches inside bblocks:
14469 - created when decomposing complex opcodes.
14470 - branches to another bblock: harmless, but not tracked by the branch
14471 optimizations, so need to branch to a label at the start of the bblock.
14472 - branches to inside the same bblock: very problematic, trips up the local
14473 reg allocator. Can be fixed by spitting the current bblock, but that is a
14474 complex operation, since some local vregs can become global vregs etc.
14475 - Local/global vregs:
14476 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14477 local register allocator.
14478 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14479 structure, created by mono_create_var (). Assigned to hregs or the stack by
14480 the global register allocator.
14481 - When to do optimizations like alu->alu_imm:
14482 - earlier -> saves work later on since the IR will be smaller/simpler
14483 - later -> can work on more instructions
14484 - Handling of valuetypes:
14485 - When a vtype is pushed on the stack, a new temporary is created, an
14486 instruction computing its address (LDADDR) is emitted and pushed on
14487 the stack. Need to optimize cases when the vtype is used immediately as in
14488 argument passing, stloc etc.
14489 - Instead of the to_end stuff in the old JIT, simply call the function handling
14490 the values on the stack before emitting the last instruction of the bb.
14493 #endif /* DISABLE_JIT */